| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
"""
default resource
"""
from zunzuncito import tools
class APIResource(object):
def dispatch(self, request, response):
request.log.debug(tools.log_json({
'API': request.version,
'URI': request.URI,
'method': request.method,
'vroot': request.vroot
}, True))
return __name__
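
# Editor's note (illustrative, not part of the original file): zunzuncito
# routes each request to APIResource.dispatch(), which logs a JSON document
# shaped like
#   {"API": "v0", "URI": "/v0/zun_default", "method": "GET", "vroot": "my_api"}
# (the field values here are hypothetical) and returns the module name as the
# response body.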
|
{
"content_hash": "8e0457977580720b6395ca8fdbd6a4a7",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 42,
"avg_line_length": 19.72222222222222,
"alnum_prop": 0.5605633802816902,
"repo_name": "nbari/zunzuncito",
"id": "7855e35b9e14eab754dc43d291489e14aeda4120",
"size": "355",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "my_api/oic/v0/zun_default/zun_default.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4811"
},
{
"name": "Python",
"bytes": "126204"
},
{
"name": "Shell",
"bytes": "586"
}
],
"symlink_target": ""
}
|
"""
This module contains routines for calculating and modelling the
flight times of different airlines' flights to the same
destinations.
Contains routines to answer the following question:
- Do different airlines have the same travel times to the
  same locations?
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def compute_flight_times(airline, airport, flights):
"""
Receives 3 data frames, and returns a data frame suitable
for printing
"""
airline_df = airline
airport_df = airport
flights_df = flights
# reduce columns
ftimes_df = flights_df[['AIRLINE', 'ORIGIN_AIRPORT',
'DESTINATION_AIRPORT', 'ELAPSED_TIME']]
# reduce rows to only three airlines (n=1,658,652)
ftimes_df = ftimes_df[(ftimes_df['AIRLINE'] == "UA") |
(ftimes_df['AIRLINE'] == "F9") |
(ftimes_df['AIRLINE'] == "EV")]
# reduce rows to only three destination airports (n=156,611)
ftimes_df = ftimes_df[(ftimes_df['DESTINATION_AIRPORT'] == "IAH") |
(ftimes_df['DESTINATION_AIRPORT'] == "ORD") |
(ftimes_df['DESTINATION_AIRPORT'] == "ATL")]
    # reduce rows to only one origin airport (n=4,210)
ftimes_df = ftimes_df[(ftimes_df['ORIGIN_AIRPORT'] == "IAD")]
# calculate mean times for airline to destination (n=4)
ftimes_df = ftimes_df.groupby(['AIRLINE', 'DESTINATION_AIRPORT'],
as_index=True)['ELAPSED_TIME'].mean().reset_index()
return ftimes_df
def plot_flight_times(ftimes_df):
"""
Produces a bar chart from the flight times data frame
"""
# extract data from dataframe into arrays
means_ev = ftimes_df.ELAPSED_TIME[(ftimes_df.AIRLINE == "EV")].tolist()
means_f9 = ftimes_df.ELAPSED_TIME[(ftimes_df.AIRLINE == "F9")].tolist()
means_ua = ftimes_df.ELAPSED_TIME[(ftimes_df.AIRLINE == "UA")].tolist()
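    # pad F9 at index 1 (the IAH slot in the sorted ATL/IAH/ORD order) so
    # that all three lists line up with the three x-axis groups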
means_f9.insert(1, 0.0)
n_groups = 3
# %matplotlib inline
# create plot
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.25
opacity = 0.8
rects1 = plt.bar(index, means_ev, bar_width,
alpha=opacity,
color='b',
label='AtlanticSE')
rects2 = plt.bar(index + bar_width, means_ua, bar_width,
alpha=opacity,
color='r',
label='United')
rects3 = plt.bar(index + 2*bar_width, means_f9, bar_width,
alpha=opacity,
color='g',
label='Frontier')
plt.xlabel('Airport')
plt.ylabel('Minutes')
plt.title('Comparison of Flight Times from Dulles Airport')
plt.xticks(index + bar_width, ('ATL', 'IAH', 'ORD'))
plt.legend()
plt.tight_layout()
return fig
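
# Example usage (editor's sketch; the CSV file names are assumptions based on
# the 2015 flight-delays dataset and are not part of the original module):
if __name__ == '__main__':
    airline = pd.read_csv('airlines.csv')
    airport = pd.read_csv('airports.csv')
    flights = pd.read_csv('flights.csv')
    ftimes = compute_flight_times(airline, airport, flights)
    print(ftimes)
    plot_flight_times(ftimes)
    plt.show()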
|
{
"content_hash": "83200f7b7f8cbe3d0de37f0e67b2e1ff",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 75,
"avg_line_length": 30.28723404255319,
"alnum_prop": 0.5900948366701791,
"repo_name": "jrentsch/2015_flight_analysis",
"id": "8fdcd77bfee9bcea5adbf7ec4d0442bc8f5af5d5",
"size": "2847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flightanalysis2015/flighttimes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "490951"
},
{
"name": "Python",
"bytes": "8422"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import multiselectfield.db.fields
class Migration(migrations.Migration):
dependencies = [
('initiatives', '0026_auto_20201112_1519'),
]
operations = [
migrations.AddField(
model_name='initiativeplatformsettings',
name='enable_office_regions',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='initiativeplatformsettings',
name='activity_types',
field=multiselectfield.db.fields.MultiSelectField(choices=[('funding', 'Funding'), ('periodactivity', 'Activity during a period'), ('dateactivity', 'Activity on a specific date')], max_length=100),
),
]
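
# Editor's note (illustrative, not part of the migration): once applied,
# InitiativePlatformSettings effectively gains
#     enable_office_regions = models.BooleanField(default=False)
# and activity_types becomes a MultiSelectField storing any subset of
# {'funding', 'periodactivity', 'dateactivity'} in a single max_length=100
# column.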
|
{
"content_hash": "3865b57a18a0599ce3b7b8008bae2bf6",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 209,
"avg_line_length": 33,
"alnum_prop": 0.6464646464646465,
"repo_name": "onepercentclub/bluebottle",
"id": "28989dbd424f8d201eb47592ab7a98f33f382f1a",
"size": "866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bluebottle/initiatives/migrations/0027_auto_20201229_1302.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "41694"
},
{
"name": "HTML",
"bytes": "246695"
},
{
"name": "Handlebars",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "139123"
},
{
"name": "PHP",
"bytes": "35"
},
{
"name": "PLpgSQL",
"bytes": "1369882"
},
{
"name": "PostScript",
"bytes": "2927"
},
{
"name": "Python",
"bytes": "4983116"
},
{
"name": "Rich Text Format",
"bytes": "39109"
},
{
"name": "SCSS",
"bytes": "99555"
},
{
"name": "Shell",
"bytes": "3068"
},
{
"name": "Smarty",
"bytes": "3814"
}
],
"symlink_target": ""
}
|
import urllib
from tempest.common import service_client
class DatabaseVersionsClientJSON(service_client.ServiceClient):
def __init__(self, auth_provider, service, region,
endpoint_type=None, build_interval=None, build_timeout=None,
disable_ssl_certificate_validation=None, ca_certs=None,
trace_requests=None):
dscv = disable_ssl_certificate_validation
super(DatabaseVersionsClientJSON, self).__init__(
auth_provider, service, region,
endpoint_type=endpoint_type,
build_interval=build_interval,
build_timeout=build_timeout,
disable_ssl_certificate_validation=dscv,
ca_certs=ca_certs,
trace_requests=trace_requests)
self.skip_path()
def list_db_versions(self, params=None):
"""List all versions."""
url = ''
if params:
url += '?%s' % urllib.urlencode(params)
resp, body = self.get(url)
self.expected_success(200, resp.status)
return service_client.ResponseBodyList(resp, self._parse_resp(body))
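
# Example usage (editor's sketch; the auth provider, service name and region
# are assumptions -- in tempest they come from the test configuration):
#   client = DatabaseVersionsClientJSON(auth_provider, 'database', 'RegionOne')
#   versions = client.list_db_versions()
#   for version in versions:
#       print(version)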
|
{
"content_hash": "fb1920506c4de1b9b6d49cdeef9b3a1c",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 77,
"avg_line_length": 36.32258064516129,
"alnum_prop": 0.6225577264653641,
"repo_name": "rzarzynski/tempest",
"id": "aa2fef7c9fa5a9a44251a3366e15d391c6473ee2",
"size": "1762",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tempest/services/database/json/versions_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "695"
},
{
"name": "Python",
"bytes": "2888467"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
}
|
import sys
if sys.version_info < (3, 5, 2):
raise SystemExit('ERROR: Tried to install Meson with an unsupported Python version: \n{}'
'\nMeson requires Python 3.5.2 or greater'.format(sys.version))
from mesonbuild.coredata import version
from setuptools import setup
# On Windows, this will create Scripts/meson.exe and Scripts/meson-script.py
# Other platforms will create bin/meson
entries = {'console_scripts': ['meson=mesonbuild.mesonmain:main']}
packages = ['mesonbuild',
'mesonbuild.ast',
'mesonbuild.backend',
'mesonbuild.cmake',
'mesonbuild.compilers',
'mesonbuild.compilers.mixins',
'mesonbuild.dependencies',
'mesonbuild.modules',
'mesonbuild.scripts',
'mesonbuild.templates',
'mesonbuild.wrap']
package_data = {
'mesonbuild.dependencies': ['data/CMakeLists.txt', 'data/CMakeListsLLVM.txt', 'data/CMakePathInfo.txt'],
'mesonbuild.cmake': ['data/run_ctgt.py', 'data/preload.cmake'],
}
data_files = []
if sys.platform != 'win32':
# Only useful on UNIX-like systems
data_files = [('share/man/man1', ['man/meson.1']),
('share/polkit-1/actions', ['data/com.mesonbuild.install.policy'])]
if __name__ == '__main__':
setup(name='meson',
version=version,
packages=packages,
package_data=package_data,
entry_points=entries,
data_files=data_files,)
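
# Editor's note (illustrative): installing this package (e.g. `pip install .`)
# generates a `meson` launcher from the console_scripts entry point above,
# roughly equivalent to:
#   import sys
#   from mesonbuild.mesonmain import main
#   sys.exit(main())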
|
{
"content_hash": "b11f392880c2dd0f8cd4526beec475fc",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 108,
"avg_line_length": 37,
"alnum_prop": 0.6216216216216216,
"repo_name": "becm/meson",
"id": "1f95be70c84f9b6ebe3eb2862329dc945757f75e",
"size": "2092",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "4190"
},
{
"name": "Batchfile",
"bytes": "131"
},
{
"name": "C",
"bytes": "167971"
},
{
"name": "C#",
"bytes": "1130"
},
{
"name": "C++",
"bytes": "51171"
},
{
"name": "CMake",
"bytes": "27103"
},
{
"name": "Cuda",
"bytes": "7454"
},
{
"name": "D",
"bytes": "5313"
},
{
"name": "Dockerfile",
"bytes": "1960"
},
{
"name": "Emacs Lisp",
"bytes": "919"
},
{
"name": "Fortran",
"bytes": "11539"
},
{
"name": "Genie",
"bytes": "341"
},
{
"name": "HTML",
"bytes": "117"
},
{
"name": "Inno Setup",
"bytes": "354"
},
{
"name": "Java",
"bytes": "2570"
},
{
"name": "JavaScript",
"bytes": "136"
},
{
"name": "LLVM",
"bytes": "75"
},
{
"name": "Lex",
"bytes": "139"
},
{
"name": "Meson",
"bytes": "454262"
},
{
"name": "Objective-C",
"bytes": "1235"
},
{
"name": "Objective-C++",
"bytes": "381"
},
{
"name": "PowerShell",
"bytes": "2242"
},
{
"name": "Python",
"bytes": "2912935"
},
{
"name": "Roff",
"bytes": "569"
},
{
"name": "Rust",
"bytes": "1079"
},
{
"name": "Shell",
"bytes": "6800"
},
{
"name": "Swift",
"bytes": "1152"
},
{
"name": "Vala",
"bytes": "10025"
},
{
"name": "Verilog",
"bytes": "709"
},
{
"name": "Vim script",
"bytes": "9919"
},
{
"name": "Yacc",
"bytes": "50"
}
],
"symlink_target": ""
}
|
import re
import warnings
from itertools import chain
from django.core.exceptions import FieldError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import OrderBy, Random, RawSQL, Ref
from django.db.models.query_utils import QueryWrapper, select_related_descend
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE,
)
from django.db.models.sql.datastructures import EmptyResultSet
from django.db.models.sql.query import Query, get_order_dir
from django.db.transaction import TransactionManagementError
from django.db.utils import DatabaseError
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.six.moves import zip
class SQLCompiler(object):
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {'*': '*'}
# The select, klass_info, and annotations are needed by QuerySet.iterator()
# these are set as a side-effect of executing the query. Note that we calculate
# separately a list of extra select columns needed for grammatical correctness
# of the query, but these columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
self.ordering_parts = re.compile(r'(.*)\s(ASC|DESC)(.*)')
self.subquery = False
def setup_query(self):
if all(self.query.alias_refcount[a] == 0 for a in self.query.tables):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select()
self.col_count = len(self.select)
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query()
order_by = self.get_order_by()
extra_select = self.get_extra_select(order_by, self.select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Returns a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
        # can't ever be restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values calls that
        # refer to some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, 'as_sql'):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the select in any case.
for expr, _, _ in select:
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
for expr, (sql, params, is_ref) in order_by:
if expr.contains_aggregate:
continue
            # We can skip references to the select clause, as all expressions
            # in the select clause are already part of the group by.
if is_ref:
continue
expressions.extend(expr.get_source_expressions())
having = self.query.having.get_group_by_cols()
for expr in having:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having)
for expr in expressions:
sql, params = self.compile(expr)
if (sql, tuple(params)) not in seen:
result.append((sql, params))
seen.add((sql, tuple(params)))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key. Currently only the MySQL form is
# implemented.
# MySQLism: however, columns in HAVING clause must be added to the
# GROUP BY.
if self.connection.features.allows_group_by_pk:
# The logic here is: if the main model's primary key is in the
# query, then set new_expressions to that field. If that happens,
# then also add having expressions to group by.
pk = None
for expr in expressions:
if (expr.output_field.primary_key and
getattr(expr.output_field, 'model') == self.query.model):
pk = expr
if pk:
expressions = [pk] + [expr for expr in expressions if expr in having]
return expressions
def get_select(self):
"""
Returns three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- Which model to instantiate
- Which columns for that model are present in the query (by
position of the select clause).
        - related_klass_infos: [f, klass_info] to descend into
        The annotations value is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
if self.query.default_cols:
select_list = []
for c in self.get_default_columns():
select_list.append(select_idx)
select.append((c, None))
select_idx += 1
klass_info = {
'model': self.query.model,
'select_fields': select_list,
}
# self.query.select is a special case. These columns never go to
# any model.
for col in self.query.select:
select.append((col, None))
select_idx += 1
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select)
klass_info['related_klass_infos'] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info['related_klass_infos']:
if ki['from_parent']:
ki['select_fields'] = (klass_info['select_fields'] +
ki['select_fields'])
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
for col, alias in select:
ret.append((col, self.compile(col, select_format=True), alias))
return ret, klass_info, annotations
def get_order_by(self):
"""
Returns a list of 2-tuples of form (expr, (sql, params)) for the
ORDER BY clause.
The order_by clause can alter the select clause (for example it
can add aliases to clauses that do not yet have one, or it can
add totally new select clauses).
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
else:
ordering = (self.query.order_by or self.query.get_meta().ordering or [])
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
order_by = []
for pos, field in enumerate(ordering):
if hasattr(field, 'resolve_expression'):
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field.reverse_ordering()
order_by.append((field, False))
continue
if field == '?': # random
order_by.append((OrderBy(Random()), False))
continue
col, order = get_order_dir(field, asc)
descending = True if order == 'DESC' else False
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
order_by.append((
OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending),
True))
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT clause
order_by.append((
OrderBy(self.query.annotations[col], descending=descending),
False))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
order_by.append((
OrderBy(
RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []),
descending=descending
), False))
continue
if not self.query._extra or col not in self.query._extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
order_by.extend(self.find_ordering_name(
field, self.query.get_meta(), default_order=asc))
else:
if col not in self.query.extra_select:
order_by.append((
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False))
else:
order_by.append((
OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending),
True))
result = []
seen = set()
for expr, is_ref in order_by:
resolved = expr.resolve_expression(
self.query, allow_joins=True, reuse=None)
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql).group(1)
if (without_ordering, tuple(params)) in seen:
continue
seen.add((without_ordering, tuple(params)))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
select_sql = [t[1] for t in select]
if self.query.distinct and not self.query.distinct_fields:
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql).group(1)
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def __call__(self, name):
"""
Backwards-compatibility shim so that calling a SQLCompiler is equivalent to
calling its quote_name_unless_alias method.
"""
warnings.warn(
"Calling a SQLCompiler directly is deprecated. "
"Call compiler.quote_name_unless_alias instead.",
RemovedInDjango20Warning, stacklevel=2)
return self.quote_name_unless_alias(name)
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select or (
name in self.query.external_aliases and name not in self.query.table_map)):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def compile(self, node, select_format=False):
vendor_impl = getattr(node, 'as_' + self.connection.vendor, None)
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
if select_format and not self.subquery:
return node.output_field.select_format(self, sql, params)
return sql, params
def as_sql(self, with_limits=True, with_col_aliases=False, subquery=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
# After executing the query, we must get rid of any joins the query
# setup created. So, take note of alias counts before the query ran.
# However we do not want to get rid of stuff done in pre_sql_setup(),
# as the pre_sql_setup will modify query state in a way that forbids
# another run of it.
self.subquery = subquery
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup()
if with_limits and self.query.low_mark == self.query.high_mark:
return '', ()
distinct_fields = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct' -- see
# docstring of get_from_clause() for details.
from_, f_params = self.get_from_clause()
where, w_params = self.compile(self.query.where)
having, h_params = self.compile(self.query.having)
params = []
result = ['SELECT']
if self.query.distinct:
result.append(self.connection.ops.distinct_sql(distinct_fields))
out_cols = []
col_idx = 1
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias))
elif with_col_aliases:
s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
col_idx += 1
params.extend(s_params)
out_cols.append(s_sql)
result.append(', '.join(out_cols))
result.append('FROM')
result.extend(from_)
params.extend(f_params)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError(
"annotate() + distinct(fields) is not implemented.")
if not order_by:
order_by = self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limits:
if self.query.high_mark is not None:
result.append('LIMIT %d' % (self.query.high_mark - self.query.low_mark))
if self.query.low_mark:
if self.query.high_mark is None:
val = self.connection.ops.no_limit_value()
if val:
result.append('LIMIT %d' % val)
result.append('OFFSET %d' % self.query.low_mark)
if self.query.select_for_update and self.connection.features.has_select_for_update:
if self.connection.get_autocommit():
raise TransactionManagementError(
"select_for_update cannot be used outside of a transaction."
)
# If we've been asked for a NOWAIT query but the backend does
# not support it, raise a DatabaseError otherwise we could get
# an unexpected deadlock.
nowait = self.query.select_for_update_nowait
if nowait and not self.connection.features.has_select_for_update_nowait:
raise DatabaseError('NOWAIT is not supported on this database backend.')
result.append(self.connection.ops.for_update_sql(nowait=nowait))
return ' '.join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
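    # Editor's note (illustrative): the branches above assemble the statement
    # strictly in this clause order:
    #   SELECT [DISTINCT] cols FROM tables [WHERE ...] [GROUP BY ...]
    #   [HAVING ...] [ORDER BY ...] [LIMIT n] [OFFSET m] [FOR UPDATE [NOWAIT]]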
def as_nested_sql(self):
"""
Perform the same functionality as the as_sql() method, returning an
SQL string and parameters. However, the alias prefixes are bumped
beforehand (in a copy -- the current query isn't changed), and any
ordering is removed if the query is unsliced.
Used when nesting this query inside another.
"""
obj = self.query.clone()
if obj.low_mark == 0 and obj.high_mark is None and not self.query.distinct_fields:
# If there is no slicing in use, then we can safely drop all ordering
obj.clear_ordering(True)
return obj.get_compiler(connection=self.connection).as_sql(subquery=True)
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
        Returns a list of column expressions (one per concrete field) suitable
        for direct inclusion in the query's SELECT clause.
"""
result = []
if opts is None:
opts = self.query.get_meta()
only_load = self.deferred_to_columns()
if not start_alias:
start_alias = self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if from_parent and model is not None and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if field.model in only_load and field.attname not in only_load[field.model]:
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
Returns a quoted list of fields to use in DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _ = self._setup_joins(parts, opts, None)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
result.append("%s.%s" % (qn(alias), qn2(target.column)))
return result
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = True if order == 'DESC' else False
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless the attribute name
# of the field is specified.
if field.rel and path and opts.ordering and name != field.attname:
# Firstly, avoid infinite loops.
if not already_seen:
already_seen = set()
join_tuple = tuple(self.query.alias_map[j].table_name for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(OrderBy(t.get_col(alias), descending=descending), False) for t in targets]
def _setup_joins(self, pieces, opts, alias):
"""
A helper method for get_order_by and get_distinct.
        Note that get_order_by() and get_distinct() must produce the same
        target columns on the same input, as the prefixes they generate must
        match. Executing SQL where this is not true is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
field, targets, opts, joins, path = self.query.setup_joins(
pieces, opts, alias)
alias = joins[-1]
return field, targets, alias, joins, path, opts
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
"""
result = []
params = []
for alias in self.query.tables:
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
result.append(', %s' % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects if f.field.unique
)
return chain(direct_choices, reverse_choices)
related_klass_infos = []
if not restricted and self.query.max_depth and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
if isinstance(self.query.select_related, dict):
requested = self.query.select_related
restricted = True
else:
restricted = False
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info['related_klass_infos'] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or (cur_depth == 1 and f.name in requested):
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s" % (
f.name,
", ".join(_get_field_choices()) or '(none)',
)
)
else:
next = False
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
klass_info = {
'model': f.rel.to,
'field': f,
'reverse': False,
'from_parent': False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _ = self.query.setup_joins(
[f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(start_alias=alias, opts=f.rel.to._meta)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_klass_infos = self.get_related_selections(
select, f.rel.to._meta, alias, cur_depth + 1, next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
_, _, _, joins, _ = self.query.setup_joins([related_field_name], opts, root_alias)
alias = joins[-1]
from_parent = issubclass(model, opts.model)
klass_info = {
'model': model,
'field': f,
'reverse': True,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta, from_parent=opts.model)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select, model._meta, alias, cur_depth + 1,
next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested.keys()).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
'Invalid field name(s) given in select_related: %s. '
'Choices are: %s' % (
', '.join(invalid_fields),
', '.join(_get_field_choices()) or '(none)',
)
)
return related_klass_infos
def deferred_to_columns(self):
"""
        Converts the self.deferred_loading data structure to a mapping of
        table names to sets of column names which are to be loaded. Returns
        the dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb)
return columns
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters + field_converters, expression)
return converters
def apply_converters(self, row, converters):
row = list(row)
for pos, (convs, expression) in converters.items():
value = row[pos]
for converter in convs:
value = converter(value, expression, self.connection, self.query.context)
row[pos] = value
return tuple(row)
def results_iter(self, results=None):
"""
Returns an iterator over the results from executing this query.
"""
converters = None
if results is None:
results = self.execute_sql(MULTI)
fields = [s[0] for s in self.select[0:self.col_count]]
converters = self.get_converters(fields)
for rows in results:
for row in rows:
if converters:
row = self.apply_converters(row, converters)
yield row
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
# This is always executed on a query clone, so we can modify self.query
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI):
"""
        Runs the query against the database and returns the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
if not result_type:
result_type = NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
cursor.close()
raise
if result_type == CURSOR:
# Caller didn't specify a result_type, so just give them back the
# cursor to process (and close).
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0:self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor, self.connection.features.empty_fetchmany_value,
self.col_count
)
if not self.connection.features.can_use_chunked_reads:
try:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further.
return list(result)
finally:
# done with the cursor
cursor.close()
return result
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
if len(columns) == 1:
sql, params = self.as_sql()
return '%s.%s IN (%s)' % (qn(alias), qn2(columns[0]), sql), params
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
QueryWrapper('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
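    # Editor's note (illustrative): with a single column the method emits
    #   alias.col IN (SELECT ...)
    # and with several columns it ANDs alias.col_i = <select expr_i> into the
    # subquery's WHERE clause and wraps the result as EXISTS (SELECT ...).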
class SQLInsertCompiler(SQLCompiler):
def __init__(self, *args, **kwargs):
self.return_id = False
super(SQLInsertCompiler, self).__init__(*args, **kwargs)
def placeholder(self, field, val):
if field is None:
# A field value of None means the value is raw.
return val
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
return field.get_placeholder(val, self, self.connection)
else:
# Return the common case for the placeholder
return '%s'
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
result = ['INSERT INTO %s' % qn(opts.db_table)]
has_fields = bool(self.query.fields)
fields = self.query.fields if has_fields else [opts.pk]
result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
if has_fields:
params = values = [
[
f.get_db_prep_save(
getattr(obj, f.attname) if self.query.raw else f.pre_save(obj, True),
connection=self.connection
) for f in fields
]
for obj in self.query.objs
]
else:
values = [[self.connection.ops.pk_default_value()] for obj in self.query.objs]
params = [[]]
fields = [None]
can_bulk = (not any(hasattr(field, "get_placeholder") for field in fields) and
not self.return_id and self.connection.features.has_bulk_insert)
if can_bulk:
placeholders = [["%s"] * len(fields)]
else:
placeholders = [
[self.placeholder(field, v) for field, v in zip(fields, val)]
for val in values
]
# Oracle Spatial needs to remove some values due to #10888
params = self.connection.ops.modify_insert_params(placeholders, params)
if self.return_id and self.connection.features.can_return_id_from_insert:
params = params[0]
col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
result.append("VALUES (%s)" % ", ".join(placeholders[0]))
r_fmt, r_params = self.connection.ops.return_insert_id()
# Skip empty r_fmt to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
if r_fmt:
result.append(r_fmt % col)
params += r_params
return [(" ".join(result), tuple(params))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, len(values)))
return [(" ".join(result), tuple(v for val in values for v in val))]
else:
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholders, params)
]
def execute_sql(self, return_id=False):
assert not (return_id and len(self.query.objs) != 1)
self.return_id = return_id
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not (return_id and cursor):
return
if self.connection.features.can_return_id_from_insert:
return self.connection.ops.fetch_returned_insert_id(cursor)
return self.connection.ops.last_insert_id(cursor,
self.query.get_meta().db_table, self.query.get_meta().pk.column)
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len(self.query.tables) == 1, \
"Can only delete from one table at a time."
qn = self.quote_name_unless_alias
result = ['DELETE FROM %s' % qn(self.query.tables[0])]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
table = self.query.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'resolve_expression'):
val = val.resolve_expression(self.query, allow_joins=False, for_save=True)
if val.contains_aggregate:
raise FieldError("Aggregate functions are not allowed in this query")
elif hasattr(val, 'prepare_database_save'):
if field.rel:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError("Database is trying to update a relational field "
"of type %s with a value of type %s. Make sure "
"you are setting the correct relations" %
(field.__class__.__name__, val.__class__.__name__))
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = '%s'
name = field.column
if hasattr(val, 'as_sql'):
sql, params = self.compile(val)
values.append('%s = %s' % (qn(name), sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
if not values:
return '', ()
result.append(', '.join(values))
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.clone(klass=Query)
query.select_related = False
query.clear_ordering(True)
query._extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
super(SQLUpdateCompiler, self).pre_sql_setup()
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
self.query.reset_refcounts(refcounts_before)
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
# Empty SQL for the inner query is a marker that the inner query
# isn't going to produce any results. This can happen when doing
# LIMIT 0 queries (generated by qs[:0]) for example.
if not self.query.subquery:
raise EmptyResultSet
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation, select_format=True)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params
def cursor_iter(cursor, sentinel, col_count):
"""
Yields blocks of rows from a cursor and ensures the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)),
sentinel):
yield [r[0:col_count] for r in rows]
finally:
cursor.close()
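
# Example usage (editor's sketch; `Author` is a hypothetical model):
#   qs = Author.objects.filter(name__startswith='A')
#   compiler = qs.query.get_compiler(using='default')
#   sql, params = compiler.as_sql()
# str(qs.query) reaches the same machinery via the default compiler.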
|
{
"content_hash": "46b918c6e65c26e333bed9b955052495",
"timestamp": "",
"source": "github",
"line_count": 1152,
"max_line_length": 98,
"avg_line_length": 43.322048611111114,
"alnum_prop": 0.5623660007614162,
"repo_name": "ecrespo/django_kanban-agile",
"id": "810031f47ef59001d7fcbf500a49215568347d9f",
"size": "49907",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "kanban/lib/python2.7/site-packages/django/db/models/sql/compiler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "260210"
},
{
"name": "Groff",
"bytes": "28"
},
{
"name": "HTML",
"bytes": "136316"
},
{
"name": "JavaScript",
"bytes": "456674"
},
{
"name": "Python",
"bytes": "5905331"
},
{
"name": "Shell",
"bytes": "3811"
}
],
"symlink_target": ""
}
|
"""
A parser for Purple programs.
The result is a parse object that can return a (recursive) relational algebra
expression.
"""
from pyparsing import Literal, CaselessLiteral, Word, delimitedList, \
Optional, Combine, Group, alphas, nums, alphanums, oneOf, quotedString, \
ZeroOrMore, restOfLine
import raco
from raco import expression
import raco.datalog.model as model
def show(x):
print x
return x
drop = lambda x: Literal(x).suppress()
# define Datalog tokens
ident = Word(alphas, alphanums + "_$")
predicate = ident.setName("Predicate")
E = CaselessLiteral("E")
# Get all the aggregate expression classes
aggregate_functions = raco.expression.aggregate_functions()
# All binary operators
binopclasses = expression.binary_ops()
# a list of string literals representing opcodes
opcodes = sum([oc.literals for oc in binopclasses], [])
binopstr = " ".join(opcodes)
def parsebinop(opexpr):
"parse action for binary operators"
left, opstr, right = opexpr
for opclass in binopclasses:
if opstr in opclass.literals:
return opclass(left, right)
binop = oneOf(binopstr)
arithSign = Word("+-", exact=1)
realNum = Combine(Optional(arithSign) +
(Word(nums) + "." + Optional(Word(nums)) | ("." + Word(nums))) # noqa
+ Optional(E + Optional(arithSign) + Word(nums)))
realNum.setParseAction(lambda x: expression.NumericLiteral(float(x[0])))
intNum = Combine(Optional(arithSign) + Word(nums) +
Optional(E + Optional("+") + Word(nums)))
intNum.setParseAction(lambda x: expression.NumericLiteral(int(x[0])))
number = realNum | intNum
variable = ident.copy()
variable.setParseAction(lambda x: model.Var(x[0]))
quotedString.setParseAction(lambda x: expression.StringLiteral(x[0][1:-1]))
literal = quotedString | number
valueref = variable | literal
def mkterm(x):
return model.Term(x)
term = (predicate
+ drop("(")
+ Group(delimitedList(valueref, ","))
+ drop(")")).setParseAction(mkterm)
def checkval(xs):
left, op, right = xs[0]
if op == '=':
result = left == right
else:
result = eval(left + op + right)
return result
groundcondition = Group(literal + binop + literal)
# groundcondition.setParseAction(checkval)
# TODO: deeper expression trees
condition = (valueref + binop + valueref)
condition.setParseAction(parsebinop)
body = delimitedList(term | groundcondition | condition, ",")
# .setParseAction(show) # lambda xs: [Term(x) for x in xs])
partitioner = drop("h(") + delimitedList(variable, ",") + drop(")")
partitioner.setParseAction(lambda x: model.PartitionBy(x))
allservers = Literal("*").setParseAction(lambda x: model.Broadcast())
server = drop("@") + (partitioner | allservers)
timeexpr = variable + oneOf("+ -") + Word(nums)
timeexpr.setParseAction(lambda xs: "".join([str(x) for x in xs]))
timestep = drop("#") + (intNum | timeexpr | variable)
timestep.setParseAction(lambda x: model.Timestep(x[0]))
# expressions without aggregates
# TODO more complete
simpleArithExpression = (valueref + binop + valueref)
simpleArithExpression.setParseAction(parsebinop)
def mkagg(x):
opstr, arg = x
for aggclass in aggregate_functions:
if opstr.lower() == aggclass.__name__.lower():
return aggclass(arg)
raise "Aggregate Function %s not found among %s" % (opstr, aggregate_functions) # noqa
aggregate = (Word(alphas) + drop("(") + variable + drop(")")) | \
(Word(alphas) + drop("(") + simpleArithExpression + drop(")"))
aggregate.setParseAction(mkagg)
# expressions containing aggregates
# TODO deeper instead of enumeration
arithExpression = (aggregate + binop + aggregate) | \
(valueref + binop + valueref) | \
(aggregate + binop + valueref) | \
(valueref + binop + aggregate)
arithExpression.setParseAction(parsebinop)
# greedy parsing so put arithExpression first
headvalueref = arithExpression | aggregate | variable | literal
headterm = (predicate + Optional(server)
+ drop("(") + Group(delimitedList(headvalueref, ",")) + drop(")"))
def mkIDB(x):
if len(x) == 4:
idb = model.IDB(mkterm((x[0], x[2])), x[1], x[3])
elif len(x) == 3:
if isinstance(x[2], model.Timestep):
idb = model.IDB(mkterm((x[0], x[1])), timestep=x[2])
else:
idb = model.IDB(mkterm((x[0], x[2])), x[1])
else:
idb = model.IDB(mkterm(x))
return idb
head = (headterm + Optional(timestep) + drop(":-")).setParseAction(mkIDB)
# head.setParseAction(show)
def mkrule(x):
"""Workaround for AttributeError: Class Rule has no __call__ method when
running through wsgi"""
return model.Rule(x)
rule = (head + Group(body)
+ Optional(drop(";")) + Optional(drop(".")))
rule.setParseAction(mkrule)
def mkprogram(x):
"""Workaround for AttributeError: Class Rule has no __call__ method when
running through wsgi"""
return model.Program(x)
comment = (Literal("#") + restOfLine).suppress()
program = ZeroOrMore(rule | comment).setParseAction(mkprogram)
def parse(query):
return program.parseString(query)[0]
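
# Example usage (editor's sketch; the relation names are hypothetical):
#   plan = parse("Q(x, y) :- Edge(x, z), Edge(z, y), z = 3")
#   # `plan` is a model.Program holding one rule with two Edge terms
#   # and one selection condition.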
|
{
"content_hash": "31cd4420fc380f9b1f05878a1cc8e3d1",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 91,
"avg_line_length": 28.65193370165746,
"alnum_prop": 0.6629386810644041,
"repo_name": "uwescience/raco",
"id": "06f95a123cdd144e2b91f41f277f8a82d08a2956",
"size": "5186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "raco/datalog/grammar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1777"
},
{
"name": "C++",
"bytes": "81472"
},
{
"name": "Makefile",
"bytes": "1063"
},
{
"name": "Python",
"bytes": "1035525"
},
{
"name": "Ruby",
"bytes": "3706"
},
{
"name": "Shell",
"bytes": "282"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
from ansible.errors import AnsibleError
__metaclass__ = type
try:
    from ansible.plugins.netconf import NetconfBase
"""
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/iosxr.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/junos.py
"""
except ImportError:
raise AnsibleError("Netconf Plugin [ ansible-role-scratch-mount ]: Dependency not satisfied")
class Netconf(NetconfBase):
"""
Examples:
https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/iosxr.py
https://github.com/ansible/ansible/blob/devel/lib/ansible/plugins/netconf/junos.py
"""
raise AnsibleError("Netconf Plugin [ ansible-role-scratch-mount ]: Not implemented")
|
{
"content_hash": "3963da0d6e493841cf95e6d4983fae12",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 97,
"avg_line_length": 39.59090909090909,
"alnum_prop": 0.7060849598163031,
"repo_name": "mantti/fgci-tut",
"id": "2ef95cd392dd88badf487599aaeba4de91953636",
"size": "1553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roles/ansible-role-scratch-mount/netconf_plugins/example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "3776"
},
{
"name": "HTML",
"bytes": "2119"
},
{
"name": "Python",
"bytes": "2605"
},
{
"name": "Shell",
"bytes": "18925"
}
],
"symlink_target": ""
}
|
"""Tests for ops which manipulate lists of tensors."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np # pylint: disable=unused-import
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import list_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.platform import test
from tensorflow.python.training import server_lib
def scalar_shape():
return ops.convert_to_tensor([], dtype=dtypes.int32)
@test_util.with_c_shapes
class ListOpsTest(test_util.TensorFlowTestCase):
@test_util.run_in_graph_and_eager_modes
def testPushPop(self):
l = list_ops.empty_tensor_list(element_dtype=dtypes.float32,
element_shape=scalar_shape())
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e), 1.0)
@test_util.run_in_graph_and_eager_modes
def testPushPopGPU(self):
if not context.num_gpus():
return
with context.device("gpu:0"):
self.testPushPop()
@test_util.run_in_graph_and_eager_modes
def testStack(self):
l = list_ops.empty_tensor_list(element_dtype=dtypes.float32,
element_shape=scalar_shape())
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [1.0, 2.0])
@test_util.run_in_graph_and_eager_modes
def testGatherGrad(self):
with backprop.GradientTape() as tape:
l = list_ops.empty_tensor_list(element_dtype=dtypes.float32,
element_shape=scalar_shape())
c0 = constant_op.constant(1.0)
tape.watch(c0)
l = list_ops.tensor_list_push_back(l, c0)
l = list_ops.tensor_list_push_back(l, constant_op.constant(2.0))
t = list_ops.tensor_list_gather(l, [1, 0], element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [2.0, 1.0])
s = (t[0] + t[1]) * (t[0] + t[1])
dt = tape.gradient(s, c0)
self.assertAllEqual(self.evaluate(dt), 6.0)
@test_util.run_in_graph_and_eager_modes
def testScatterGrad(self):
with backprop.GradientTape() as tape:
c0 = constant_op.constant([1.0, 2.0])
tape.watch(c0)
l = list_ops.tensor_list_scatter(
c0, [1, 0], ops.convert_to_tensor([], dtype=dtypes.int32))
t0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
t1 = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t0), 2.0)
self.assertAllEqual(self.evaluate(t1), 1.0)
loss = t0 * t0 + t1 * t1
dt = tape.gradient(loss, c0)
self.assertAllEqual(self.evaluate(dt), [2., 4.])
@test_util.run_in_graph_and_eager_modes
def testStackGPU(self):
if not context.num_gpus():
return
with context.device("gpu:0"):
self.testStack()
@test_util.run_in_graph_and_eager_modes
def testTensorListFromTensor(self):
t = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(t, element_shape=scalar_shape())
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e), 2.0)
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e), 1.0)
self.assertAllEqual(self.evaluate(list_ops.tensor_list_length(l)), 0)
@test_util.run_in_graph_and_eager_modes
def testFromTensorGPU(self):
if not context.num_gpus():
return
with context.device("gpu:0"):
self.testTensorListFromTensor()
@test_util.run_in_graph_and_eager_modes
def testGetSetItem(self):
t = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(t, element_shape=scalar_shape())
e0 = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e0), 1.0)
l = list_ops.tensor_list_set_item(l, 0, 3.0)
t = list_ops.tensor_list_stack(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(t), [3.0, 2.0])
@test_util.run_in_graph_and_eager_modes
def testGetSetGPU(self):
if not context.num_gpus():
return
with context.device("gpu:0"):
self.testGetSetItem()
@test_util.run_in_graph_and_eager_modes
def testUnknownShape(self):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.float32, element_shape=-1)
l = list_ops.tensor_list_push_back(l, constant_op.constant(1.0))
l = list_ops.tensor_list_push_back(l, constant_op.constant([1.0, 2.0]))
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e), [1.0, 2.0])
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
self.assertAllEqual(self.evaluate(e), 1.0)
@test_util.run_in_graph_and_eager_modes
def testCPUGPUCopy(self):
if not context.num_gpus():
return
t = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(t, element_shape=scalar_shape())
with context.device("gpu:0"):
l_gpu = array_ops.identity(l)
self.assertAllEqual(
self.evaluate(
list_ops.tensor_list_pop_back(
l_gpu, element_dtype=dtypes.float32)[1]), 2.0)
l_cpu = array_ops.identity(l_gpu)
self.assertAllEqual(
self.evaluate(
list_ops.tensor_list_pop_back(
l_cpu, element_dtype=dtypes.float32)[1]), 2.0)
def testGraphStack(self):
with self.cached_session():
tl = list_ops.empty_tensor_list(
element_shape=constant_op.constant([1], dtype=dtypes.int32),
element_dtype=dtypes.int32)
tl = list_ops.tensor_list_push_back(tl, [1])
self.assertAllEqual(
self.evaluate(
list_ops.tensor_list_stack(tl, element_dtype=dtypes.int32)),
[[1]])
def testGraphStackInLoop(self):
with self.cached_session():
t1 = list_ops.empty_tensor_list(
element_shape=constant_op.constant([], dtype=dtypes.int32),
element_dtype=dtypes.int32)
i = constant_op.constant(0, dtype=dtypes.int32)
def body(i, t1):
t1 = list_ops.tensor_list_push_back(t1, i)
i += 1
return i, t1
i, t1 = control_flow_ops.while_loop(lambda i, t1: math_ops.less(i, 4),
body, [i, t1])
s1 = list_ops.tensor_list_stack(t1, element_dtype=dtypes.int32)
self.assertAllEqual(self.evaluate(s1), [0, 1, 2, 3])
def testGraphStackSwitchDtype(self):
with self.cached_session():
list_ = list_ops.empty_tensor_list(
element_shape=constant_op.constant([], dtype=dtypes.int32),
element_dtype=dtypes.int32)
m = constant_op.constant([1, 2, 3], dtype=dtypes.float32)
def body(list_, m):
list_ = control_flow_ops.cond(
math_ops.equal(list_ops.tensor_list_length(list_), 0),
lambda: list_ops.empty_tensor_list(m.shape, m.dtype), lambda: list_)
list_ = list_ops.tensor_list_push_back(list_, m)
return list_, m
for _ in range(2):
list_, m = body(list_, m)
s1 = list_ops.tensor_list_stack(list_, element_dtype=dtypes.float32)
np_s1 = np.array([[1, 2, 3], [1, 2, 3]], dtype=np.float32)
self.assertAllEqual(self.evaluate(s1), np_s1)
def testGraphStackInLoopSwitchDtype(self):
with self.cached_session():
t1 = list_ops.empty_tensor_list(
element_shape=constant_op.constant([], dtype=dtypes.int32),
element_dtype=dtypes.int32)
i = constant_op.constant(0, dtype=dtypes.float32)
m = constant_op.constant([1, 2, 3], dtype=dtypes.float32)
def body(i, m, t1):
t1 = control_flow_ops.cond(
math_ops.equal(list_ops.tensor_list_length(t1), 0),
lambda: list_ops.empty_tensor_list(m.shape, m.dtype), lambda: t1)
t1 = list_ops.tensor_list_push_back(t1, m * i)
i += 1.0
return i, m, t1
i, m, t1 = control_flow_ops.while_loop(
lambda i, m, t1: math_ops.less(i, 4), body, [i, m, t1])
s1 = list_ops.tensor_list_stack(t1, element_dtype=dtypes.float32)
np_s1 = np.vstack([np.arange(1, 4) * i for i in range(4)])
self.assertAllEqual(self.evaluate(s1), np_s1)
@test_util.run_in_graph_and_eager_modes
def testSerialize(self):
# pylint: disable=g-import-not-at-top
try:
import portpicker
except ImportError:
return
with context.graph_mode():
worker_port = portpicker.pick_unused_port()
ps_port = portpicker.pick_unused_port()
cluster_dict = {
"worker": ["localhost:%s" % worker_port],
"ps": ["localhost:%s" % ps_port]
}
cs = server_lib.ClusterSpec(cluster_dict)
worker = server_lib.Server(
cs, job_name="worker", protocol="grpc", task_index=0, start=True)
unused_ps = server_lib.Server(
cs, job_name="ps", protocol="grpc", task_index=0, start=True)
with ops.Graph().as_default(), session.Session(target=worker.target):
with ops.device("/job:worker"):
t = constant_op.constant([[1.0], [2.0]])
l = list_ops.tensor_list_from_tensor(t, element_shape=[1])
with ops.device("/job:ps"):
l_ps = array_ops.identity(l)
l_ps, e = list_ops.tensor_list_pop_back(
l_ps, element_dtype=dtypes.float32)
with ops.device("/job:worker"):
worker_e = array_ops.identity(e)
self.assertAllEqual(self.evaluate(worker_e), [2.0])
@test_util.run_in_graph_and_eager_modes
def testPushPopGradients(self):
with backprop.GradientTape() as tape:
l = list_ops.empty_tensor_list(element_dtype=dtypes.float32,
element_shape=scalar_shape())
c = constant_op.constant(1.0)
tape.watch(c)
l = list_ops.tensor_list_push_back(l, c)
l, e = list_ops.tensor_list_pop_back(l, element_dtype=dtypes.float32)
e = 2 * e
self.assertAllEqual(self.evaluate(tape.gradient(e, [c])[0]), 2.0)
@test_util.run_in_graph_and_eager_modes
def testStackFromTensorGradients(self):
with backprop.GradientTape() as tape:
c = constant_op.constant([1.0, 2.0])
tape.watch(c)
l = list_ops.tensor_list_from_tensor(c, element_shape=scalar_shape())
c2 = list_ops.tensor_list_stack(
l, element_dtype=dtypes.float32, num_elements=2)
result = c2 * 2.0
grad = tape.gradient(result, [c])[0]
self.assertAllEqual(self.evaluate(grad), [2.0, 2.0])
@test_util.run_in_graph_and_eager_modes
def testGetSetGradients(self):
with backprop.GradientTape() as tape:
c = constant_op.constant([1.0, 2.0])
tape.watch(c)
l = list_ops.tensor_list_from_tensor(c, element_shape=scalar_shape())
c2 = constant_op.constant(3.0)
tape.watch(c2)
l = list_ops.tensor_list_set_item(l, 0, c2)
e = list_ops.tensor_list_get_item(l, 0, element_dtype=dtypes.float32)
ee = list_ops.tensor_list_get_item(l, 1, element_dtype=dtypes.float32)
y = e * e + ee * ee
grad_c, grad_c2 = tape.gradient(y, [c, c2])
self.assertAllEqual(self.evaluate(grad_c), [0.0, 4.0])
self.assertAllEqual(self.evaluate(grad_c2), 6.0)
@test_util.run_in_graph_and_eager_modes
def testSetOutOfBounds(self):
c = constant_op.constant([1.0, 2.0])
l = list_ops.tensor_list_from_tensor(c, element_shape=scalar_shape())
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(list_ops.tensor_list_set_item(l, 20, 3.0))
@test_util.run_in_graph_and_eager_modes
def testResourceVariableScatterGather(self):
c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
l = list_ops.tensor_list_from_tensor(c, element_shape=scalar_shape())
v = vs.get_variable("var", initializer=[l] * 10, use_resource=True)
v_r_0_stacked = list_ops.tensor_list_stack(v[0], dtypes.float32)
self.evaluate(v.initializer)
self.assertAllEqual([1.0, 2.0], self.evaluate(v_r_0_stacked))
v_r_sparse_stacked = list_ops.tensor_list_stack(
v.sparse_read(0), dtypes.float32)
self.assertAllEqual([1.0, 2.0], self.evaluate(v_r_sparse_stacked))
l_new_0 = list_ops.tensor_list_from_tensor(
[3.0, 4.0], element_shape=scalar_shape())
l_new_1 = list_ops.tensor_list_from_tensor(
[5.0, 6.0], element_shape=scalar_shape())
updated_v = state_ops.scatter_update(v, [3, 5], [l_new_0, l_new_1])
updated_v_elems = array_ops.unstack(updated_v)
updated_v_stacked = [
list_ops.tensor_list_stack(el, dtypes.float32) for el in updated_v_elems
]
expected = ([[1.0, 2.0]] * 3 + [[3.0, 4.0], [1.0, 2.0], [5.0, 6.0]] +
[[1.0, 2.0]] * 4)
self.assertAllEqual(self.evaluate(updated_v_stacked), expected)
@test_util.run_in_graph_and_eager_modes
def testConcat(self):
c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
l0 = list_ops.tensor_list_from_tensor(c, element_shape=scalar_shape())
l1 = list_ops.tensor_list_from_tensor([-1.0], element_shape=scalar_shape())
l_batch_0 = array_ops.stack([l0, l1])
l_batch_1 = array_ops.stack([l1, l0])
l_concat_01 = list_ops.tensor_list_concat_lists(
l_batch_0, l_batch_1, element_dtype=dtypes.float32)
l_concat_10 = list_ops.tensor_list_concat_lists(
l_batch_1, l_batch_0, element_dtype=dtypes.float32)
l_concat_00 = list_ops.tensor_list_concat_lists(
l_batch_0, l_batch_0, element_dtype=dtypes.float32)
l_concat_11 = list_ops.tensor_list_concat_lists(
l_batch_1, l_batch_1, element_dtype=dtypes.float32)
expected_00 = [[1.0, 2.0, 1.0, 2.0], [-1.0, -1.0]]
expected_01 = [[1.0, 2.0, -1.0], [-1.0, 1.0, 2.0]]
expected_10 = [[-1.0, 1.0, 2.0], [1.0, 2.0, -1.0]]
expected_11 = [[-1.0, -1.0], [1.0, 2.0, 1.0, 2.0]]
for i, (concat, expected) in enumerate(zip(
[l_concat_00, l_concat_01, l_concat_10, l_concat_11],
[expected_00, expected_01, expected_10, expected_11])):
splitted = array_ops.unstack(concat)
splitted_stacked_ret = self.evaluate(
(list_ops.tensor_list_stack(splitted[0], dtypes.float32),
list_ops.tensor_list_stack(splitted[1], dtypes.float32)))
print("Test concat %d: %s, %s, %s, %s"
% (i, expected[0], splitted_stacked_ret[0],
expected[1], splitted_stacked_ret[1]))
self.assertAllClose(expected[0], splitted_stacked_ret[0])
self.assertAllClose(expected[1], splitted_stacked_ret[1])
# Concatenating mismatched shapes fails.
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
self.evaluate(
list_ops.tensor_list_concat_lists(
l_batch_0,
list_ops.empty_tensor_list(scalar_shape(), dtypes.float32),
element_dtype=dtypes.float32))
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"element shapes are not identical at index 0"):
l_batch_of_vec_tls = array_ops.stack(
[list_ops.tensor_list_from_tensor([[1.0]], element_shape=[1])] * 2)
self.evaluate(
list_ops.tensor_list_concat_lists(l_batch_0, l_batch_of_vec_tls,
element_dtype=dtypes.float32))
with self.assertRaisesRegexp(errors.InvalidArgumentError,
r"input_b\[0\].dtype != element_dtype."):
l_batch_of_int_tls = array_ops.stack(
[list_ops.tensor_list_from_tensor([1], element_shape=scalar_shape())]
* 2)
self.evaluate(
list_ops.tensor_list_concat_lists(l_batch_0, l_batch_of_int_tls,
element_dtype=dtypes.float32))
@test_util.run_in_graph_and_eager_modes
def testPushBackBatch(self):
c = constant_op.constant([1.0, 2.0], dtype=dtypes.float32)
l0 = list_ops.tensor_list_from_tensor(c, element_shape=scalar_shape())
l1 = list_ops.tensor_list_from_tensor([-1.0], element_shape=scalar_shape())
l_batch = array_ops.stack([l0, l1])
l_push = list_ops.tensor_list_push_back_batch(l_batch, [3.0, 4.0])
l_unstack = array_ops.unstack(l_push)
l0_ret = list_ops.tensor_list_stack(l_unstack[0], dtypes.float32)
l1_ret = list_ops.tensor_list_stack(l_unstack[1], dtypes.float32)
self.assertAllClose([1.0, 2.0, 3.0], self.evaluate(l0_ret))
self.assertAllClose([-1.0, 4.0], self.evaluate(l1_ret))
with ops.control_dependencies([l_push]):
l_unstack_orig = array_ops.unstack(l_batch)
l0_orig_ret = list_ops.tensor_list_stack(l_unstack_orig[0],
dtypes.float32)
l1_orig_ret = list_ops.tensor_list_stack(l_unstack_orig[1],
dtypes.float32)
# Check that without aliasing, push_back_batch still works; and
# that it doesn't modify the input.
l0_r_v, l1_r_v, l0_orig_v, l1_orig_v = self.evaluate(
(l0_ret, l1_ret, l0_orig_ret, l1_orig_ret))
self.assertAllClose([1.0, 2.0, 3.0], l0_r_v)
self.assertAllClose([-1.0, 4.0], l1_r_v)
self.assertAllClose([1.0, 2.0], l0_orig_v)
self.assertAllClose([-1.0], l1_orig_v)
# Pushing back mismatched shapes fails.
with self.assertRaises((errors.InvalidArgumentError, ValueError)):
self.evaluate(list_ops.tensor_list_push_back_batch(l_batch, []))
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"incompatible shape to a list at index 0"):
self.evaluate(
list_ops.tensor_list_push_back_batch(l_batch, [[3.0], [4.0]]))
with self.assertRaisesRegexp(errors.InvalidArgumentError,
"Invalid data type at index 0"):
self.evaluate(list_ops.tensor_list_push_back_batch(l_batch, [3, 4]))
@test_util.run_in_graph_and_eager_modes
def testZerosLike(self):
for dtype in (dtypes.uint8, dtypes.uint16, dtypes.int8, dtypes.int16,
dtypes.int32, dtypes.int64, dtypes.float16, dtypes.float32,
dtypes.float64, dtypes.complex64, dtypes.complex128,
dtypes.bool):
l_empty = list_ops.empty_tensor_list(
element_dtype=dtype, element_shape=scalar_shape())
l_empty_zeros = array_ops.zeros_like(l_empty)
t_empty_zeros = list_ops.tensor_list_stack(
l_empty_zeros, element_dtype=dtype)
l_full = list_ops.tensor_list_push_back(l_empty,
math_ops.cast(0, dtype=dtype))
l_full = list_ops.tensor_list_push_back(l_full,
math_ops.cast(1, dtype=dtype))
l_full_zeros = array_ops.zeros_like(l_full)
t_full_zeros = list_ops.tensor_list_stack(
l_full_zeros, element_dtype=dtype)
self.assertAllEqual(self.evaluate(t_empty_zeros), [])
self.assertAllEqual(
self.evaluate(t_full_zeros), np.zeros(
(2,), dtype=dtype.as_numpy_dtype))
@test_util.run_in_graph_and_eager_modes
def testZerosLikeVariant(self):
for dtype in (dtypes.uint8, dtypes.uint16, dtypes.int8, dtypes.int16,
dtypes.int32, dtypes.int64, dtypes.float16, dtypes.float32,
dtypes.float64, dtypes.complex64, dtypes.complex128,
dtypes.bool):
l = list_ops.empty_tensor_list(
element_dtype=dtypes.variant, element_shape=scalar_shape())
sub_l = list_ops.empty_tensor_list(
element_dtype=dtype, element_shape=scalar_shape())
l = list_ops.tensor_list_push_back(l, sub_l)
sub_l = list_ops.tensor_list_push_back(sub_l, math_ops.cast(
1, dtype=dtype))
l = list_ops.tensor_list_push_back(l, sub_l)
sub_l = list_ops.tensor_list_push_back(sub_l, math_ops.cast(
2, dtype=dtype))
l = list_ops.tensor_list_push_back(l, sub_l)
# l : [[],
# [1],
# [1, 2]]
#
# l_zeros : [[],
# [0],
# [0, 0]]
l_zeros = array_ops.zeros_like(l)
outputs = []
for _ in range(3):
l_zeros, out = list_ops.tensor_list_pop_back(
l_zeros, element_dtype=dtypes.variant)
outputs.append(list_ops.tensor_list_stack(out, element_dtype=dtype))
# Note: `outputs` contains popped values so the order is reversed.
self.assertAllEqual(self.evaluate(outputs[2]), [])
self.assertAllEqual(
self.evaluate(outputs[1]), np.zeros((1,), dtype=dtype.as_numpy_dtype))
self.assertAllEqual(
self.evaluate(outputs[0]), np.zeros((2,), dtype=dtype.as_numpy_dtype))
if __name__ == "__main__":
test.main()
|
{
"content_hash": "cdc261493626e0be99c10072075b0d18",
"timestamp": "",
"source": "github",
"line_count": 504,
"max_line_length": 80,
"avg_line_length": 42.6031746031746,
"alnum_prop": 0.6276080476900149,
"repo_name": "AnishShah/tensorflow",
"id": "0f5607712b7c87e163f7d29c229cdbedc8fa6d0e",
"size": "22161",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/list_ops_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "337393"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "48452986"
},
{
"name": "CMake",
"bytes": "195768"
},
{
"name": "Dockerfile",
"bytes": "36400"
},
{
"name": "Go",
"bytes": "1210238"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "834103"
},
{
"name": "Jupyter Notebook",
"bytes": "2584246"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52618"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40782103"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "458367"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
"""
Unit tests for the salt.pillar.nacl module
"""
import salt.pillar.nacl
from tests.support.mock import patch
def test_fips_mode():
"""
Nacl pillar doesn't load when fips_mode is True
"""
opts = {"fips_mode": True}
with patch("salt.pillar.nacl.__opts__", opts, create=True):
ret = salt.pillar.nacl.__virtual__()
assert ret == (False, "nacl pillar data not available in FIPS mode")
|
{
"content_hash": "6b6246f6e99012c5561d4892e47c3118",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 76,
"avg_line_length": 26.625,
"alnum_prop": 0.6338028169014085,
"repo_name": "saltstack/salt",
"id": "e71988baf9b31dcc62c03623f14a87a7d489cd0d",
"size": "426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/pytests/unit/pillar/test_nacl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14911"
},
{
"name": "C",
"bytes": "1571"
},
{
"name": "Cython",
"bytes": "1458"
},
{
"name": "Dockerfile",
"bytes": "184"
},
{
"name": "Groovy",
"bytes": "12318"
},
{
"name": "HCL",
"bytes": "257"
},
{
"name": "HTML",
"bytes": "8031"
},
{
"name": "Jinja",
"bytes": "45598"
},
{
"name": "Makefile",
"bytes": "713"
},
{
"name": "NSIS",
"bytes": "76572"
},
{
"name": "PowerShell",
"bytes": "75891"
},
{
"name": "Python",
"bytes": "41444811"
},
{
"name": "Rich Text Format",
"bytes": "6242"
},
{
"name": "Roff",
"bytes": "191"
},
{
"name": "Ruby",
"bytes": "961"
},
{
"name": "SaltStack",
"bytes": "35856"
},
{
"name": "Scheme",
"bytes": "895"
},
{
"name": "Scilab",
"bytes": "1147"
},
{
"name": "Shell",
"bytes": "524917"
}
],
"symlink_target": ""
}
|
"""
jinja2.compiler
~~~~~~~~~~~~~~~
Compiles nodes into python code.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from itertools import chain
from copy import deepcopy
from keyword import iskeyword as is_python_keyword
from jinja2 import nodes
from jinja2.nodes import EvalContext
from jinja2.visitor import NodeVisitor
from jinja2.exceptions import TemplateAssertionError
from jinja2.utils import Markup, concat, escape
from jinja2._compat import range_type, text_type, string_types, \
iteritems, NativeStringIO, imap
operators = {
'eq': '==',
'ne': '!=',
'gt': '>',
'gteq': '>=',
'lt': '<',
'lteq': '<=',
'in': 'in',
'notin': 'not in'
}
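# e.g. a comparison whose operator node is 'gteq' is emitted as the Python
# source text '>=' by the expression visitors.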
# which method should generated code use to iterate over dict items?
# on 2.x let's go with iteritems, on 3.x with items
if hasattr(dict, 'iteritems'):
dict_item_iter = 'iteritems'
else:
dict_item_iter = 'items'
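# e.g. the inheritance code written by visit_Extends below iterates
# ``parent_template.blocks.<dict_item_iter>()`` with this name spliced in.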
# does if 0: dummy(x) get us x into the scope?
def unoptimize_before_dead_code():
x = 42
def f():
if 0: dummy(x)
return f
# The getattr is necessary for pypy which does not set this attribute if
# no closure is on the function
unoptimize_before_dead_code = bool(
getattr(unoptimize_before_dead_code(), '__closure__', None))
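# Illustration of what the probe above distinguishes (the interpreter
# behaviour noted here is an assumption, not asserted by this module):
#
#     def outer():
#         x = 42
#         def inner():
#             if 0: dummy(x)   # dead code, but may still close over x
#         return inner
#
#     outer().__closure__  # cell over ``x`` on CPython, ``None`` on PyPy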
def generate(node, environment, name, filename, stream=None,
defer_init=False):
"""Generate the python source for a node tree."""
if not isinstance(node, nodes.Template):
raise TypeError('Can\'t compile non template nodes')
generator = environment.code_generator_class(environment, name, filename,
stream, defer_init)
generator.visit(node)
if stream is None:
return generator.stream.getvalue()
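# Minimal usage sketch (the template text and names are hypothetical):
#
#     from jinja2 import Environment
#     env = Environment()
#     src = generate(env.parse('Hello {{ name }}!'), env,
#                    'hello', 'hello.html')
#     # ``src`` now holds the Python source of the compiled template.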
def has_safe_repr(value):
"""Does the node have a safe representation?"""
if value is None or value is NotImplemented or value is Ellipsis:
return True
if isinstance(value, (bool, int, float, complex, range_type,
Markup) + string_types):
return True
if isinstance(value, (tuple, list, set, frozenset)):
for item in value:
if not has_safe_repr(item):
return False
return True
elif isinstance(value, dict):
for key, value in iteritems(value):
if not has_safe_repr(key):
return False
if not has_safe_repr(value):
return False
return True
return False
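# Sketch of the distinction (values are illustrative):
#
#     has_safe_repr((1, u'a', frozenset()))  # True: repr() round-trips
#     has_safe_repr(object())                # False: repr() embeds an id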
def find_undeclared(nodes, names):
"""Check if the names passed are accessed undeclared. The return value
is a set of all the undeclared names from the sequence of names found.
"""
visitor = UndeclaredNameVisitor(names)
try:
for node in nodes:
visitor.visit(node)
except VisitorExit:
pass
return visitor.undeclared
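# Sketch (hypothetical input): for a block body that reads ``self`` but
# never assigns it, find_undeclared(body, ('self', 'super')) returns
# {'self'}; the visitor stops early via VisitorExit only once every
# requested name has been seen.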
class Identifiers(object):
"""Tracks the status of identifiers in frames."""
def __init__(self):
# variables that are known to be declared (probably from outer
# frames or because they are special for the frame)
self.declared = set()
# undeclared variables from outer scopes
self.outer_undeclared = set()
# names that are accessed without being explicitly declared by
# this one or any of the outer scopes. Names can appear both in
# declared and undeclared.
self.undeclared = set()
# names that are declared locally
self.declared_locally = set()
# names that are declared by parameters
self.declared_parameter = set()
def add_special(self, name):
"""Register a special name like `loop`."""
self.undeclared.discard(name)
self.declared.add(name)
def is_declared(self, name):
"""Check if a name is declared in this or an outer scope."""
if name in self.declared_locally or name in self.declared_parameter:
return True
return name in self.declared
def copy(self):
return deepcopy(self)
class Frame(object):
"""Holds compile time information for us."""
def __init__(self, eval_ctx, parent=None):
self.eval_ctx = eval_ctx
self.identifiers = Identifiers()
# a toplevel frame is the root + soft frames such as if conditions.
self.toplevel = False
# the root frame is basically just the outermost frame, so no if
# conditions. This information is used to optimize inheritance
# situations.
self.rootlevel = False
# in some dynamic inheritance situations the compiler needs to add
# write tests around output statements.
self.require_output_check = parent and parent.require_output_check
# inside some tags we are using a buffer rather than yield statements.
# this for example affects {% filter %} or {% macro %}. If a frame
# is buffered this variable points to the name of the list used as
# buffer.
self.buffer = None
# the name of the block we're in, otherwise None.
self.block = parent and parent.block or None
# a set of actually assigned names
self.assigned_names = set()
# the parent of this frame
self.parent = parent
if parent is not None:
self.identifiers.declared.update(
parent.identifiers.declared |
parent.identifiers.declared_parameter |
parent.assigned_names
)
self.identifiers.outer_undeclared.update(
parent.identifiers.undeclared -
self.identifiers.declared
)
self.buffer = parent.buffer
def copy(self):
"""Create a copy of the current one."""
rv = object.__new__(self.__class__)
rv.__dict__.update(self.__dict__)
rv.identifiers = object.__new__(self.identifiers.__class__)
rv.identifiers.__dict__.update(self.identifiers.__dict__)
return rv
def inspect(self, nodes):
"""Walk the node and check for identifiers. If the scope is hard (eg:
enforce on a python level) overrides from outer scopes are tracked
differently.
"""
visitor = FrameIdentifierVisitor(self.identifiers)
for node in nodes:
visitor.visit(node)
def find_shadowed(self, extra=()):
"""Find all the shadowed names. extra is an iterable of variables
        that may be defined with `add_special` which may occur scoped.
"""
i = self.identifiers
return (i.declared | i.outer_undeclared) & \
(i.declared_locally | i.declared_parameter) | \
set(x for x in extra if i.is_declared(x))
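    # In set terms: a name is shadowed when it is already visible from an
    # outer scope (declared or outer_undeclared) *and* this frame rebinds
    # it locally or as a parameter; ``extra`` lets callers treat special
    # names such as ``loop`` the same way.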
def inner(self):
"""Return an inner frame."""
return Frame(self.eval_ctx, self)
def soft(self):
"""Return a soft frame. A soft frame may not be modified as
standalone thing as it shares the resources with the frame it
was created of, but it's not a rootlevel frame any longer.
"""
rv = self.copy()
rv.rootlevel = False
return rv
__copy__ = copy
class VisitorExit(RuntimeError):
"""Exception used by the `UndeclaredNameVisitor` to signal a stop."""
class DependencyFinderVisitor(NodeVisitor):
"""A visitor that collects filter and test calls."""
def __init__(self):
self.filters = set()
self.tests = set()
def visit_Filter(self, node):
self.generic_visit(node)
self.filters.add(node.name)
def visit_Test(self, node):
self.generic_visit(node)
self.tests.add(node.name)
def visit_Block(self, node):
"""Stop visiting at blocks."""
class UndeclaredNameVisitor(NodeVisitor):
"""A visitor that checks if a name is accessed without being
declared. This is different from the frame visitor as it will
not stop at closure frames.
"""
def __init__(self, names):
self.names = set(names)
self.undeclared = set()
def visit_Name(self, node):
if node.ctx == 'load' and node.name in self.names:
self.undeclared.add(node.name)
if self.undeclared == self.names:
raise VisitorExit()
else:
self.names.discard(node.name)
def visit_Block(self, node):
"""Stop visiting a blocks."""
class FrameIdentifierVisitor(NodeVisitor):
"""A visitor for `Frame.inspect`."""
def __init__(self, identifiers):
self.identifiers = identifiers
def visit_Name(self, node):
"""All assignments to names go through this function."""
if node.ctx == 'store':
self.identifiers.declared_locally.add(node.name)
elif node.ctx == 'param':
self.identifiers.declared_parameter.add(node.name)
elif node.ctx == 'load' and not \
self.identifiers.is_declared(node.name):
self.identifiers.undeclared.add(node.name)
def visit_If(self, node):
self.visit(node.test)
real_identifiers = self.identifiers
old_names = real_identifiers.declared_locally | \
real_identifiers.declared_parameter
def inner_visit(nodes):
if not nodes:
return set()
self.identifiers = real_identifiers.copy()
for subnode in nodes:
self.visit(subnode)
rv = self.identifiers.declared_locally - old_names
# we have to remember the undeclared variables of this branch
# because we will have to pull them.
real_identifiers.undeclared.update(self.identifiers.undeclared)
self.identifiers = real_identifiers
return rv
body = inner_visit(node.body)
else_ = inner_visit(node.else_ or ())
# the differences between the two branches are also pulled as
# undeclared variables
real_identifiers.undeclared.update(body.symmetric_difference(else_) -
real_identifiers.declared)
# remember those that are declared.
real_identifiers.declared_locally.update(body | else_)
def visit_Macro(self, node):
self.identifiers.declared_locally.add(node.name)
def visit_Import(self, node):
self.generic_visit(node)
self.identifiers.declared_locally.add(node.target)
def visit_FromImport(self, node):
self.generic_visit(node)
for name in node.names:
if isinstance(name, tuple):
self.identifiers.declared_locally.add(name[1])
else:
self.identifiers.declared_locally.add(name)
def visit_Assign(self, node):
"""Visit assignments in the correct order."""
self.visit(node.node)
self.visit(node.target)
def visit_For(self, node):
"""Visiting stops at for blocks. However the block sequence
is visited as part of the outer scope.
"""
self.visit(node.iter)
def visit_CallBlock(self, node):
self.visit(node.call)
def visit_FilterBlock(self, node):
self.visit(node.filter)
def visit_AssignBlock(self, node):
"""Stop visiting at block assigns."""
def visit_Scope(self, node):
"""Stop visiting at scopes."""
def visit_Block(self, node):
"""Stop visiting at blocks."""
class CompilerExit(Exception):
"""Raised if the compiler encountered a situation where it just
doesn't make sense to further process the code. Any block that
raises such an exception is not further processed.
"""
class CodeGenerator(NodeVisitor):
def __init__(self, environment, name, filename, stream=None,
defer_init=False):
if stream is None:
stream = NativeStringIO()
self.environment = environment
self.name = name
self.filename = filename
self.stream = stream
self.created_block_context = False
self.defer_init = defer_init
# aliases for imports
self.import_aliases = {}
# a registry for all blocks. Because blocks are moved out
# into the global python scope they are registered here
self.blocks = {}
# the number of extends statements so far
self.extends_so_far = 0
# some templates have a rootlevel extends. In this case we
# can safely assume that we're a child template and do some
# more optimizations.
self.has_known_extends = False
# the current line number
self.code_lineno = 1
# registry of all filters and tests (global, not block local)
self.tests = {}
self.filters = {}
# the debug information
self.debug_info = []
self._write_debug_info = None
# the number of new lines before the next write()
self._new_lines = 0
# the line number of the last written statement
self._last_line = 0
# true if nothing was written so far.
self._first_write = True
        # used by the `temporary_identifier` method to get a new
# unique, temporary identifier
self._last_identifier = 0
# the current indentation
self._indentation = 0
# -- Various compilation helpers
def fail(self, msg, lineno):
"""Fail with a :exc:`TemplateAssertionError`."""
raise TemplateAssertionError(msg, lineno, self.name, self.filename)
def temporary_identifier(self):
"""Get a new unique identifier."""
self._last_identifier += 1
return 't_%d' % self._last_identifier
def buffer(self, frame):
"""Enable buffering for the frame from that point onwards."""
frame.buffer = self.temporary_identifier()
self.writeline('%s = []' % frame.buffer)
def return_buffer_contents(self, frame):
"""Return the buffer contents of the frame."""
if frame.eval_ctx.volatile:
self.writeline('if context.eval_ctx.autoescape:')
self.indent()
self.writeline('return Markup(concat(%s))' % frame.buffer)
self.outdent()
self.writeline('else:')
self.indent()
self.writeline('return concat(%s)' % frame.buffer)
self.outdent()
elif frame.eval_ctx.autoescape:
self.writeline('return Markup(concat(%s))' % frame.buffer)
else:
self.writeline('return concat(%s)' % frame.buffer)
def indent(self):
"""Indent by one."""
self._indentation += 1
def outdent(self, step=1):
"""Outdent by step."""
self._indentation -= step
def start_write(self, frame, node=None):
"""Yield or write into the frame buffer."""
if frame.buffer is None:
self.writeline('yield ', node)
else:
self.writeline('%s.append(' % frame.buffer, node)
def end_write(self, frame):
"""End the writing process started by `start_write`."""
if frame.buffer is not None:
self.write(')')
def simple_write(self, s, frame, node=None):
"""Simple shortcut for start_write + write + end_write."""
self.start_write(frame, node)
self.write(s)
self.end_write(frame)
def blockvisit(self, nodes, frame):
"""Visit a list of nodes as block in a frame. If the current frame
is no buffer a dummy ``if 0: yield None`` is written automatically
unless the force_generator parameter is set to False.
"""
if frame.buffer is None:
self.writeline('if 0: yield None')
else:
self.writeline('pass')
try:
for node in nodes:
self.visit(node, frame)
except CompilerExit:
pass
def write(self, x):
"""Write a string into the output stream."""
if self._new_lines:
if not self._first_write:
self.stream.write('\n' * self._new_lines)
self.code_lineno += self._new_lines
if self._write_debug_info is not None:
self.debug_info.append((self._write_debug_info,
self.code_lineno))
self._write_debug_info = None
self._first_write = False
self.stream.write(' ' * self._indentation)
self._new_lines = 0
self.stream.write(x)
def writeline(self, x, node=None, extra=0):
"""Combination of newline and write."""
self.newline(node, extra)
self.write(x)
def newline(self, node=None, extra=0):
"""Add one or more newlines before the next write."""
self._new_lines = max(self._new_lines, 1 + extra)
if node is not None and node.lineno != self._last_line:
self._write_debug_info = node.lineno
self._last_line = node.lineno
def signature(self, node, frame, extra_kwargs=None):
"""Writes a function call to the stream for the current node.
A leading comma is added automatically. The extra keyword
        arguments may not include python keywords, otherwise a syntax
        error could occur.  The extra keyword arguments should be given
as python dict.
"""
# if any of the given keyword arguments is a python keyword
# we have to make sure that no invalid call is created.
kwarg_workaround = False
for kwarg in chain((x.key for x in node.kwargs), extra_kwargs or ()):
if is_python_keyword(kwarg):
kwarg_workaround = True
break
for arg in node.args:
self.write(', ')
self.visit(arg, frame)
if not kwarg_workaround:
for kwarg in node.kwargs:
self.write(', ')
self.visit(kwarg, frame)
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write(', %s=%s' % (key, value))
if node.dyn_args:
self.write(', *')
self.visit(node.dyn_args, frame)
if kwarg_workaround:
if node.dyn_kwargs is not None:
self.write(', **dict({')
else:
self.write(', **{')
for kwarg in node.kwargs:
self.write('%r: ' % kwarg.key)
self.visit(kwarg.value, frame)
self.write(', ')
if extra_kwargs is not None:
for key, value in iteritems(extra_kwargs):
self.write('%r: %s, ' % (key, value))
if node.dyn_kwargs is not None:
self.write('}, **')
self.visit(node.dyn_kwargs, frame)
self.write(')')
else:
self.write('}')
elif node.dyn_kwargs is not None:
self.write(', **')
self.visit(node.dyn_kwargs, frame)
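    # Sketch of the keyword workaround above (the exact call shape is
    # illustrative): a template call like ``foo(class='x')`` cannot compile
    # to ``foo(class='x')`` because ``class`` is a Python keyword, so the
    # keywords are funnelled through a dict and the generated call ends in
    # ``, **{'class': 'x'})`` instead of real keyword arguments.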
def pull_locals(self, frame):
"""Pull all the references identifiers into the local scope."""
for name in frame.identifiers.undeclared:
self.writeline('l_%s = context.resolve(%r)' % (name, name))
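    # e.g. an undeclared template name ``user`` is pulled as the line:
    #     l_user = context.resolve('user')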
def pull_dependencies(self, nodes):
"""Pull all the dependencies."""
visitor = DependencyFinderVisitor()
for node in nodes:
visitor.visit(node)
for dependency in 'filters', 'tests':
mapping = getattr(self, dependency)
for name in getattr(visitor, dependency):
if name not in mapping:
mapping[name] = self.temporary_identifier()
self.writeline('%s = environment.%s[%r]' %
(mapping[name], dependency, name))
def unoptimize_scope(self, frame):
"""Disable Python optimizations for the frame."""
# XXX: this is not that nice but it has no real overhead. It
# mainly works because python finds the locals before dead code
# is removed. If that breaks we have to add a dummy function
# that just accepts the arguments and does nothing.
if frame.identifiers.declared:
self.writeline('%sdummy(%s)' % (
unoptimize_before_dead_code and 'if 0: ' or '',
', '.join('l_' + name for name in frame.identifiers.declared)
))
def push_scope(self, frame, extra_vars=()):
"""This function returns all the shadowed variables in a dict
in the form name: alias and will write the required assignments
into the current scope. No indentation takes place.
This also predefines locally declared variables from the loop
body because under some circumstances it may be the case that
`extra_vars` is passed to `Frame.find_shadowed`.
"""
aliases = {}
for name in frame.find_shadowed(extra_vars):
aliases[name] = ident = self.temporary_identifier()
self.writeline('%s = l_%s' % (ident, name))
to_declare = set()
for name in frame.identifiers.declared_locally:
if name not in aliases:
to_declare.add('l_' + name)
if to_declare:
self.writeline(' = '.join(to_declare) + ' = missing')
return aliases
def pop_scope(self, aliases, frame):
"""Restore all aliases and delete unused variables."""
for name, alias in iteritems(aliases):
self.writeline('l_%s = %s' % (name, alias))
to_delete = set()
for name in frame.identifiers.declared_locally:
if name not in aliases:
to_delete.add('l_' + name)
if to_delete:
# we cannot use the del statement here because enclosed
# scopes can trigger a SyntaxError:
# a = 42; b = lambda: a; del a
self.writeline(' = '.join(to_delete) + ' = missing')
def function_scoping(self, node, frame, children=None,
find_special=True):
"""In Jinja a few statements require the help of anonymous
functions. Those are currently macros and call blocks and in
        the future also recursive loops.  As there is currently a
        technical limitation that doesn't allow reading and writing a
variable in a scope where the initial value is coming from an
outer scope, this function tries to fall back with a common
error message. Additionally the frame passed is modified so
        that the arguments are collected and callers are looked up.
This will return the modified frame.
"""
# we have to iterate twice over it, make sure that works
if children is None:
children = node.iter_child_nodes()
children = list(children)
func_frame = frame.inner()
func_frame.inspect(children)
# variables that are undeclared (accessed before declaration) and
# declared locally *and* part of an outside scope raise a template
# assertion error. Reason: we can't generate reasonable code from
# it without aliasing all the variables.
# this could be fixed in Python 3 where we have the nonlocal
# keyword or if we switch to bytecode generation
overridden_closure_vars = (
func_frame.identifiers.undeclared &
func_frame.identifiers.declared &
(func_frame.identifiers.declared_locally |
func_frame.identifiers.declared_parameter)
)
if overridden_closure_vars:
self.fail('It\'s not possible to set and access variables '
'derived from an outer scope! (affects: %s)' %
', '.join(sorted(overridden_closure_vars)), node.lineno)
# remove variables from a closure from the frame's undeclared
# identifiers.
func_frame.identifiers.undeclared -= (
func_frame.identifiers.undeclared &
func_frame.identifiers.declared
)
# no special variables for this scope, abort early
if not find_special:
return func_frame
func_frame.accesses_kwargs = False
func_frame.accesses_varargs = False
func_frame.accesses_caller = False
func_frame.arguments = args = ['l_' + x.name for x in node.args]
undeclared = find_undeclared(children, ('caller', 'kwargs', 'varargs'))
if 'caller' in undeclared:
func_frame.accesses_caller = True
func_frame.identifiers.add_special('caller')
args.append('l_caller')
if 'kwargs' in undeclared:
func_frame.accesses_kwargs = True
func_frame.identifiers.add_special('kwargs')
args.append('l_kwargs')
if 'varargs' in undeclared:
func_frame.accesses_varargs = True
func_frame.identifiers.add_special('varargs')
args.append('l_varargs')
return func_frame
def macro_body(self, node, frame, children=None):
"""Dump the function def of a macro or call block."""
frame = self.function_scoping(node, frame, children)
# macros are delayed, they never require output checks
frame.require_output_check = False
args = frame.arguments
# XXX: this is an ugly fix for the loop nesting bug
# (tests.test_old_bugs.test_loop_call_bug). This works around
        # an identifier nesting problem we have in general.  It's just more
        # likely to happen in loops, which is why we work around it.  The
        # real solution would be to "nonlocal" all the identifiers that are
# leaking into a new python frame and might be used both unassigned
# and assigned.
if 'loop' in frame.identifiers.declared:
args = args + ['l_loop=l_loop']
self.writeline('def macro(%s):' % ', '.join(args), node)
self.indent()
self.buffer(frame)
self.pull_locals(frame)
self.blockvisit(node.body, frame)
self.return_buffer_contents(frame)
self.outdent()
return frame
def macro_def(self, node, frame):
"""Dump the macro definition for the def created by macro_body."""
arg_tuple = ', '.join(repr(x.name) for x in node.args)
name = getattr(node, 'name', None)
if len(node.args) == 1:
arg_tuple += ','
self.write('Macro(environment, macro, %r, (%s), (' %
(name, arg_tuple))
for arg in node.defaults:
self.visit(arg, frame)
self.write(', ')
self.write('), %r, %r, %r)' % (
bool(frame.accesses_kwargs),
bool(frame.accesses_varargs),
bool(frame.accesses_caller)
))
def position(self, node):
"""Return a human readable position for the node."""
rv = 'line %d' % node.lineno
if self.name is not None:
rv += ' in ' + repr(self.name)
return rv
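    # e.g. position(node) yields "line 7", or "line 7 in 'page.html'" when
    # the template has a name.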
# -- Statement Visitors
def visit_Template(self, node, frame=None):
assert frame is None, 'no root frame allowed'
eval_ctx = EvalContext(self.environment, self.name)
from jinja2.runtime import __all__ as exported
self.writeline('from __future__ import division')
self.writeline('from jinja2.runtime import ' + ', '.join(exported))
if not unoptimize_before_dead_code:
self.writeline('dummy = lambda *x: None')
# if we want a deferred initialization we cannot move the
# environment into a local name
envenv = not self.defer_init and ', environment=environment' or ''
# do we have an extends tag at all? If not, we can save some
# overhead by just not processing any inheritance code.
have_extends = node.find(nodes.Extends) is not None
# find all blocks
for block in node.find_all(nodes.Block):
if block.name in self.blocks:
self.fail('block %r defined twice' % block.name, block.lineno)
self.blocks[block.name] = block
# find all imports and import them
for import_ in node.find_all(nodes.ImportedName):
if import_.importname not in self.import_aliases:
imp = import_.importname
self.import_aliases[imp] = alias = self.temporary_identifier()
if '.' in imp:
module, obj = imp.rsplit('.', 1)
self.writeline('from %s import %s as %s' %
(module, obj, alias))
else:
self.writeline('import %s as %s' % (imp, alias))
# add the load name
self.writeline('name = %r' % self.name)
# generate the root render function.
self.writeline('def root(context%s):' % envenv, extra=1)
# process the root
frame = Frame(eval_ctx)
frame.inspect(node.body)
frame.toplevel = frame.rootlevel = True
frame.require_output_check = have_extends and not self.has_known_extends
self.indent()
if have_extends:
self.writeline('parent_template = None')
if 'self' in find_undeclared(node.body, ('self',)):
frame.identifiers.add_special('self')
self.writeline('l_self = TemplateReference(context)')
self.pull_locals(frame)
self.pull_dependencies(node.body)
self.blockvisit(node.body, frame)
self.outdent()
# make sure that the parent root is called.
if have_extends:
if not self.has_known_extends:
self.indent()
self.writeline('if parent_template is not None:')
self.indent()
self.writeline('for event in parent_template.'
'root_render_func(context):')
self.indent()
self.writeline('yield event')
self.outdent(2 + (not self.has_known_extends))
# at this point we now have the blocks collected and can visit them too.
for name, block in iteritems(self.blocks):
block_frame = Frame(eval_ctx)
block_frame.inspect(block.body)
block_frame.block = name
self.writeline('def block_%s(context%s):' % (name, envenv),
block, 1)
self.indent()
undeclared = find_undeclared(block.body, ('self', 'super'))
if 'self' in undeclared:
block_frame.identifiers.add_special('self')
self.writeline('l_self = TemplateReference(context)')
if 'super' in undeclared:
block_frame.identifiers.add_special('super')
self.writeline('l_super = context.super(%r, '
'block_%s)' % (name, name))
self.pull_locals(block_frame)
self.pull_dependencies(block.body)
self.blockvisit(block.body, block_frame)
self.outdent()
self.writeline('blocks = {%s}' % ', '.join('%r: block_%s' % (x, x)
for x in self.blocks),
extra=1)
# add a function that returns the debug info
self.writeline('debug_info = %r' % '&'.join('%s=%s' % x for x
in self.debug_info))
def visit_Block(self, node, frame):
"""Call a block and register it for the template."""
level = 1
if frame.toplevel:
# if we know that we are a child template, there is no need to
# check if we are one
if self.has_known_extends:
return
if self.extends_so_far > 0:
self.writeline('if parent_template is None:')
self.indent()
level += 1
context = node.scoped and 'context.derived(locals())' or 'context'
self.writeline('for event in context.blocks[%r][0](%s):' % (
node.name, context), node)
self.indent()
self.simple_write('event', frame)
self.outdent(level)
def visit_Extends(self, node, frame):
"""Calls the extender."""
if not frame.toplevel:
self.fail('cannot use extend from a non top-level scope',
node.lineno)
# if the number of extends statements in general is zero so
# far, we don't have to add a check if something extended
# the template before this one.
if self.extends_so_far > 0:
# if we have a known extends we just add a template runtime
# error into the generated code. We could catch that at compile
            # time too, but we deliberately report it at runtime so users
            # aren't confused by the same error surfacing at two different
            # stages just "because we can".
if not self.has_known_extends:
self.writeline('if parent_template is not None:')
self.indent()
self.writeline('raise TemplateRuntimeError(%r)' %
'extended multiple times')
# if we have a known extends already we don't need that code here
# as we know that the template execution will end here.
if self.has_known_extends:
raise CompilerExit()
else:
self.outdent()
self.writeline('parent_template = environment.get_template(', node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
self.writeline('for name, parent_block in parent_template.'
'blocks.%s():' % dict_item_iter)
self.indent()
self.writeline('context.blocks.setdefault(name, []).'
'append(parent_block)')
self.outdent()
# if this extends statement was in the root level we can take
# advantage of that information and simplify the generated code
# in the top level from this point onwards
if frame.rootlevel:
self.has_known_extends = True
# and now we have one more
self.extends_so_far += 1
def visit_Include(self, node, frame):
"""Handles includes."""
if node.with_context:
self.unoptimize_scope(frame)
if node.ignore_missing:
self.writeline('try:')
self.indent()
func_name = 'get_or_select_template'
if isinstance(node.template, nodes.Const):
if isinstance(node.template.value, string_types):
func_name = 'get_template'
elif isinstance(node.template.value, (tuple, list)):
func_name = 'select_template'
elif isinstance(node.template, (nodes.Tuple, nodes.List)):
func_name = 'select_template'
self.writeline('template = environment.%s(' % func_name, node)
self.visit(node.template, frame)
self.write(', %r)' % self.name)
if node.ignore_missing:
self.outdent()
self.writeline('except TemplateNotFound:')
self.indent()
self.writeline('pass')
self.outdent()
self.writeline('else:')
self.indent()
if node.with_context:
self.writeline('for event in template.root_render_func('
'template.new_context(context.parent, True, '
'locals())):')
else:
self.writeline('for event in template.module._body_stream:')
self.indent()
self.simple_write('event', frame)
self.outdent()
if node.ignore_missing:
self.outdent()
def visit_Import(self, node, frame):
"""Visit regular imports."""
if node.with_context:
self.unoptimize_scope(frame)
self.writeline('l_%s = ' % node.target, node)
if frame.toplevel:
self.write('context.vars[%r] = ' % node.target)
self.write('environment.get_template(')
self.visit(node.template, frame)
self.write(', %r).' % self.name)
if node.with_context:
self.write('make_module(context.parent, True, locals())')
else:
self.write('module')
if frame.toplevel and not node.target.startswith('_'):
self.writeline('context.exported_vars.discard(%r)' % node.target)
frame.assigned_names.add(node.target)
def visit_FromImport(self, node, frame):
"""Visit named imports."""
self.newline(node)
self.write('included_template = environment.get_template(')
self.visit(node.template, frame)
self.write(', %r).' % self.name)
if node.with_context:
self.write('make_module(context.parent, True)')
else:
self.write('module')
var_names = []
discarded_names = []
for name in node.names:
if isinstance(name, tuple):
name, alias = name
else:
alias = name
self.writeline('l_%s = getattr(included_template, '
'%r, missing)' % (alias, name))
self.writeline('if l_%s is missing:' % alias)
self.indent()
self.writeline('l_%s = environment.undefined(%r %% '
'included_template.__name__, '
'name=%r)' %
(alias, 'the template %%r (imported on %s) does '
'not export the requested name %s' % (
self.position(node),
repr(name)
), name))
self.outdent()
if frame.toplevel:
var_names.append(alias)
if not alias.startswith('_'):
discarded_names.append(alias)
frame.assigned_names.add(alias)
if var_names:
if len(var_names) == 1:
name = var_names[0]
self.writeline('context.vars[%r] = l_%s' % (name, name))
else:
self.writeline('context.vars.update({%s})' % ', '.join(
'%r: l_%s' % (name, name) for name in var_names
))
if discarded_names:
if len(discarded_names) == 1:
self.writeline('context.exported_vars.discard(%r)' %
discarded_names[0])
else:
self.writeline('context.exported_vars.difference_'
'update((%s))' % ', '.join(imap(repr, discarded_names)))
def visit_For(self, node, frame):
# when calculating the nodes for the inner frame we have to exclude
# the iterator contents from it
children = node.iter_child_nodes(exclude=('iter',))
if node.recursive:
loop_frame = self.function_scoping(node, frame, children,
find_special=False)
else:
loop_frame = frame.inner()
loop_frame.inspect(children)
# try to figure out if we have an extended loop. An extended loop
        # is necessary if the loop is in recursive mode or if the special loop
# variable is accessed in the body.
extended_loop = node.recursive or 'loop' in \
find_undeclared(node.iter_child_nodes(
only=('body',)), ('loop',))
        # if we don't have a recursive loop we have to find the shadowed
# variables at that point. Because loops can be nested but the loop
# variable is a special one we have to enforce aliasing for it.
if not node.recursive:
aliases = self.push_scope(loop_frame, ('loop',))
# otherwise we set up a buffer and add a function def
else:
self.writeline('def loop(reciter, loop_render_func, depth=0):', node)
self.indent()
self.buffer(loop_frame)
aliases = {}
# make sure the loop variable is a special one and raise a template
# assertion error if a loop tries to write to loop
if extended_loop:
self.writeline('l_loop = missing')
loop_frame.identifiers.add_special('loop')
for name in node.find_all(nodes.Name):
if name.ctx == 'store' and name.name == 'loop':
self.fail('Can\'t assign to special loop variable '
'in for-loop target', name.lineno)
self.pull_locals(loop_frame)
if node.else_:
iteration_indicator = self.temporary_identifier()
self.writeline('%s = 1' % iteration_indicator)
# Create a fake parent loop if the else or test section of a
# loop is accessing the special loop variable and no parent loop
# exists.
if 'loop' not in aliases and 'loop' in find_undeclared(
node.iter_child_nodes(only=('else_', 'test')), ('loop',)):
self.writeline("l_loop = environment.undefined(%r, name='loop')" %
("'loop' is undefined. the filter section of a loop as well "
"as the else block don't have access to the special 'loop'"
" variable of the current loop. Because there is no parent "
"loop it's undefined. Happened in loop on %s" %
self.position(node)))
self.writeline('for ', node)
self.visit(node.target, loop_frame)
self.write(extended_loop and ', l_loop in LoopContext(' or ' in ')
        # if we have an extended loop and a node test, we filter in the
# "outer frame".
if extended_loop and node.test is not None:
self.write('(')
self.visit(node.target, loop_frame)
self.write(' for ')
self.visit(node.target, loop_frame)
self.write(' in ')
if node.recursive:
self.write('reciter')
else:
self.visit(node.iter, loop_frame)
self.write(' if (')
test_frame = loop_frame.copy()
self.visit(node.test, test_frame)
self.write('))')
elif node.recursive:
self.write('reciter')
else:
self.visit(node.iter, loop_frame)
if node.recursive:
self.write(', loop_render_func, depth):')
else:
self.write(extended_loop and '):' or ':')
        # tests in non-extended loops become a continue
if not extended_loop and node.test is not None:
self.indent()
self.writeline('if not ')
self.visit(node.test, loop_frame)
self.write(':')
self.indent()
self.writeline('continue')
self.outdent(2)
self.indent()
self.blockvisit(node.body, loop_frame)
if node.else_:
self.writeline('%s = 0' % iteration_indicator)
self.outdent()
if node.else_:
self.writeline('if %s:' % iteration_indicator)
self.indent()
self.blockvisit(node.else_, loop_frame)
self.outdent()
# reset the aliases if there are any.
if not node.recursive:
self.pop_scope(aliases, loop_frame)
# if the node was recursive we have to return the buffer contents
# and start the iteration code
if node.recursive:
self.return_buffer_contents(loop_frame)
self.outdent()
self.start_write(frame, node)
self.write('loop(')
self.visit(node.iter, frame)
self.write(', loop)')
self.end_write(frame)
def visit_If(self, node, frame):
if_frame = frame.soft()
self.writeline('if ', node)
self.visit(node.test, if_frame)
self.write(':')
self.indent()
self.blockvisit(node.body, if_frame)
self.outdent()
if node.else_:
self.writeline('else:')
self.indent()
self.blockvisit(node.else_, if_frame)
self.outdent()
def visit_Macro(self, node, frame):
macro_frame = self.macro_body(node, frame)
self.newline()
if frame.toplevel:
if not node.name.startswith('_'):
self.write('context.exported_vars.add(%r)' % node.name)
self.writeline('context.vars[%r] = ' % node.name)
self.write('l_%s = ' % node.name)
self.macro_def(node, macro_frame)
frame.assigned_names.add(node.name)
def visit_CallBlock(self, node, frame):
children = node.iter_child_nodes(exclude=('call',))
call_frame = self.macro_body(node, frame, children)
self.writeline('caller = ')
self.macro_def(node, call_frame)
self.start_write(frame, node)
self.visit_Call(node.call, call_frame, forward_caller=True)
self.end_write(frame)
def visit_FilterBlock(self, node, frame):
filter_frame = frame.inner()
filter_frame.inspect(node.iter_child_nodes())
aliases = self.push_scope(filter_frame)
self.pull_locals(filter_frame)
self.buffer(filter_frame)
self.blockvisit(node.body, filter_frame)
self.start_write(frame, node)
self.visit_Filter(node.filter, filter_frame)
self.end_write(frame)
self.pop_scope(aliases, filter_frame)
def visit_ExprStmt(self, node, frame):
self.newline(node)
self.visit(node.node, frame)
def visit_Output(self, node, frame):
# if we have a known extends statement, we don't output anything
# if we are in a require_output_check section
if self.has_known_extends and frame.require_output_check:
return
allow_constant_finalize = True
if self.environment.finalize:
func = self.environment.finalize
if getattr(func, 'contextfunction', False) or \
getattr(func, 'evalcontextfunction', False):
allow_constant_finalize = False
elif getattr(func, 'environmentfunction', False):
finalize = lambda x: text_type(
self.environment.finalize(self.environment, x))
else:
finalize = lambda x: text_type(self.environment.finalize(x))
else:
finalize = text_type
# if we are inside a frame that requires output checking, we do so
outdent_later = False
if frame.require_output_check:
self.writeline('if parent_template is None:')
self.indent()
outdent_later = True
# try to evaluate as many chunks as possible into a static
# string at compile time.
body = []
for child in node.nodes:
try:
if not allow_constant_finalize:
raise nodes.Impossible()
const = child.as_const(frame.eval_ctx)
except nodes.Impossible:
body.append(child)
continue
            # the frame can't be volatile here, because otherwise the
# as_const() function would raise an Impossible exception
# at that point.
try:
if frame.eval_ctx.autoescape:
if hasattr(const, '__html__'):
const = const.__html__()
else:
const = escape(const)
const = finalize(const)
except Exception:
# if something goes wrong here we evaluate the node
# at runtime for easier debugging
body.append(child)
continue
if body and isinstance(body[-1], list):
body[-1].append(const)
else:
body.append([const])
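        # at this point ``body`` interleaves folded constant chunks (lists of
        # strings) with still-dynamic child nodes, e.g. for "<p>{{ user }}</p>"
        # it would be [['<p>'], <Name 'user'>, ['</p>']]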
# if we have less than 3 nodes or a buffer we yield or extend/append
if len(body) < 3 or frame.buffer is not None:
if frame.buffer is not None:
# for one item we append, for more we extend
if len(body) == 1:
self.writeline('%s.append(' % frame.buffer)
else:
self.writeline('%s.extend((' % frame.buffer)
self.indent()
for item in body:
if isinstance(item, list):
val = repr(concat(item))
if frame.buffer is None:
self.writeline('yield ' + val)
else:
self.writeline(val + ', ')
else:
if frame.buffer is None:
self.writeline('yield ', item)
else:
self.newline(item)
close = 1
if frame.eval_ctx.volatile:
self.write('(context.eval_ctx.autoescape and'
' escape or to_string)(')
elif frame.eval_ctx.autoescape:
self.write('escape(')
else:
self.write('to_string(')
if self.environment.finalize is not None:
self.write('environment.finalize(')
if getattr(self.environment.finalize,
"contextfunction", False):
self.write('context, ')
close += 1
self.visit(item, frame)
self.write(')' * close)
if frame.buffer is not None:
self.write(', ')
if frame.buffer is not None:
# close the open parentheses
self.outdent()
self.writeline(len(body) == 1 and ')' or '))')
# otherwise we create a format string as this is faster in that case
else:
format = []
arguments = []
for item in body:
if isinstance(item, list):
format.append(concat(item).replace('%', '%%'))
else:
format.append('%s')
arguments.append(item)
self.writeline('yield ')
self.write(repr(concat(format)) + ' % (')
self.indent()
for argument in arguments:
self.newline(argument)
close = 0
if frame.eval_ctx.volatile:
self.write('(context.eval_ctx.autoescape and'
' escape or to_string)(')
close += 1
elif frame.eval_ctx.autoescape:
self.write('escape(')
close += 1
if self.environment.finalize is not None:
self.write('environment.finalize(')
if getattr(self.environment.finalize,
'contextfunction', False):
self.write('context, ')
elif getattr(self.environment.finalize,
'evalcontextfunction', False):
self.write('context.eval_ctx, ')
elif getattr(self.environment.finalize,
'environmentfunction', False):
self.write('environment, ')
close += 1
self.visit(argument, frame)
self.write(')' * close + ', ')
self.outdent()
self.writeline(')')
if outdent_later:
self.outdent()
def make_assignment_frame(self, frame):
# toplevel assignments however go into the local namespace and
# the current template's context. We create a copy of the frame
# here and add a set so that the Name visitor can add the assigned
# names here.
if not frame.toplevel:
return frame
assignment_frame = frame.copy()
assignment_frame.toplevel_assignments = set()
return assignment_frame
def export_assigned_vars(self, frame, assignment_frame):
if not frame.toplevel:
return
public_names = [x for x in assignment_frame.toplevel_assignments
if not x.startswith('_')]
if len(assignment_frame.toplevel_assignments) == 1:
name = next(iter(assignment_frame.toplevel_assignments))
self.writeline('context.vars[%r] = l_%s' % (name, name))
else:
self.writeline('context.vars.update({')
for idx, name in enumerate(assignment_frame.toplevel_assignments):
if idx:
self.write(', ')
self.write('%r: l_%s' % (name, name))
self.write('})')
if public_names:
if len(public_names) == 1:
self.writeline('context.exported_vars.add(%r)' %
public_names[0])
else:
self.writeline('context.exported_vars.update((%s))' %
', '.join(imap(repr, public_names)))
def visit_Assign(self, node, frame):
self.newline(node)
assignment_frame = self.make_assignment_frame(frame)
self.visit(node.target, assignment_frame)
self.write(' = ')
self.visit(node.node, frame)
self.export_assigned_vars(frame, assignment_frame)
def visit_AssignBlock(self, node, frame):
block_frame = frame.inner()
block_frame.inspect(node.body)
aliases = self.push_scope(block_frame)
self.pull_locals(block_frame)
self.buffer(block_frame)
self.blockvisit(node.body, block_frame)
self.pop_scope(aliases, block_frame)
assignment_frame = self.make_assignment_frame(frame)
self.newline(node)
self.visit(node.target, assignment_frame)
self.write(' = concat(%s)' % block_frame.buffer)
self.export_assigned_vars(frame, assignment_frame)
# -- Expression Visitors
def visit_Name(self, node, frame):
if node.ctx == 'store' and frame.toplevel:
frame.toplevel_assignments.add(node.name)
self.write('l_' + node.name)
frame.assigned_names.add(node.name)
def visit_Const(self, node, frame):
val = node.value
if isinstance(val, float):
self.write(str(val))
else:
self.write(repr(val))
def visit_TemplateData(self, node, frame):
try:
self.write(repr(node.as_const(frame.eval_ctx)))
except nodes.Impossible:
self.write('(context.eval_ctx.autoescape and Markup or identity)(%r)'
% node.data)
def visit_Tuple(self, node, frame):
self.write('(')
idx = -1
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item, frame)
self.write(idx == 0 and ',)' or ')')
def visit_List(self, node, frame):
self.write('[')
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item, frame)
self.write(']')
def visit_Dict(self, node, frame):
self.write('{')
for idx, item in enumerate(node.items):
if idx:
self.write(', ')
self.visit(item.key, frame)
self.write(': ')
self.visit(item.value, frame)
self.write('}')
def binop(operator, interceptable=True):
def visitor(self, node, frame):
if self.environment.sandboxed and \
operator in self.environment.intercepted_binops:
self.write('environment.call_binop(context, %r, ' % operator)
self.visit(node.left, frame)
self.write(', ')
self.visit(node.right, frame)
else:
self.write('(')
self.visit(node.left, frame)
self.write(' %s ' % operator)
self.visit(node.right, frame)
self.write(')')
return visitor
def uaop(operator, interceptable=True):
def visitor(self, node, frame):
if self.environment.sandboxed and \
operator in self.environment.intercepted_unops:
self.write('environment.call_unop(context, %r, ' % operator)
self.visit(node.node, frame)
else:
self.write('(' + operator)
self.visit(node.node, frame)
self.write(')')
return visitor
visit_Add = binop('+')
visit_Sub = binop('-')
visit_Mul = binop('*')
visit_Div = binop('/')
visit_FloorDiv = binop('//')
visit_Pow = binop('**')
visit_Mod = binop('%')
visit_And = binop('and', interceptable=False)
visit_Or = binop('or', interceptable=False)
visit_Pos = uaop('+')
visit_Neg = uaop('-')
visit_Not = uaop('not ', interceptable=False)
del binop, uaop
def visit_Concat(self, node, frame):
if frame.eval_ctx.volatile:
func_name = '(context.eval_ctx.volatile and' \
' markup_join or unicode_join)'
elif frame.eval_ctx.autoescape:
func_name = 'markup_join'
else:
func_name = 'unicode_join'
self.write('%s((' % func_name)
for arg in node.nodes:
self.visit(arg, frame)
self.write(', ')
self.write('))')
def visit_Compare(self, node, frame):
self.visit(node.expr, frame)
for op in node.ops:
self.visit(op, frame)
def visit_Operand(self, node, frame):
self.write(' %s ' % operators[node.op])
self.visit(node.expr, frame)
def visit_Getattr(self, node, frame):
self.write('environment.getattr(')
self.visit(node.node, frame)
self.write(', %r)' % node.attr)
def visit_Getitem(self, node, frame):
# slices bypass the environment getitem method.
if isinstance(node.arg, nodes.Slice):
self.visit(node.node, frame)
self.write('[')
self.visit(node.arg, frame)
self.write(']')
else:
self.write('environment.getitem(')
self.visit(node.node, frame)
self.write(', ')
self.visit(node.arg, frame)
self.write(')')
def visit_Slice(self, node, frame):
if node.start is not None:
self.visit(node.start, frame)
self.write(':')
if node.stop is not None:
self.visit(node.stop, frame)
if node.step is not None:
self.write(':')
self.visit(node.step, frame)
def visit_Filter(self, node, frame):
self.write(self.filters[node.name] + '(')
func = self.environment.filters.get(node.name)
if func is None:
self.fail('no filter named %r' % node.name, node.lineno)
if getattr(func, 'contextfilter', False):
self.write('context, ')
elif getattr(func, 'evalcontextfilter', False):
self.write('context.eval_ctx, ')
elif getattr(func, 'environmentfilter', False):
self.write('environment, ')
# if the filter node is None we are inside a filter block
# and want to write to the current buffer
if node.node is not None:
self.visit(node.node, frame)
elif frame.eval_ctx.volatile:
self.write('(context.eval_ctx.autoescape and'
' Markup(concat(%s)) or concat(%s))' %
(frame.buffer, frame.buffer))
elif frame.eval_ctx.autoescape:
self.write('Markup(concat(%s))' % frame.buffer)
else:
self.write('concat(%s)' % frame.buffer)
self.signature(node, frame)
self.write(')')
def visit_Test(self, node, frame):
self.write(self.tests[node.name] + '(')
if node.name not in self.environment.tests:
self.fail('no test named %r' % node.name, node.lineno)
self.visit(node.node, frame)
self.signature(node, frame)
self.write(')')
def visit_CondExpr(self, node, frame):
def write_expr2():
if node.expr2 is not None:
return self.visit(node.expr2, frame)
self.write('environment.undefined(%r)' % ('the inline if-'
'expression on %s evaluated to false and '
'no else section was defined.' % self.position(node)))
self.write('(')
self.visit(node.expr1, frame)
self.write(' if ')
self.visit(node.test, frame)
self.write(' else ')
write_expr2()
self.write(')')
def visit_Call(self, node, frame, forward_caller=False):
if self.environment.sandboxed:
self.write('environment.call(context, ')
else:
self.write('context.call(')
self.visit(node.node, frame)
extra_kwargs = forward_caller and {'caller': 'caller'} or None
self.signature(node, frame, extra_kwargs)
self.write(')')
def visit_Keyword(self, node, frame):
self.write(node.key + '=')
self.visit(node.value, frame)
# -- Unused nodes for extensions
def visit_MarkSafe(self, node, frame):
self.write('Markup(')
self.visit(node.expr, frame)
self.write(')')
def visit_MarkSafeIfAutoescape(self, node, frame):
self.write('(context.eval_ctx.autoescape and Markup or identity)(')
self.visit(node.expr, frame)
self.write(')')
def visit_EnvironmentAttribute(self, node, frame):
self.write('environment.' + node.name)
def visit_ExtensionAttribute(self, node, frame):
self.write('environment.extensions[%r].%s' % (node.identifier, node.name))
def visit_ImportedName(self, node, frame):
self.write(self.import_aliases[node.importname])
def visit_InternalName(self, node, frame):
self.write(node.name)
def visit_ContextReference(self, node, frame):
self.write('context')
def visit_Continue(self, node, frame):
self.writeline('continue', node)
def visit_Break(self, node, frame):
self.writeline('break', node)
def visit_Scope(self, node, frame):
scope_frame = frame.inner()
scope_frame.inspect(node.iter_child_nodes())
aliases = self.push_scope(scope_frame)
self.pull_locals(scope_frame)
self.blockvisit(node.body, scope_frame)
self.pop_scope(aliases, scope_frame)
def visit_EvalContextModifier(self, node, frame):
for keyword in node.options:
self.writeline('context.eval_ctx.%s = ' % keyword.key)
self.visit(keyword.value, frame)
try:
val = keyword.value.as_const(frame.eval_ctx)
except nodes.Impossible:
frame.eval_ctx.volatile = True
else:
setattr(frame.eval_ctx, keyword.key, val)
def visit_ScopedEvalContextModifier(self, node, frame):
old_ctx_name = self.temporary_identifier()
safed_ctx = frame.eval_ctx.save()
self.writeline('%s = context.eval_ctx.save()' % old_ctx_name)
self.visit_EvalContextModifier(node, frame)
for child in node.body:
self.visit(child, frame)
frame.eval_ctx.revert(safed_ctx)
self.writeline('context.eval_ctx.revert(%s)' % old_ctx_name)
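# --- illustrative sketch (not part of Jinja2 itself) -------------------------
# The binop()/uaop() factories above build one visitor method per operator,
# each closed over its operator string, and then delete themselves from the
# class namespace.  A minimal standalone model of the same pattern:
class _OperatorEmitterSketch(object):
    def __init__(self):
        self.out = []
    def write(self, s):
        self.out.append(s)
    def binop(operator):
        def visitor(self, left, right):
            # emit "(left OP right)", mirroring the non-sandboxed branch
            self.write('(')
            self.write(left)
            self.write(' %s ' % operator)
            self.write(right)
            self.write(')')
        return visitor
    visit_Add = binop('+')
    visit_Sub = binop('-')
    del binop
# e.g. s = _OperatorEmitterSketch(); s.visit_Add('a', 'b')
# gives ''.join(s.out) == '(a + b)'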
|
{
"content_hash": "12254e13d908b5a62d60fb8aeb536442",
"timestamp": "",
"source": "github",
"line_count": 1685,
"max_line_length": 87,
"avg_line_length": 38.87655786350148,
"alnum_prop": 0.5457737341047522,
"repo_name": "fancasy/final",
"id": "5dcb42f81e4a20ebda5dd5d3a1b48731d2328d0e",
"size": "65532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/jinja2/compiler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "51200"
},
{
"name": "CSS",
"bytes": "824461"
},
{
"name": "HTML",
"bytes": "897427"
},
{
"name": "JavaScript",
"bytes": "5449090"
},
{
"name": "Python",
"bytes": "3283421"
}
],
"symlink_target": ""
}
|
import argparse
import os
import sys
import shutil as st
from lib.textgrid import TextGrid
from lib import utils
def neg_vot_creator(audio_path, textgrid_path, output_path, l):
# defines
tmp_dir = 'tmp/'
tmp_input = tmp_dir + 'tmp.input'
tmp_label = tmp_dir + 'tmp.labels'
label_suffix = '.labels'
tmp_features = tmp_dir + 'tmp.features'
tmp_file = tmp_dir + 'tmp.wav'
# validation
if not os.path.exists(audio_path):
        print >> sys.stderr, 'Error: input path does not exist.'
return
if not os.path.exists(output_path):
        print 'output path does not exist; creating output directory.'
os.mkdir(output_path)
# create tmp dir
if os.path.exists(tmp_dir):
st.rmtree(tmp_dir)
os.mkdir(tmp_dir)
count = 0
# loop over all the files in the input dir
for item in os.listdir(audio_path):
if item.endswith('.wav'):
try:
# convert to 16K 16bit
cmd = 'sbin/sox %s -r 16000 -b 16 %s' % (audio_path + item, tmp_file)
utils.easy_call(cmd)
# parse the textgrid
textgrid = TextGrid()
textgrid.read(textgrid_path + item.replace('.wav', '.TextGrid'))
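                # reach through TextGrid's name-mangled internals for the
                # xmin (start time) of the second interval on the third tier,
                # i.e. where the release begins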
release_start = textgrid._TextGrid__tiers[2]._IntervalTier__intervals[1]._Interval__xmin
end_time = release_start
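                # tally recordings whose release starts within the first 100 ms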
if end_time - 0.1 < 0:
count += 1
start_time = max(0, end_time - 0.1)
# =================== ACOUSTIC FEATURES =================== #
# write labels
label_file = output_path + item.replace('.wav', label_suffix)
fid = open(label_file, 'w')
fid.write('%s\n' % str(l))
fid.close()
# creating the files
input_file = open(tmp_features, 'wb') # open the input file for the feature extraction
features_file = open(tmp_input, 'wb') # open file for the feature list path
labels_file = open(tmp_label, 'wb') # open file for the labels
# write the data
input_file.write(
'"' + tmp_file + '" ' + str('%.8f' % float(start_time)) + ' ' + str(
float(end_time)) + ' ' + str(
'%.8f' % float(start_time)) + ' ' + str('%.8f' % float(end_time)))
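                # the resulting single-line input file looks like (values
                # illustrative):
                #   "tmp/tmp.wav" 0.12345678 0.22345678 0.12345678 0.22345678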
features_file.write(output_path + item.replace('.wav', '.txt'))
input_file.close()
features_file.close()
labels_file.close()
command = "./sbin/VowelDurationFrontEnd %s %s %s" % (input_file.name, features_file.name, labels_file.name)
utils.easy_call(command)
# remove leftovers
os.remove(tmp_input)
os.remove(tmp_label)
os.remove(tmp_features)
            except Exception:
print item
st.rmtree(tmp_dir)
if __name__ == "__main__":
# -------------MENU-------------- #
# command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("audio_path", help="The path to the audio directory")
parser.add_argument("textgrid_path", help="The path to the relevant textgrids")
parser.add_argument("output_path", help="The path to output directory")
args = parser.parse_args()
# main function
neg_vot_creator(args.audio_path, args.textgrid_path, args.output_path, 0)
|
{
"content_hash": "1b80a1360041055093db32ed58a0073c",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 123,
"avg_line_length": 38.48913043478261,
"alnum_prop": 0.5272521886472747,
"repo_name": "adiyoss/DeepVOT",
"id": "69f059109ba800510b9504d53b52194700ddd33a",
"size": "3541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "front_end/front_end_prevoicing_detection.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "100683"
},
{
"name": "Python",
"bytes": "80070"
},
{
"name": "Shell",
"bytes": "1096"
}
],
"symlink_target": ""
}
|
from rand import adapted_rand
import numpy as np
import yaml
from tifffile import imread
import sys
def run(config):
image_path, labels_path, raw_path = config['SAVE_PRED'], config['LABELS_DENSE'], config['DATA']
seg, labels, raw = imread(image_path)[2:12], imread(labels_path)[-16:-6], imread(raw_path)[-16:-6,:,:,0]
pixel_error = np.mean(seg != labels)
rand_error = adapted_rand(seg, labels)
print("pixel error: {}, rand error {}".format(pixel_error, rand_error))
if __name__ == '__main__':
with open('config.yml', 'r') as f:
config = yaml.load(f)
run(config)
|
{
"content_hash": "8b9400aad80cc2dae8f92a40e98ee0f8",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 108,
"avg_line_length": 35.23529411764706,
"alnum_prop": 0.6444073455759599,
"repo_name": "jwohlwend/Flood-Filling-Networks",
"id": "568fcef68daa0bee812f326c8632150d5e6c3834",
"size": "599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evaluate.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13307"
}
],
"symlink_target": ""
}
|
"""
extracty.server -- HTTP interface to extracty
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import urlparse
import logging
try:
import simplejson as json
except ImportError:
import json
from . import extract
from .utils import fetch_url
__all__ = ('application',)
# TODO: remove me
logging.getLogger('waitress').addHandler(logging.StreamHandler())
def application(environ, start_response):
""" WSGI application"""
is_view = environ['PATH_INFO'] == '/view'
def response(data, status="200 Success"):
if is_view:
headers = [('Content-type', 'text/html')]
else:
headers = [('Content-type', 'application/json')]
start_response(str(status), headers)
return [json.dumps(data)] if not is_view else data.encode('utf8')
def error(message):
msg = {"error": message} if not is_view else message
return response(msg, status="400 Error")
def get_result():
qs = urlparse.parse_qs(environ['QUERY_STRING'])
        if 'url' not in qs:
return error("missing 'url' parameter")
kwargs = {}
for kw in ('cover_image', 'author', 'title'):
key = 'no_%s' % kw
if key in qs:
kwargs[kw] = False
url = qs['url'][0]
doc = fetch_url(url)
return extract(doc, url, **kwargs)
result = get_result()
return response(result) if not is_view else response(template % result)
template = """
<!doctype html>
<style>
.author, .url {
    font-size: 80%%;
color: #666;
}
</style>
<div>
<div class="author">author: %(author)s</div>
<div class="url">url: %(url)s</div>
<img src="%(cover_image)s">
<h1 class="title">%(title)s</h1>
<div class="content">%(content)s</div>
</div>
"""
|
{
"content_hash": "0fbda8bea656fe80f7957487bc45f03c",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 75,
"avg_line_length": 25.52112676056338,
"alnum_prop": 0.5634657836644592,
"repo_name": "andreypopp/extracty",
"id": "676181ff9bd3e077780f6dc1d4c10faa9de53e95",
"size": "1812",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "extracty/app.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "54198"
}
],
"symlink_target": ""
}
|
import argparse
import asyncio
import logging
import math
import time
from collections import namedtuple
from pathlib import Path
from struct import pack, calcsize
from typing import Awaitable, List, Tuple
import aiohttp
import humanize
from aiohttp.formdata import FormData
from jinja2 import Environment, FileSystemLoader
from jinja2.environment import Template
from yarl import URL
from il2fb.maps.heightmaps.constants import HEIGHT_PACK_FORMAT
from il2fb.maps.heightmaps.constants import MAP_SCALE
from il2fb.maps.heightmaps.constants import MAX_OBJECTS_IN_MISSION
from il2fb.maps.heightmaps.logging import setup_logging
__here__ = Path(__file__).parent.absolute()
LOG = logging.getLogger(__name__)
PointsPartition = namedtuple('PointsPartition', ['start', 'end'])
def load_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description=(
"Create heightmap for a given location of "
"«IL-2 Sturmovik: Forgotten Battles»"
),
)
parser.add_argument(
'-l', '--loader',
dest='loader',
type=str,
help="Map loader, e.g. 'Hawaii/load.ini'",
required=True,
)
parser.add_argument(
'--height',
dest='height',
type=int,
required=True,
help=f"Map height in meters. Must be proportional to {MAP_SCALE}",
)
parser.add_argument(
'--width',
dest='width',
type=int,
required=True,
help=f"Map width in meters. Must be proportional to {MAP_SCALE}",
)
parser.add_argument(
'-o', '--out',
dest='output_file_path',
type=lambda x: Path(x).resolve(),
default="heightmap.raw",
help="Output file path. Default: 'heightmap.raw'",
)
parser.add_argument(
'-s', '--servers',
dest='server_addresses',
type=URL,
nargs='+',
required=True,
help=(
"Addesses of root HTTP API endpoints of dedicated servers "
"controlled by «il2fb-ds-airbridge»"
),
)
args = parser.parse_args()
if args.height % MAP_SCALE != 0:
parser.error(f"Map height must be proportional to {MAP_SCALE}")
if args.width % MAP_SCALE != 0:
parser.error(f"Map width must be proportional to {MAP_SCALE}")
return args
def log_input_data(args: argparse.Namespace) -> None:
map_name = args.loader.split('/', 1)[0]
LOG.debug(f"map to query: {map_name}")
LOG.debug(f" height, m: {args.height}")
LOG.debug(f" width, m: {args.width}")
LOG.debug(f"output file: {args.output_file_path}")
LOG.debug(f"servers:")
for s in args.server_addresses:
LOG.debug(f" - {s}")
def get_mission_template() -> Template:
jinja_env = Environment(
loader=FileSystemLoader(str(__here__ / 'templates'))
)
return jinja_env.get_template('mission.j2')
def get_total_points_number(height: int, width: int) -> int:
return (height // MAP_SCALE) * (width // MAP_SCALE)
def partition_points(
total_points: int,
partitions: int,
) -> List[PointsPartition]:
current_id = 0
last_id = total_points - 1
step = min(
MAX_OBJECTS_IN_MISSION,
math.ceil(total_points / partitions),
)
for start_id in range(step, last_id, step):
yield PointsPartition(current_id, start_id - 1)
current_id = start_id
if current_id != last_id:
yield PointsPartition(current_id, last_id)
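# Worked example of the partitioning above (assuming MAX_OBJECTS_IN_MISSION
# is at least 4): splitting 10 points across 3 partitions steps by
# ceil(10 / 3) = 4, producing the inclusive ranges
#     list(partition_points(10, 3)) ==
#         [PointsPartition(start=0, end=3),
#          PointsPartition(start=4, end=7),
#          PointsPartition(start=8, end=9)]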
async def process_partitions_queue(
loop: asyncio.BaseEventLoop,
partitions_queue: asyncio.Queue,
results_queue: asyncio.Queue,
server_address: URL,
mission_template: Template,
mission_loader: str,
width: int,
scale: int,
) -> Awaitable[None]:
mission_name = mission_loader.split('/', 1)[0]
async with aiohttp.ClientSession() as http:
while True:
partition = await partitions_queue.get()
if partition is None:
partitions_queue.task_done()
return
await process_partition(
loop=loop,
results_queue=results_queue,
server_address=server_address,
http=http,
partition=partition,
mission_template=mission_template,
mission_loader=mission_loader,
mission_name=mission_name,
width=width,
scale=scale,
)
partitions_queue.task_done()
def index_to_point(idx: int, width: int, scale: int) -> Tuple[int, int]:
y, x = divmod(idx * scale, width)
y *= scale
return (x, y)
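# Worked example: with width=400 and scale=100 the flat index walks the grid
# row by row, four points per row:
#     index_to_point(0, 400, 100) == (0, 0)
#     index_to_point(3, 400, 100) == (300, 0)
#     index_to_point(4, 400, 100) == (0, 100)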
async def process_partition(
loop: asyncio.BaseEventLoop,
results_queue: asyncio.Queue,
server_address: URL,
http: aiohttp.ClientSession,
partition: PointsPartition,
mission_template: Template,
mission_loader: str,
mission_name: str,
width: int,
scale: int,
) -> Awaitable[None]:
LOG.debug(
f"query range [{partition.start}:{partition.end}] on server "
f"{server_address}"
)
file_name = f"{mission_name}_{partition.start}_{partition.end}.mis"
missions_url = server_address / "missions"
mission_dir_url = missions_url / "heightmap"
mission_url = mission_dir_url / file_name
points = (
index_to_point(i, width, scale)
for i in range(partition.start, partition.end + 1)
)
mission = mission_template.render(
loader=mission_loader,
points=points,
)
data = FormData()
data.add_field(
'mission',
mission.encode(),
filename=file_name,
content_type='plain/text',
)
await http.post(mission_dir_url, data=data)
await http.post(mission_url / "load")
await http.post(missions_url / "current" / "begin")
async with http.get(server_address / "radar" / "stationary-objects") as response:
data = await response.json()
data = [
pack(HEIGHT_PACK_FORMAT, int(point['pos']['z']))
for point in data
]
data = b''.join(data)
await http.post(missions_url / "current" / "unload")
await http.delete(mission_url)
await results_queue.put((partition, data))
async def process_results_queue(
results_queue: asyncio.Queue,
total_points: int,
output_file_path: Path,
) -> Awaitable[None]:
point_size = calcsize(HEIGHT_PACK_FORMAT)
output_size = point_size * total_points
natural_size = humanize.naturalsize(
output_size,
binary=True,
format='%.3f',
)
LOG.debug(f"output size: {natural_size}")
processed_points = 0
    output_file_path.parent.mkdir(parents=True, exist_ok=True)
with output_file_path.open('wb') as f:
f.truncate(output_size)
while True:
data = await results_queue.get()
if not data:
results_queue.task_done()
return
partition, values = data
start = partition.start * point_size
processed_points += (partition.end - partition.start) + 1
progress = (processed_points / total_points) * 100
LOG.debug(
f"gather results for range "
f"[{partition.start}:{partition.end}], "
f"progress: {progress:.2f}%"
)
f.seek(start)
f.write(values)
results_queue.task_done()
async def run(
loop: asyncio.BaseEventLoop,
server_addresses: List[URL],
mission_template: Template,
mission_loader: str,
height: int,
width: int,
scale: int,
output_file_path: Path,
) -> Awaitable[None]:
total_points = get_total_points_number(height, width)
LOG.debug(f"total points to query: {total_points}")
results_queue = asyncio.Queue(loop=loop)
future = process_results_queue(
results_queue=results_queue,
total_points=total_points,
output_file_path=output_file_path,
)
asyncio.ensure_future(future, loop=loop)
servers_count = len(server_addresses)
partitions_queue = asyncio.Queue(servers_count, loop=loop)
for server_address in server_addresses:
future = process_partitions_queue(
loop=loop,
partitions_queue=partitions_queue,
results_queue=results_queue,
server_address=server_address,
mission_template=mission_template,
mission_loader=mission_loader,
width=width,
scale=scale,
)
asyncio.ensure_future(future, loop=loop)
start_time = time.monotonic()
partitions = partition_points(total_points, servers_count)
for partition in partitions:
await partitions_queue.put(partition)
for i in range(servers_count):
await partitions_queue.put(None)
await partitions_queue.join()
await results_queue.put(None)
await results_queue.join()
run_time = time.monotonic() - start_time
LOG.debug(f"run time: {run_time:.3f} s")
def main() -> None:
args = load_args()
loop = asyncio.get_event_loop()
setup_logging()
log_input_data(args)
mission_template = get_mission_template()
loop.run_until_complete(run(
loop=loop,
server_addresses=args.server_addresses,
mission_template=mission_template,
mission_loader=args.loader,
height=args.height,
width=args.width,
scale=MAP_SCALE,
output_file_path=args.output_file_path,
))
if __name__ == '__main__':
main()
|
{
"content_hash": "35d579d09c6b0517cebdbd03b50e5570",
"timestamp": "",
"source": "github",
"line_count": 363,
"max_line_length": 85,
"avg_line_length": 26.46005509641873,
"alnum_prop": 0.6064549713690786,
"repo_name": "IL2HorusTeam/il2-heightmap-creator",
"id": "da262d06a7203666aa52470fcb99b55e97ecb138",
"size": "9634",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "il2fb/maps/heightmaps/creation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20913"
},
{
"name": "Shell",
"bytes": "680"
}
],
"symlink_target": ""
}
|
import sys
# Clear a possible previous mock... there has to be a better way
sys.modules.pop('atrium.requester', '')
from mock import MagicMock
requestsMock = sys.modules['requests'] = MagicMock(spec=[
'get',
'post',
'put',
'delete',
'exceptions'
])
from atrium.requester import request
from atrium.errors import (
NetworkError,
RequestTimeoutError
)
import pytest
import unittest
import json
class HTTPError(Exception):
pass
class CustomConnectionError(Exception):
pass
class ProxyError(Exception):
pass
class SSLError(Exception):
pass
class Timeout(Exception):
pass
# Create the fake exceptions in our mock
requestsMock.exceptions.return_value = MagicMock()
requestsMock.exceptions.HTTPError = HTTPError
requestsMock.exceptions.ConnectionError = CustomConnectionError
requestsMock.exceptions.ProxyError = ProxyError
requestsMock.exceptions.SSLError = SSLError
requestsMock.exceptions.Timeout = Timeout
class TestRequest(unittest.TestCase):
def setUp(self):
self.headers = {
"foo": "bar"
}
self.payload = {
"bar": "baz"
}
def testGet(self):
requestsMock.get.side_effect = None
request("foo", "GET", headers=self.headers)
requestsMock.get.assert_called_with("foo", headers=self.headers)
def testPost(self):
request(
"foo",
"POST",
headers=self.headers,
payload=self.payload
)
requestsMock.post.assert_called_with(
"foo",
headers=self.headers,
data=json.dumps(self.payload)
)
def testPut(self):
request(
"foo",
"PUT",
headers=self.headers,
payload=self.payload
)
requestsMock.put.assert_called_with(
"foo",
headers=self.headers,
data=json.dumps(self.payload)
)
def testDelete(self):
request("foo", "DELETE", headers=self.headers)
requestsMock.delete.assert_called_with("foo", headers=self.headers)
def testHttpError(self):
requestsMock.get.side_effect = HTTPError('foo')
with pytest.raises(NetworkError):
request("foo", "GET")
def testConnectionError(self):
requestsMock.get.side_effect = CustomConnectionError('foo')
with pytest.raises(NetworkError):
request("foo", "GET")
def testProxyError(self):
requestsMock.get.side_effect = ProxyError('foo')
with pytest.raises(NetworkError):
request("foo", "GET")
    def testSslError(self):
requestsMock.get.side_effect = SSLError('foo')
with pytest.raises(NetworkError):
request("foo", "GET")
def testTimeout(self):
requestsMock.get.side_effect = Timeout('foo')
with pytest.raises(RequestTimeoutError):
request("foo", "GET")
|
{
"content_hash": "9ccbff48cb7dbebd9b9568bca72ab1ce",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 75,
"avg_line_length": 24.305785123966942,
"alnum_prop": 0.6229173750425026,
"repo_name": "phantomxc/pytrium",
"id": "4cb5d02cca87c698ba8c640df42f317bd1f7f4bf",
"size": "2941",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/requester_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "316"
},
{
"name": "Python",
"bytes": "35719"
}
],
"symlink_target": ""
}
|
"""
Custom widgets for Allusive
"""
##########################################################################
## Imports
##########################################################################
from .frame import *
from .text import *
|
{
"content_hash": "971bd4809db09a92965ec3c5d8785137",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 74,
"avg_line_length": 24,
"alnum_prop": 0.25,
"repo_name": "bbengfort/allusive-editor",
"id": "4d07c6db53283481bc83219b64712b614f26b179",
"size": "515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "allusive/widgets/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22532"
}
],
"symlink_target": ""
}
|
import datetime
import importlib
import logging
import os
import platform
import socket
import sys
import pikka_bird_collector
from .config import Config
COLLECTORS = [
'system', # keep first; sort rest
'mongodb',
'mysql',
'postgresql',
'rabbitmq',
'redis']
COLLECTORS_MODULE_P = 'pikka_bird_collector.collectors.'
for c in COLLECTORS:
importlib.import_module(COLLECTORS_MODULE_P + c)
class Collector():
"""
Main collector, which calls individual service Collectors and merges the
results. The environment, containing such things as PID, hostname, and
    kernel version, is passed to each collector.
"""
def __init__(self, config=None, logger=None):
"""
PARAMETERS:
            config : string
                filename of config to parse
logger : logger
logger
"""
self.config = Config(config)
self.logger = logger or logging.getLogger()
self.__set_environment()
def collect(self):
"""
        Collect metrics for all individual service Collectors, returning the
reports in a format suitable for sending to the Server, complete
with dates converted to strings. All times are in UTC, always.
RETURN:
: dict
collected reports, ready for sending to the Server
"""
reports = {}
collecting_at = datetime.datetime.utcnow()
self.logger.info("COLLECTING")
for c in COLLECTORS:
klass = getattr(sys.modules[COLLECTORS_MODULE_P + c], c.title())
service = klass.service()
collector = klass(self.environment, self.config.settings(service))
if collector.enabled():
self.logger.info("COLLECTING %s" % service)
reports[service] = collector.collect()
self.logger.debug("METRICS %s %s" % (service, reports[service]))
else:
self.logger.debug("SKIPPED %s" % service)
collected_at = datetime.datetime.utcnow()
self.logger.info("COLLECTED (%d s)" % (collected_at - collecting_at).seconds)
return {
'collecting_at': collecting_at.isoformat(),
'collected_at': collected_at.isoformat(),
'environment': self.environment,
'reports': reports}
def __set_environment(self):
self.environment = {
'hostname': socket.gethostname(),
'pid': os.getpid(),
'version': pikka_bird_collector.__version__,
'platform': {
'system': platform.system(),
'release': platform.release(),
'version': platform.version()}}
self.logger.info("ENVIRONMENT %s" % self.environment)
|
{
"content_hash": "a3009916526276245239a18d3894b2b7",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 85,
"avg_line_length": 31,
"alnum_prop": 0.5440638510143,
"repo_name": "tiredpixel/pikka-bird-collector-py",
"id": "bbc37378d583057e324abfde45264ae1fa219795",
"size": "3007",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pikka_bird_collector/collector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "158851"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
urlpatterns = patterns('primer.comments.views',
##
# prefix: comments
#
url(r'^load/$', 'load', name='comments-load'),
url(r'^post/$', 'post', name='comments-post'),
url(r'^delete/$', 'delete', name='comments-delete'),
url(r'^like/$', 'like', name='comments-like'),
)
|
{
"content_hash": "95f1a5c95c59fa6e1263ce2875a6a961",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 56,
"avg_line_length": 26.692307692307693,
"alnum_prop": 0.6109510086455331,
"repo_name": "jamesmfriedman/django-primer",
"id": "5f27f3cf57b246783e491dd91cb3ef275a70559d",
"size": "347",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "primer/comments/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "210640"
},
{
"name": "JavaScript",
"bytes": "76063"
},
{
"name": "PHP",
"bytes": "232"
},
{
"name": "Python",
"bytes": "137085"
},
{
"name": "Shell",
"bytes": "4521"
}
],
"symlink_target": ""
}
|
import unittest
from day08 import Screen
class TestInitializingScreen(unittest.TestCase):
def test_inits_screen(self):
screen = Screen(3, 5)
self.assertEqual(len(screen.data), 3)
self.assertEqual(len(screen.data[0]), 5)
class TestParsingCommands(unittest.TestCase):
cases = (
('rect 3x2', ('rect', (3, 2))),
('rotate column x=1 by 1', ('rotate_column', (1, 1))),
('rotate row y=1 by 2', ('rotate_row', (1, 2))),
)
def test_parses_commands(self):
screen = Screen(3, 5)
for command, expected in self.cases:
parsed = screen.parse_command(command)
self.assertEqual(parsed[0].__name__, expected[0])
self.assertEqual(parsed[1], expected[1])
class TestManipulatingScreen(unittest.TestCase):
def test_rect(self):
screen = Screen(3, 7)
screen.rect(3, 2)
self.assertEqual(screen.data,
[[True, True, True, False, False, False, False],
[True, True, True, False, False, False, False],
[False]*7])
def test_rotate_column(self):
screen = Screen(3, 7)
screen.rect(3, 2)
screen.rotate_column(1, 2)
self.assertEqual(screen.data,
[[True, True, True, False, False, False, False],
[True, False, True, False, False, False, False],
[False, True, False, False, False, False, False]])
def test_rotate_row(self):
screen = Screen(3, 7)
screen.rect(3, 2)
screen.rotate_row(1, 1)
self.assertEqual(screen.data,
[[True, True, True, False, False, False, False],
[False, True, True, True, False, False, False],
[False]*7])
def test_example_input(self):
screen = Screen(3, 7)
screen.rect(3, 2)
screen.rotate_column(1, 1)
screen.rotate_row(0, 4)
screen.rotate_column(1, 1)
self.assertEqual(screen.data,
[[False, True, False, False, True, False, True],
[True, False, True, False, False, False, False],
[False, True, False, False, False, False, False]])
class TestCountingLitPixels(unittest.TestCase):
cases = (
([[False, False, False], [False, False, False]], 0),
([[True, False, False], [False, False, True]], 2),
([[True, True, True], [True, True, True]], 6),
)
def test_counts_lit_pixels(self):
for data, expected in self.cases:
            screen = Screen(3, 2)
screen.data = data
self.assertEqual(screen.count_lit_pixels(), expected)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "911f982557fc8df50a7abbd2ee647cd6",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 67,
"avg_line_length": 32.17857142857143,
"alnum_prop": 0.5508694043655198,
"repo_name": "mpirnat/aoc2016",
"id": "2d1907a50aad051fb2ebe051e2ad3b083a8007b1",
"size": "2726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day08/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "70457"
}
],
"symlink_target": ""
}
|
"""\
pytest_watch.command
~~~~~~~~~~~~~~~~~~~~
Implements the command-line interface for pytest-watch.
All positional arguments after `--` are passed directly to py.test executable.
Usage:
ptw [options] [<directories>...] [-- <args>...]
Options:
-h --help Show this help.
--version Show version.
--ignore=<dirs> Comma-separated list of directories to ignore
(if relative: starting from the root of each watched dir).
-c --clear Automatically clear the screen before each run.
--onpass=<cmd> Run arbitrary command on pass.
--onfail=<cmd> Run arbitrary command on failure.
--nobeep Do not beep on failure.
-p --poll Use polling instead of events (useful in VMs).
--ext=<exts> Comma-separated list of file extensions that trigger a
new test run when changed (default: .py).
--no-spool Disable event spooling (default: 200ms cooldown).
-v --verbose Increase verbosity of the output.
-q --quiet Decrease verbosity of the output
(takes precedence over verbose).
"""
import sys
import colorama
from docopt import docopt
from .watcher import watch
from . import __version__
def main(argv=None):
"""
The entry point of the application.
"""
colorama.init()
usage = __doc__[__doc__.find('Usage:'):]
version = 'pytest-watch ' + __version__
argv = argv if argv is not None else sys.argv[1:]
args = docopt(usage, argv=argv, version=version)
pytest_args = []
directories = args['<directories>']
if '--' in directories:
index = directories.index('--')
pytest_args = directories[index + 1:]
directories = directories[:index]
ignore = (args['--ignore'] or '').split(',')
extensions = [('.' if not ext.startswith('.') else '') + ext
for ext in (args['--ext'] or '.py').split(',')]
return watch(directories=directories,
ignore=ignore,
auto_clear=args['--clear'],
beep_on_failure=not args['--nobeep'],
onpass=args['--onpass'],
onfail=args['--onfail'],
poll=args['--poll'],
extensions=extensions,
args=pytest_args,
spool=not args['--no-spool'],
verbose=args['--verbose'],
quiet=args['--quiet'])
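# Programmatic invocation sketch, mirroring the documented CLI contract
# (argument values are illustrative): everything after '--' is forwarded
# to py.test.
#
#     main(['tests', '--clear', '--', '-x', '-q'])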
|
{
"content_hash": "563fb4343456ab7175b060c5bccc7516",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 78,
"avg_line_length": 33.736111111111114,
"alnum_prop": 0.569781803211198,
"repo_name": "blueyed/pytest-watch",
"id": "fd44622b242f44f4d1508dc483e3e781f588586c",
"size": "2429",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pytest_watch/command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11090"
}
],
"symlink_target": ""
}
|
"""Generate template values for attributes.
Extends IdlType with property |constructor_type_name|.
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import idl_types
import dart_types
from dart_utilities import DartUtilities
from v8_globals import interfaces
import v8_attributes
def attribute_context(interface, attribute):
# Call v8's implementation.
context = v8_attributes.attribute_context(interface, attribute)
extended_attributes = attribute.extended_attributes
# Augment's Dart's information to context.
idl_type = attribute.idl_type
base_idl_type = idl_type.base_type
# TODO(terry): Work around for DOMString[] base should be IDLTypeArray
if base_idl_type == None:
# Returns Array or Sequence.
base_idl_type = idl_type.inner_name
# [Custom]
has_custom_getter = ('Custom' in extended_attributes and
extended_attributes['Custom'] in [None, 'Getter'])
has_custom_setter = (not attribute.is_read_only and
(('Custom' in extended_attributes and
extended_attributes['Custom'] in [None, 'Setter'])))
is_call_with_script_state = DartUtilities.has_extended_attribute_value(attribute, 'CallWith', 'ScriptState')
    is_auto_scope = 'DartNoAutoScope' not in extended_attributes
context.update({
'has_custom_getter': has_custom_getter,
'has_custom_setter': has_custom_setter,
'is_auto_scope': is_auto_scope, # Used internally (outside of templates).
'is_call_with_script_state': is_call_with_script_state,
'auto_scope': DartUtilities.bool_to_cpp(is_auto_scope),
'dart_type': dart_types.idl_type_to_dart_type(idl_type),
})
if v8_attributes.is_constructor_attribute(attribute):
v8_attributes.constructor_getter_context(interface, attribute, context)
return context
if not v8_attributes.has_custom_getter(attribute):
getter_context(interface, attribute, context)
if (not attribute.is_read_only):
# FIXME: We did not previously support the PutForwards attribute, so I am
# disabling it here for now to get things compiling.
# We may wish to revisit this.
# if (not has_custom_setter(attribute) and
# (not attribute.is_read_only or 'PutForwards' in extended_attributes)):
setter_context(interface, attribute, context)
native_entry_getter = \
DartUtilities.generate_native_entry(
interface.name, attribute.name, 'Getter', attribute.is_static, 0)
native_entry_setter = \
DartUtilities.generate_native_entry(
interface.name, attribute.name, 'Setter', attribute.is_static, 1)
context.update({
'native_entry_getter': native_entry_getter,
'native_entry_setter': native_entry_setter,
})
return context
################################################################################
# Getter
################################################################################
def getter_context(interface, attribute, context):
v8_attributes.getter_context(interface, attribute, context)
idl_type = attribute.idl_type
base_idl_type = idl_type.base_type
extended_attributes = attribute.extended_attributes
cpp_value = getter_expression(interface, attribute, context)
# Normally we can inline the function call into the return statement to
# avoid the overhead of using a Ref<> temporary, but for some cases
# (nullable types, EventHandler, [CachedAttribute], or if there are
# exceptions), we need to use a local variable.
# FIXME: check if compilers are smart enough to inline this, and if so,
# always use a local variable (for readability and CG simplicity).
release = False
if (idl_type.is_nullable or
base_idl_type == 'EventHandler' or
'CachedAttribute' in extended_attributes or
'ReflectOnly' in extended_attributes or
context['is_getter_raises_exception']):
context['cpp_value_original'] = cpp_value
cpp_value = 'result'
# EventHandler has special handling
if base_idl_type != 'EventHandler' and idl_type.is_interface_type:
release = True
dart_set_return_value = \
idl_type.dart_set_return_value(cpp_value,
extended_attributes=extended_attributes,
script_wrappable='impl',
release=release,
for_main_world=False,
auto_scope=context['is_auto_scope'])
context.update({
'cpp_value': cpp_value,
'dart_set_return_value': dart_set_return_value,
})
def getter_expression(interface, attribute, context):
v8_attributes.getter_expression(interface, attribute, context)
arguments = []
this_getter_base_name = v8_attributes.getter_base_name(interface, attribute, arguments)
getter_name = DartUtilities.scoped_name(interface, attribute, this_getter_base_name)
arguments.extend(DartUtilities.call_with_arguments(
attribute.extended_attributes.get('CallWith')))
if ('PartialInterfaceImplementedAs' in attribute.extended_attributes and
not attribute.is_static):
# Pass by reference.
arguments.append('*receiver')
if attribute.idl_type.is_explicit_nullable:
arguments.append('is_null')
if context['is_getter_raises_exception']:
arguments.append('es')
return '%s(%s)' % (getter_name, ', '.join(arguments))
################################################################################
# Setter
################################################################################
def setter_context(interface, attribute, context):
v8_attributes.setter_context(interface, attribute, context)
def target_attribute():
target_interface_name = attribute.idl_type.base_type
target_attribute_name = extended_attributes['PutForwards']
target_interface = interfaces[target_interface_name]
try:
return next(attribute
for attribute in target_interface.attributes
if attribute.name == target_attribute_name)
except StopIteration:
raise Exception('[PutForward] target not found:\n'
'Attribute "%s" is not present in interface "%s"' %
(target_attribute_name, target_interface_name))
extended_attributes = attribute.extended_attributes
if 'PutForwards' in extended_attributes:
# Use target attribute in place of original attribute
attribute = target_attribute()
this_cpp_type = 'DartStringAdapter'
else:
this_cpp_type = context['cpp_type']
idl_type = attribute.idl_type
context.update({
'has_setter_exception_state': (
context['is_setter_raises_exception'] or context['has_type_checking_interface'] or
idl_type.is_integer_type),
'setter_lvalue': dart_types.check_reserved_name(attribute.name),
'cpp_type': this_cpp_type,
'local_cpp_type': idl_type.cpp_type_args(attribute.extended_attributes, raw_type=True),
'dart_value_to_local_cpp_value':
attribute.idl_type.dart_value_to_local_cpp_value(
extended_attributes, attribute.name, False,
context['has_type_checking_interface'], 1,
context['is_auto_scope']),
})
# setter_expression() depends on context values we set above.
context['cpp_setter'] = setter_expression(interface, attribute, context)
def setter_expression(interface, attribute, context):
extended_attributes = attribute.extended_attributes
arguments = DartUtilities.call_with_arguments(
extended_attributes.get('SetterCallWith') or
extended_attributes.get('CallWith'))
this_setter_base_name = v8_attributes.setter_base_name(interface, attribute, arguments)
setter_name = DartUtilities.scoped_name(interface, attribute, this_setter_base_name)
if ('PartialInterfaceImplementedAs' in extended_attributes and
not attribute.is_static):
arguments.append('*receiver')
idl_type = attribute.idl_type
if idl_type.base_type == 'EventHandler':
getter_name = DartUtilities.scoped_name(interface, attribute, DartUtilities.cpp_name(attribute))
context['event_handler_getter_expression'] = '%s(%s)' % (
getter_name, ', '.join(arguments))
# FIXME(vsm): Do we need to support this? If so, what's our analogue of
# V8EventListenerList?
arguments.append('nullptr')
else:
attribute_name = dart_types.check_reserved_name(attribute.name)
arguments.append(attribute_name)
if context['is_setter_raises_exception']:
arguments.append('es')
return '%s(%s)' % (setter_name, ', '.join(arguments))
################################################################################
# Attribute configuration
################################################################################
# [Replaceable]
def setter_callback_name(interface, attribute):
cpp_class_name = DartUtilities.cpp_name(interface)
extended_attributes = attribute.extended_attributes
if (('Replaceable' in extended_attributes and
'PutForwards' not in extended_attributes) or
v8_attributes.is_constructor_attribute(attribute)):
# FIXME: rename to ForceSetAttributeOnThisCallback, since also used for Constructors
return '{0}V8Internal::{0}ReplaceableAttributeSetterCallback'.format(cpp_class_name)
    # FIXME: disabling PutForwards for now since we didn't support it before
# if attribute.is_read_only and 'PutForwards' not in extended_attributes:
if attribute.is_read_only:
return '0'
return '%sV8Internal::%sAttributeSetterCallback' % (cpp_class_name, attribute.name)
################################################################################
# Constructors
################################################################################
idl_types.IdlType.constructor_type_name = property(
# FIXME: replace this with a [ConstructorAttribute] extended attribute
lambda self: DartUtilities.strip_suffix(self.base_type, 'Constructor'))
|
{
"content_hash": "cebeab213bb23c3e141c4dc639f0faee",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 112,
"avg_line_length": 42.2520325203252,
"alnum_prop": 0.6245911102559168,
"repo_name": "takaaptech/sky_engine",
"id": "8ed32c07982d30227af5af0f66d9bb3d49a7dde6",
"size": "11924",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sky/engine/bindings/scripts/dart_attributes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "2706"
},
{
"name": "C",
"bytes": "1690478"
},
{
"name": "C++",
"bytes": "29453265"
},
{
"name": "Dart",
"bytes": "983023"
},
{
"name": "Go",
"bytes": "174229"
},
{
"name": "Groff",
"bytes": "29030"
},
{
"name": "HTML",
"bytes": "41854"
},
{
"name": "Java",
"bytes": "964898"
},
{
"name": "JavaScript",
"bytes": "154042"
},
{
"name": "Makefile",
"bytes": "402"
},
{
"name": "Objective-C",
"bytes": "76436"
},
{
"name": "Objective-C++",
"bytes": "432106"
},
{
"name": "Protocol Buffer",
"bytes": "1048"
},
{
"name": "Python",
"bytes": "5651395"
},
{
"name": "Shell",
"bytes": "174455"
},
{
"name": "Yacc",
"bytes": "31141"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
"""
New Tab Extension for Python-Markdown
=====================================
Modify the behavior of links in Python-Markdown to open in a new
window. This changes the HTML output to add target="_blank" to all
generated links.
"""
import markdown
from markdown.inlinepatterns import \
LinkPattern, ReferencePattern, AutolinkPattern, AutomailPattern, \
LINK_RE, REFERENCE_RE, SHORT_REF_RE, AUTOLINK_RE, AUTOMAIL_RE
class NewTabMixin(object):
def handleMatch(self, m):
el = super(NewTabMixin, self).handleMatch(m)
if el is not None:
el.set('target', '_blank')
return el
class NewTabLinkPattern(NewTabMixin, LinkPattern):
pass
class NewTabReferencePattern(NewTabMixin, ReferencePattern):
pass
class NewTabAutolinkPattern(NewTabMixin, AutolinkPattern):
pass
class NewTabAutomailPattern(NewTabMixin, AutomailPattern):
pass
class NewTabExtension(markdown.Extension):
"""
Modifies HTML output to open links in a new tab.
"""
def extendMarkdown(self, md, md_globals):
md.inlinePatterns['link'] = NewTabLinkPattern(LINK_RE, md)
md.inlinePatterns['reference'] = NewTabReferencePattern(REFERENCE_RE, md)
md.inlinePatterns['short_reference'] = NewTabReferencePattern(SHORT_REF_RE, md)
md.inlinePatterns['autolink'] = NewTabAutolinkPattern(AUTOLINK_RE, md)
md.inlinePatterns['automail'] = NewTabAutomailPattern(AUTOMAIL_RE, md)
def makeExtension(*args, **kwargs):
return NewTabExtension(*args, **kwargs)
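# Usage sketch (standard Python-Markdown extension loading; the URL is just
# an example):
#
#     import markdown
#     html = markdown.markdown('[link](http://example.com/)',
#                              extensions=[NewTabExtension()])
#     # -> '<p><a href="http://example.com/" target="_blank">link</a></p>'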
|
{
"content_hash": "92ac67a011567e0bc547044fbad4d556",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 87,
"avg_line_length": 28.962264150943398,
"alnum_prop": 0.701628664495114,
"repo_name": "dataquestio/Python-Markdown",
"id": "9cea03864b8a9b70ff580d589f0d811fcc0bb438",
"size": "1535",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "markdown/extensions/newtab.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "629910"
},
{
"name": "Makefile",
"bytes": "1493"
},
{
"name": "Python",
"bytes": "290093"
},
{
"name": "Shell",
"bytes": "912"
}
],
"symlink_target": ""
}
|
from tornado.ioloop import IOLoop
from tornado.web import RequestHandler, Application, url
from tornado import log
import logging
import Filter
import Settings
import Cache
import os
import json
import Log
import time
class MainHandler(RequestHandler):
def get(self):
self.write("FeedFilter")
class FilterHandler(RequestHandler):
def get(self, name):
settings = Settings.Settings()
url = settings.getItem(name)[0]
blacklist = settings.getItem("blacklist")
p = Filter.Processor(blacklist)
xml = p.Parse(url)
self.set_header("Content-Type", "text/xml; charset=utf-8")
self.write(xml)
class CacheHandler(RequestHandler):
def __init__(self, application, request, **kwargs):
self._cache = Cache.Cache()
return super(CacheHandler, self).__init__(application, request, **kwargs)
def get(self):
items = self._cache.get()
self.render(os.path.join("templates", "cache.html"), title="Cache", items=items)
class LogHandler(RequestHandler):
def get(self, count):
items = logInstance.get(count)
self.render(os.path.join("templates", "log.html"), title="Log", items=items)
class SettingsHandler(RequestHandler):
def __init__(self, application, request, **kwargs):
self._settings = Settings.Settings()
return super(SettingsHandler, self).__init__(application, request, **kwargs)
def get(self):
self._render()
def _render(self):
self.render(os.path.join("templates", "settings.html"), title="Settings")
class SettingsServiceHandler(RequestHandler):
def __init__(self, application, request, **kwargs):
self._settings = Settings.Settings()
return super(SettingsServiceHandler, self).__init__(application, request, **kwargs)
def get(self):
self.write(self._getItems())
def post(self):
input = json.loads(self.request.body)
self._settings.add(input["name"], input["value"])
self.write(self._getItems())
def delete(self):
self._settings.delete(self.get_query_argument("name"))
self.write(self._getItems())
def _getItems(self):
items = self._settings.get()
return {"items": [{'name':i.name, 'value':i.value} for i in items]}
logInstance = Log.Log()
def LogRequest(handler):
logInstance.add(handler.request._start_time, handler.request.remote_ip, handler.request.uri)
settings = {
"static_path": os.path.join(os.path.dirname(__file__), "static"),
"log_function": LogRequest
}
application = Application([
url(r"/", MainHandler),
url(r"/filter/(.*)", FilterHandler, name="filter"),
url(r"/cache/", CacheHandler, name="cache"),
url(r"/log/(.*)", LogHandler, name="log"),
url(r"/settings/", SettingsHandler, name="settings"),
url(r"/api/settings", SettingsServiceHandler, name="settingsService"),
], **settings)
class LoggerHandler(logging.Handler):
def emit(self, record):
logInstance.add(time.time(), "error", self.format(record))
if __name__ == "__main__":
application.listen(9357)
logInstance.add(time.time(), "local", "Start")
log.app_log.addHandler(LoggerHandler())
IOLoop.instance().start()
|
{
"content_hash": "b7d683f6af345fee6c2c3bc43ba141e0",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 96,
"avg_line_length": 31.240384615384617,
"alnum_prop": 0.6509695290858726,
"repo_name": "Yustos/FeedFilter",
"id": "d4e5432fe1fd7a0577109164babe916c33569102",
"size": "3273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1082"
},
{
"name": "Python",
"bytes": "10186"
}
],
"symlink_target": ""
}
|
import types
from StringIO import StringIO
from validator.contextgenerator import ContextGenerator
try:
from html.parser import HTMLParser, HTMLParseError
except ImportError:
from HTMLParser import HTMLParser, HTMLParseError
class DTDParser(object):
'Parses and serializes DTD files. This is useful for L10n tests.'
def __init__(self, dtd):
"""
Creation of DTD parsers can be done based on a local file
(provided as a string to the path), or directly (in memory as a
StringIO object).
"""
self.entities = {}
self.items = []
data = ''
if isinstance(dtd, types.StringTypes):
with open(dtd) as dtd_instance:
data = dtd_instance.read()
elif isinstance(dtd, file):
data = dtd.read()
elif isinstance(dtd, StringIO):
data = dtd.getvalue()
self._parse(data)
# Create a context for the file
self.context = ContextGenerator(data)
def __len__(self):
return len(self.entities)
def _parse(self, data):
'Parses the DTD data and stores it in an aggregate format.'
parser = DTDXMLParser()
        # Feed the DTD file in, line by line.
for split_line in data.split('\n'):
try:
parser.feed(split_line + '\n')
except HTMLParseError:
parser = DTDXMLParser()
else:
if parser.out_buffer:
for name, value, line in parser.out_buffer:
self.entities[name] = value
self.items.append((name, value, line))
parser.clear_buffer()
class DTDXMLParser(HTMLParser):
'Parses the individual XML entities in a DTD document.'
def __init__(self):
HTMLParser.__init__(self)
self.out_buffer = []
# Support for py2.7/3k
def handle_comment(self, data):
self.unknown_decl(data)
def unknown_decl(self, decl):
'Handles non-DOCTYPE SGML declarations in *ML documents.'
decl = decl.strip()
split_decl = decl.split()
if len(split_decl) < 3 or split_decl[0] != 'ENTITY':
# Interestingly enough, it legitimately IS an unknown
# declaration. Funny thing, you know?
return
self.out_buffer.append((split_decl[1],
split_decl[2].strip('\'"'),
self.getpos()[0])) # Pos 0 is the line no.
def clear_buffer(self):
'Clears the return buffer.'
self.out_buffer = []
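
# A minimal usage sketch (the .dtd path is hypothetical; DTDParser also
# accepts an already-open file object or a StringIO instance):
#
#   parser = DTDParser('locale/en-US/browser.dtd')
#   print len(parser)                   # number of entities parsed
#   print parser.entities.get('foo')    # value of <!ENTITY foo "...">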
|
{
"content_hash": "89a9ea921c261c256a4717099fa15c0d",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 75,
"avg_line_length": 30.091954022988507,
"alnum_prop": 0.5626432391138273,
"repo_name": "kumar303/amo-validator",
"id": "864e8ae9481e10ba0a427c9c6f52c885d511444b",
"size": "2618",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "validator/testcases/l10n/dtd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "466"
},
{
"name": "HTML",
"bytes": "2802"
},
{
"name": "JavaScript",
"bytes": "602"
},
{
"name": "Python",
"bytes": "811018"
},
{
"name": "Shell",
"bytes": "1842"
}
],
"symlink_target": ""
}
|
# Tools for managing the processing of SGD files
import MySQLdb
import sys, string
import Config
class ModelOrganisms( ) :
def __init__( self, db, cursor ) :
self.db = db
self.cursor = cursor
def buildSGDIDHash( self ) :
self.cursor.execute( "SELECT gene_id, gene_external_value FROM " + Config.DB_NAME + ".gene_externals" )
mappingHash = {}
for row in self.cursor.fetchall( ) :
mappingHash[str(row[1])] = str(row[0])
return mappingHash
def buildPombaseIDHash( self ) :
self.cursor.execute( "SELECT gene_id, gene_alias_value FROM " + Config.DB_NAME + ".gene_aliases WHERE gene_id IN ( SELECT gene_id FROM " + Config.DB_NAME + ".genes WHERE organism_id='284812' )" )
mappingHash = {}
for row in self.cursor.fetchall( ) :
mappingHash[str(row[1])] = str(row[0])
return mappingHash
def buildWormbaseLocusIDHash( self ) :
self.cursor.execute( "SELECT gene_id, gene_alias_value FROM " + Config.DB_NAME + ".gene_aliases WHERE gene_id IN ( SELECT gene_id FROM " + Config.DB_NAME + ".genes WHERE organism_id='6239' ) AND gene_alias_type='ordered locus'" )
mappingHash = {}
for row in self.cursor.fetchall( ) :
mappingHash[str(row[1]).replace( "CELE_", "" )] = str(row[0])
return mappingHash
def buildCGDAliasHash( self ) :
self.cursor.execute( "SELECT gene_id, gene_alias_value FROM " + Config.DB_NAME + ".gene_aliases WHERE gene_id IN ( SELECT gene_id FROM " + Config.DB_NAME + ".genes WHERE organism_id='237561' )" )
mappingHash = {}
for row in self.cursor.fetchall( ) :
if str(row[1]) not in mappingHash :
mappingHash[str(row[1])] = []
mappingHash[str(row[1])].append(str(row[0]))
return mappingHash
def processName( self, geneID, orfName, officialSymbol, officialType, aliases) :
self.cursor.execute( "SELECT gene_name FROM " + Config.DB_NAME + ".genes WHERE gene_id=%s LIMIT 1", [geneID] )
row = self.cursor.fetchone( )
if "" != officialSymbol and row[0].lower( ) != officialSymbol.lower( ) :
self.cursor.execute( "UPDATE " + Config.DB_NAME + ".genes SET gene_name=%s, gene_name_type=%s WHERE gene_id=%s", [officialSymbol, officialType, geneID] )
self.cursor.execute( "SELECT gene_alias_value FROM " + Config.DB_NAME + ".gene_aliases WHERE gene_id=%s AND gene_alias_status='active'", [geneID] )
aliasSet = set( )
for row in self.cursor.fetchall( ) :
aliasSet.add( row[0].strip( ).lower( ) )
if "" != orfName and orfName.lower( ) not in aliasSet :
self.cursor.execute( "UPDATE " + Config.DB_NAME + ".gene_aliases SET gene_alias_type='synonym' WHERE gene_alias_type='ordered locus' AND gene_id=%s", [geneID] )
self.cursor.execute( "INSERT INTO " + Config.DB_NAME + ".gene_aliases VALUES( '0',%s, 'active', 'ordered locus', NOW( ), %s )", [orfName, geneID] )
if "" != officialSymbol and officialSymbol.lower( ) not in aliasSet :
self.cursor.execute( "INSERT INTO " + Config.DB_NAME + ".gene_aliases VALUES( '0',%s, 'active', %s, NOW( ), %s )", [officialSymbol, officialType, geneID] )
for alias in aliases :
alias = alias.strip( )
if "" != alias and alias.lower( ) not in aliasSet :
self.cursor.execute( "INSERT INTO " + Config.DB_NAME + ".gene_aliases VALUES( '0',%s, 'active', 'synonym', NOW( ), %s )", [alias, geneID] )
self.db.commit( )
def processAddonSGDIDs( self, geneID, additionalSGDIDs ) :
self.cursor.execute( "SELECT gene_external_value FROM " + Config.DB_NAME + ".gene_externals WHERE gene_id=%s AND gene_external_status='active'", [geneID] )
externalSet = set( )
for row in self.cursor.fetchall( ) :
externalSet.add( row[0].lower( ) )
for external in additionalSGDIDs :
external = external.strip( )
if "" != external and external.lower( ) not in externalSet :
self.cursor.execute( "INSERT INTO " + Config.DB_NAME + ".gene_aliases VALUES( '0', %s, 'active', 'synonym', NOW( ), %s )", [external, geneID] )
self.db.commit( )
def processDefinition( self, geneID, definition, definitionType ) :
self.cursor.execute( "SELECT gene_definition_text FROM " + Config.DB_NAME + ".gene_definitions WHERE gene_id=%s AND gene_definition_status='active'", [geneID] )
definitionSet = set( )
for row in self.cursor.fetchall( ) :
definitionSet.add( row[0].lower( ) )
definition = definition.strip( )
if "" != definition and definition.lower( ) not in definitionSet :
self.cursor.execute( "INSERT INTO " + Config.DB_NAME + ".gene_definitions VALUES( '0', %s, %s, 'active', NOW( ), %s )", [definition, definitionType, geneID] )
self.db.commit( )
def processExternals( self, geneID, externals, externalSource ) :
self.cursor.execute( "SELECT gene_external_value FROM " + Config.DB_NAME + ".gene_externals WHERE gene_id=%s AND gene_external_source=%s AND gene_external_status='active'", [geneID, externalSource] )
externalSet = set( )
for row in self.cursor.fetchall( ) :
externalSet.add( row[0].lower( ) )
for external in externals :
external = external.strip( )
if "" != external and external.lower( ) not in externalSet :
self.cursor.execute( "INSERT INTO " + Config.DB_NAME + ".gene_externals VALUES( '0',%s,%s,'active',NOW( ),%s )", [external, externalSource.upper( ), geneID] )
self.db.commit( )
|
{
"content_hash": "d894e6c64da98a2e91f73b97a8168ed9",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 231,
"avg_line_length": 42.71653543307087,
"alnum_prop": 0.6425806451612903,
"repo_name": "starkfree/BioGRID-Annotation",
"id": "95bc022b47aa6c36308ecc730ad8070c925e9a0b",
"size": "5425",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "classes/ModelOrganisms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "257968"
},
{
"name": "Shell",
"bytes": "1717"
}
],
"symlink_target": ""
}
|
"""Test the RPC HTTP basics."""
import http.client
import urllib.parse
from test_framework.test_framework import PivxTestFramework
from test_framework.util import assert_equal, str_to_b64str
class HTTPBasicsTest (PivxTestFramework):
def set_test_params(self):
self.num_nodes = 3
def setup_network(self):
self.setup_nodes()
def run_test(self):
#################################################
# lowlevel check for http persistent connection #
#################################################
url = urllib.parse.urlparse(self.nodes[0].url)
authpair = url.username + ':' + url.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock is not None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock is not None) #according to http/1.1 connection must still be open!
conn.close()
        #the same should hold if we explicitly send keep-alive, since it is the standard behaviour
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection": "keep-alive"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock is not None) #according to http/1.1 connection must still be open!
#send 2nd request without closing connection
conn.request('POST', '/', '{"method": "getchaintips"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1) #must also response with a correct json-rpc message
assert(conn.sock is not None) #according to http/1.1 connection must still be open!
conn.close()
#now do the same with "Connection: close"
headers = {"Authorization": "Basic " + str_to_b64str(authpair), "Connection":"close"}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
assert(conn.sock is None) #now the connection must be closed after the response
#node1 (2nd node) is running with disabled keep-alive option
urlNode1 = urllib.parse.urlparse(self.nodes[1].url)
authpair = urlNode1.username + ':' + urlNode1.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode1.hostname, urlNode1.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
#node2 (third node) is running with standard keep-alive parameters which means keep-alive is on
urlNode2 = urllib.parse.urlparse(self.nodes[2].url)
authpair = urlNode2.username + ':' + urlNode2.password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
out1 = conn.getresponse().read()
assert(b'"error":null' in out1)
        assert(conn.sock is not None) #connection must still be open because pivxd should use keep-alive by default
# Check excessive request size
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*1000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.NOT_FOUND)
conn = http.client.HTTPConnection(urlNode2.hostname, urlNode2.port)
conn.connect()
conn.request('GET', '/' + ('x'*10000), '', headers)
out1 = conn.getresponse()
assert_equal(out1.status, http.client.BAD_REQUEST)
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
{
"content_hash": "8946a568c33cff0e17eeca1e95a250ef",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 111,
"avg_line_length": 44.00952380952381,
"alnum_prop": 0.6215104955637308,
"repo_name": "PIVX-Project/PIVX",
"id": "660c786f6cd34f1cfdaa46405ec5f79cc1ac61c9",
"size": "4835",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/functional/interface_http.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "953297"
},
{
"name": "C",
"bytes": "5168953"
},
{
"name": "C++",
"bytes": "9188709"
},
{
"name": "CMake",
"bytes": "203234"
},
{
"name": "CSS",
"bytes": "211710"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "Java",
"bytes": "30291"
},
{
"name": "JavaScript",
"bytes": "41357"
},
{
"name": "M4",
"bytes": "263162"
},
{
"name": "Makefile",
"bytes": "139139"
},
{
"name": "Objective-C++",
"bytes": "3642"
},
{
"name": "Python",
"bytes": "1505322"
},
{
"name": "QMake",
"bytes": "26219"
},
{
"name": "Rust",
"bytes": "139132"
},
{
"name": "Sage",
"bytes": "30188"
},
{
"name": "Shell",
"bytes": "101041"
},
{
"name": "TypeScript",
"bytes": "10706"
}
],
"symlink_target": ""
}
|
from .websocket_server import *
|
{
"content_hash": "737aa2296b1a0d27530494a1a9cff0f4",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 31,
"avg_line_length": 33,
"alnum_prop": 0.7575757575757576,
"repo_name": "ijonglin/WsJsPy",
"id": "4651755d63900b43a25b3c30b1e1651fadd81411",
"size": "33",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Py/Ws/websocket_server/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "870"
},
{
"name": "JavaScript",
"bytes": "5554"
},
{
"name": "Makefile",
"bytes": "169"
},
{
"name": "Python",
"bytes": "28799"
}
],
"symlink_target": ""
}
|
import os
import sys
# import source code dir
here = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, here)
sys.path.insert(0, os.path.join(here, os.pardir))
SITE_ID = 300
DEBUG = True
ROOT_URLCONF = 'tests.urls'
SECRET_KEY = 'skskqlqlaskdsd'
AUTOCOMMIT = True
DATABASES = {
'default': {
'NAME': 'test.db',
'ENGINE': 'django.db.backends.sqlite3',
'USER': '',
'PASSWORD': '',
'PORT': '',
},
}
NEOMODEL_NEO4J_BOLT_URL = os.environ.get('NEO4J_BOLT_URL', 'bolt://neo4j:foobar@localhost:7687')
NEOMODEL_SIGNALS = True
NEOMODEL_FORCE_TIMEZONE = False
NEOMODEL_ENCRYPTED_CONNECTION = False
NEOMODEL_MAX_POOL_SIZE = 50
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
},
]
INSTALLED_APPS = [
# Django
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
# Third party
'django_neomodel',
# Test
'tests.someapp',
]
USE_TZ = True
TIME_ZONE = 'UTC'
MIDDLEWARE = []
|
{
"content_hash": "14af57924482e02a0e2c2ad63393e606",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 96,
"avg_line_length": 18.724137931034484,
"alnum_prop": 0.6261510128913443,
"repo_name": "robinedwards/django-neomodel",
"id": "171c81bc470b41dc45e18715471214cefd1dfb34",
"size": "1086",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17168"
}
],
"symlink_target": ""
}
|
"""Tracing metaclass.
XXX This is very much a work in progress.
"""
import sys
class TraceMetaClass:
"""Metaclass for tracing.
Classes defined using this metaclass have an automatic tracing
feature -- by setting the __trace_output__ instance (or class)
variable to a file object, trace messages about all calls are
written to the file. The trace formatting can be changed by
defining a suitable __trace_call__ method.
"""
__inited = 0
def __init__(self, name = "", bases = [], dict = {}):
self.__name__ = name
self.__bases__ = bases
self.__dict = dict
# XXX Can't define __dict__, alas
self.__inited = 1
def __getattr__(self, name):
try:
return self.__dict[name]
except KeyError:
for base in self.__bases__:
try:
return base.__getattr__(name)
except AttributeError:
pass
raise AttributeError, name
def __setattr__(self, name, value):
if not self.__inited:
self.__dict__[name] = value
else:
self.__dict[name] = value
def __call__(self, *args, **kw):
inst = TracingInstance()
inst.__meta_init__(self)
try:
init = inst.__getattr__('__init__')
except AttributeError:
init = lambda: None
apply(init, args, kw)
return inst
__trace_output__ = None
class TracingInstance:
"""Helper class to represent an instance of a tracing class."""
myhash = {}
def __trace_call__(self, fp, fmt, *args):
args2 = []
for i in range(0, len(args)):
args2 += [args[i].__str__()]
if (isinstance(args2[i], unicode)):
args2[i] = args2[i].encode("latin-1")
# for i in range(0, len(args)):
# args[i] = args[i].__str__()
# if (isinstance(args[i], unicode)):
# args[i] = args[i].encode("latin-1")
str = (fmt + "\n") % tuple(args2)
fp.write(str)
fp.flush()
def __meta_init__(self, klass):
self.__class = klass
def __getattr__(self, name):
# Invoked for any attr not in the instance's __dict__
try:
raw = self.__class.__getattr__(name)
except AttributeError:
raise AttributeError, name
# print "raw %s" % (raw)
try:
raw.__call__
except:
return raw
# It's a function
fullname = self.__class.__name__ + "." + name
if (self.myhash.has_key(fullname)):
return self.myhash[fullname]
else:
newWrapper = TracingWrapper(fullname, raw, self)
self.myhash[fullname] = newWrapper
return newWrapper
# if not self.__trace_output__ or name == '__trace_call__':
# return NotTracingWrapper(fullname, raw, self)
# else:
# return TracingWrapper(fullname, raw, self)
class NotTracingWrapper:
def __init__(self, name, func, inst):
self.__name__ = name
self.func = func
self.inst = inst
def __call__(self, *args, **kw):
return apply(self.func, (self.inst,) + args, kw)
Traced = None
if (sys.platform == "win32"):
Traced = TraceMetaClass('Traced', (), {'__trace_output__': \
open("C:/fftrace.log", "a"), \
'__filter_funcs__': []})
else:
Traced = TraceMetaClass('Traced', (), {'__trace_output__': \
open("/tmp/fftrace.log", "a"), \
'__filter_funcs__': []})
class TracingWrapper(NotTracingWrapper):
def __call__(self, *args, **kw):
if (self.__name__.split(".")[-1] in self.inst.__filter_funcs__) or \
(not self.inst.__trace_output__):
rv = apply(self.func, (self.inst,) + args, kw)
return rv
else:
self.inst.__trace_call__(self.inst.__trace_output__,
"calling %s, args=%s, kw=%s",
self.__name__, args, kw)
try:
rv = apply(self.func, (self.inst,) + args, kw)
self.inst.__trace_call__(self.inst.__trace_output__,
"returning from %s with value %s",
self.__name__, rv)
return rv
except Exception, inst:
self.inst.__trace_call__(self.inst.__trace_output__,
"Exception in %s: %s", self.__name__, inst)
raise inst
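
# A minimal usage sketch (class and method names are hypothetical). Deriving
# from Traced routes class creation through TraceMetaClass, so every method
# call on an instance is written to the trace log configured above:
#
#   class Mixer(Traced):
#       def set_volume(self, value):
#           return value
#
#   Mixer().set_volume(90)    # call and return value appear in fftrace.log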
|
{
"content_hash": "a453e64080d208c8b47a2ccf540c190d",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 79,
"avg_line_length": 30.47517730496454,
"alnum_prop": 0.5310681871072842,
"repo_name": "jim-cooley/abletonremotescripts",
"id": "998290f8429fb3aa299410eecb4dfd1b0e90cd6b",
"size": "4297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "remote-scripts/samples/FCB1010_Custom/Tracing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "55142"
},
{
"name": "Python",
"bytes": "1506667"
},
{
"name": "Shell",
"bytes": "1266"
}
],
"symlink_target": ""
}
|
import datetime
from math import sin, cos, pi
from volttron.platform.vip.agent import Agent, RPC, Core
from volttron.platform.agent import utils
# The 'connector' api doesn't come with a nice
# way to install itself so we have it added
# as a subtree here. Hopefully this will
# change in the future.
import sys
sys.path.insert(0, './ddsagent/rticonnextdds-connector-master')
import rticonnextdds_connector as rti
class DDSAgent(Agent):
def __init__(self, config_path, **kwargs):
super(DDSAgent, self).__init__(**kwargs)
self.reader = {}
self.writer = {}
config = utils.load_config(config_path)
for typename, type_config in config.iteritems():
participant_name = type_config['participant_name']
xml_config_path = type_config['xml_config_path']
publisher_name = type_config['publisher_name']
subscriber_name = type_config['subscriber_name']
connector = rti.Connector(participant_name, xml_config_path)
self.writer[typename] = connector.getOutput(publisher_name)
self.reader[typename] = connector.getInput(subscriber_name)
@Core.periodic(1)
def publish_demo(self):
"""
Publish a square that follows a circular path.
Can be visualized by running the *rtishapesdemo*
program and subscribing to *square*.
"""
sample = {"shapesize": 30,
"color": "BLUE"}
center = 100
radius = 50
now = datetime.datetime.now()
radians = pi * float(now.second) / 15.0
sample['x'] = center + int(radius * cos(radians))
sample['y'] = center + int(radius * sin(radians))
self.write_to_dds('square', sample)
@RPC.export
def read_from_dds(self, typename):
""" RPC method
Read samples from the DDS message bus.
A data access method must be called before we can
examine `samples` in the vernacular of DDS. This
        example uses read(), which *does not* modify the
reader's receive queue. The other option is take(),
which *does* remove data from the receive queue.
:param typename: Name of the type to read.
:type typename: str
:returns: samples available on the DDS message bus
:rtype: list of dictionaries
.. warning:: Attempting to read a type of **typename**
that was not in the config file will raise
KeyError.
"""
reader = self.reader[typename]
reader.read()
# For this example we'll return all samples we can see
samples = []
# Find out how many samples we have so
# they can be explicitly indexed
n_samples = reader.samples.getLength()
# Indexes start at one. Yuck.
for i in range(1, n_samples + 1):
if reader.infos.isValid(i):
# Struct fields can be retrieved as a dict
# or accessed individually. A dictionary
# will be easier in most cases.
d = reader.samples.getDictionary(i)
samples.append(d)
return samples
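
    # Illustrative RPC call from another VOLTTRON agent (the 'ddsagent'
    # identity and the 5-second timeout are hypothetical; 'square' must be a
    # typename configured for this agent):
    #
    #   samples = self.vip.rpc.call('ddsagent', 'read_from_dds', 'square').get(timeout=5)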
@RPC.export
def write_to_dds(self, typename, sample):
""" RPC method
Write sample to the DDS message bus.
:param typename: Name of the type to write.
:type typename: str
:param sample: Data to write to DDS bus.
:type sample: dict
.. warning:: Attempting to write to a type of **typename**
that was not in the config file will raise
KeyError.
"""
writer = self.writer[typename]
writer.instance.setDictionary(sample)
writer.write()
def main():
utils.vip_main(DDSAgent)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
pass
|
{
"content_hash": "7096d57dae406c7aae7082f6d3f5cffe",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 72,
"avg_line_length": 30.976190476190474,
"alnum_prop": 0.5962080450935178,
"repo_name": "schandrika/volttron",
"id": "292eabbcfbdc8c81faab8e77308159522acfe1a1",
"size": "6795",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/DDSAgent/ddsagent/agent.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "33023"
},
{
"name": "HTML",
"bytes": "61489"
},
{
"name": "JavaScript",
"bytes": "497583"
},
{
"name": "Python",
"bytes": "3090478"
},
{
"name": "Shell",
"bytes": "41093"
}
],
"symlink_target": ""
}
|
from instapy_cli import client
username = 'USERNAME'
password = 'PASSWORD'
cookie = '{COOKIE_STRING_JSON_OBJ}'
with client(username, password, cookie=cookie) as cli:
# get string cookies
cookies = cli.get_cookie()
print(type(cookies)) # == str
print(cookies)
    # do stuff with cli
ig = cli.api()
me = ig.current_user()
print(me)
|
{
"content_hash": "9a0559e632a5681013a6db9c95ad1335",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 54,
"avg_line_length": 24.133333333333333,
"alnum_prop": 0.6546961325966851,
"repo_name": "b3nab/instapy-cli",
"id": "21faf816c34715986817277a89f48ebf0089d720",
"size": "362",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/login-cookie.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11670"
},
{
"name": "Shell",
"bytes": "273"
}
],
"symlink_target": ""
}
|
import json
import logging
import math
import re
import tba_config
import urllib
from difflib import SequenceMatcher
from google.appengine.api import memcache, urlfetch
from google.appengine.ext import ndb
from models.location import Location
from models.sitevar import Sitevar
from models.team import Team
class LocationHelper(object):
GOOGLE_API_KEY = None
@classmethod
def get_similarity(cls, a, b):
"""
Returns max(similarity between two strings ignoring case,
similarity between two strings ignoring case and order,
similarity between acronym(a) & b,
similarity between a & acronym(b)) from 0 to 1
where acronym() is generated by splitting along non word characters
Ignores case and order
"""
a = a.lower().strip()
b = b.lower().strip()
a_split = filter(lambda x: x, re.split('\s+|,|-', a))
b_split = filter(lambda x: x, re.split('\s+|,|-', b))
a_sorted = ' '.join(sorted(a_split))
b_sorted = ' '.join(sorted(b_split))
a_acr = ''.join([w[0] if w else '' for w in a_split]).lower()
b_acr = ''.join([w[0] if w else '' for w in b_split]).lower()
sm1 = SequenceMatcher(None, a, b)
sm2 = SequenceMatcher(None, a_sorted, b_sorted)
sm3 = SequenceMatcher(None, a_acr, b)
sm4 = SequenceMatcher(None, a, b_acr)
return max([
sm1.ratio(),
sm2.ratio(),
sm3.ratio(),
sm4.ratio(),
])
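
    # Illustrative behaviour (inputs are hypothetical): the acronym comparison
    # makes get_similarity('MIT', 'Massachusetts Institute Technology') return
    # 1.0, and the order-insensitive comparison makes
    # get_similarity('High School Lake Shore', 'Lake Shore High School')
    # return 1.0 as well.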
@classmethod
def update_event_location(cls, event):
if not event.location:
return
if event.normalized_location: # Only set normalized_location once
return
location_info, score = cls.get_event_location_info(event)
# Log performance
text = "Event {} location score: {}".format(event.key.id(), score)
if score < 0.8:
logging.warning(text)
else:
logging.info(text)
# Fallback to location only
if not location_info:
logging.warning("Falling back to location only for event {}".format(event.key.id()))
geocode_result = cls.google_maps_geocode_async(event.location).get_result()
if geocode_result:
location_info = cls.construct_location_info_async(geocode_result[0]).get_result()
else:
logging.warning("Event {} location failed!".format(event.key.id()))
# Update event
if 'lat' in location_info and 'lng' in location_info:
lat_lng = ndb.GeoPt(location_info['lat'], location_info['lng'])
else:
lat_lng = None
event.normalized_location = Location(
name=location_info.get('name'),
formatted_address=location_info.get('formatted_address'),
lat_lng=lat_lng,
street_number=location_info.get('street_number'),
street=location_info.get('street'),
city=location_info.get('city'),
state_prov=location_info.get('state_prov'),
state_prov_short=location_info.get('state_prov_short'),
country=location_info.get('country'),
country_short=location_info.get('country_short'),
postal_code=location_info.get('postal_code'),
place_id=location_info.get('place_id'),
place_details=location_info.get('place_details'),
)
@classmethod
def get_event_location_info(cls, event):
"""
Search for different combinations of venue, venue_address, city,
state_prov, postalcode, and country in attempt to find the correct
location associated with the event.
"""
        # Possible queries that may yield a matching location
if event.venue:
possible_queries = [event.venue]
else:
possible_queries = []
if event.venue_address:
split_address = event.venue_address.split('\n')
# Venue takes up at most 2 lines. Isolate address
possible_queries.append(' '.join(split_address[1:]))
possible_queries.append(' '.join(split_address[2:]))
# Geocode for lat/lng
lat_lng = cls.get_lat_lng(event.location)
if not lat_lng:
return {}, 0
# Try to find place based on possible queries
best_score = 0
best_location_info = {}
nearbysearch_results_candidates = [] # More trustworthy candidates are added first
for j, query in enumerate(possible_queries):
# Try both searches
nearbysearch_places = cls.google_maps_placesearch_async(query, lat_lng)
textsearch_places = cls.google_maps_placesearch_async(query, lat_lng, textsearch=True)
for results_future in [nearbysearch_places, textsearch_places]:
for i, place in enumerate(results_future.get_result()[:5]):
location_info = cls.construct_location_info_async(place).get_result()
score = cls.compute_event_location_score(query, location_info, lat_lng)
score *= pow(0.7, j) * pow(0.7, i) # discount by ranking
if score == 1:
return location_info, score
elif score > best_score:
best_location_info = location_info
best_score = score
return best_location_info, best_score
@classmethod
def compute_event_location_score(cls, query_name, location_info, lat_lng):
"""
Score for correctness. 1.0 is perfect.
Not checking for absolute equality in case of existing data errors.
"""
# TODO FIX: Hacky special case for weird event. 2017-01-18 -fangeugene
if 'Shenzhen' in query_name and location_info['name'] != 'Shenzhen University Town Sports Center':
return 0
        # Check radius: haversine great-circle distance between query point and candidate
R = 6373.0 # approximate radius of earth in km
lat1 = math.radians(lat_lng[0])
lon1 = math.radians(lat_lng[1])
lat2 = math.radians(location_info['lat'])
lon2 = math.radians(location_info['lng'])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = math.sin(dlat / 2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2)**2
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
distance = R * c
if distance > 100:
return 0
if {'point_of_interest', 'premise'}.intersection(set(location_info.get('types', ''))):
score = pow(max(
cls.get_similarity(query_name, location_info['name']),
cls.get_similarity(query_name, location_info['formatted_address'])), 1.0/3)
else:
score = 0
return score
@classmethod
def update_team_location(cls, team):
if not team.location:
return
# # Try with and without textsearch, pick best
# location_info, score = cls.get_team_location_info(team)
# if score < 0.7:
# logging.warning("Using textsearch for {}".format(team.key.id()))
# location_info2, score2 = cls.get_team_location_info(team, textsearch=True)
# if score2 > score:
# location_info = location_info2
# score = score2
# # Log performance
# text = "Team {} location score: {}".format(team.key.id(), score)
# if score < 0.8:
# logging.warning(text)
# else:
# logging.info(text)
# # Don't trust anything below a certain threshold Super strict for now.
# if score < 0.9:
# logging.warning("Location score too low for team {}".format(team.key.id()))
# location_info = {}
location_info = {} # Force imprecise locations
# Fallback to location only
if not location_info:
# logging.warning("Falling back to location only for team {}".format(team.key.id()))
geocode_result = cls.google_maps_geocode_async(team.location).get_result()
if geocode_result:
location_info = cls.construct_location_info_async(geocode_result[0]).get_result()
# Fallback to city, country
if not location_info:
logging.warning("Falling back to city/country only for team {}".format(team.key.id()))
city_country = u'{} {}'.format(
team.city if team.city else '',
team.country if team.country else '')
geocode_result = cls.google_maps_geocode_async(city_country).get_result()
if geocode_result:
location_info = cls.construct_location_info_async(geocode_result[0]).get_result()
else:
logging.warning("Team {} location failed!".format(team.key.id()))
# Update team
if 'lat' in location_info and 'lng' in location_info:
lat_lng = ndb.GeoPt(location_info['lat'], location_info['lng'])
else:
lat_lng = None
team.normalized_location = Location(
name=location_info.get('name'),
formatted_address=location_info.get('formatted_address'),
lat_lng=lat_lng,
street_number=location_info.get('street_number'),
street=location_info.get('street'),
city=location_info.get('city'),
state_prov=location_info.get('state_prov'),
state_prov_short=location_info.get('state_prov_short'),
country=location_info.get('country'),
country_short=location_info.get('country_short'),
postal_code=location_info.get('postal_code'),
place_id=location_info.get('place_id'),
place_details=location_info.get('place_details'),
)
@classmethod
def get_team_location_info(cls, team, textsearch=False):
"""
Search for different combinations of team name (which should include
high school or title sponsor) with city, state_prov, postalcode, and country
in attempt to find the correct location associated with the team.
"""
# Find possible schools/title sponsors
possible_names = []
MAX_SPLIT = 3 # Filters out long names that are unlikely
if team.name:
# Guessing sponsors/school by splitting name by '/' or '&'
split1 = re.split('&', team.name)
split2 = re.split('/', team.name)
if split1 and \
split1[-1].count('&') < MAX_SPLIT and split1[-1].count('/') < MAX_SPLIT:
possible_names.append(split1[-1])
if split2 and split2[-1] not in possible_names and \
split2[-1].count('&') < MAX_SPLIT and split2[-1].count('/') < MAX_SPLIT:
possible_names.append(split2[-1])
if split1 and split1[0] not in possible_names and \
split1[0].count('&') < MAX_SPLIT and split1[0].count('/') < MAX_SPLIT:
possible_names.append(split1[0])
if split2 and split2[0] not in possible_names and \
split2[0].count('&') < MAX_SPLIT and split2[0].count('/') < MAX_SPLIT:
possible_names.append(split2[0])
# Geocode for lat/lng
lat_lng = cls.get_lat_lng(team.location)
if not lat_lng:
return {}, 0
# Try to find place based on possible queries
best_score = 0
best_location_info = {}
nearbysearch_results_candidates = [] # More trustworthy candidates are added first
for j, name in enumerate(possible_names):
places = cls.google_maps_placesearch_async(name, lat_lng, textsearch=textsearch).get_result()
for i, place in enumerate(places[:5]):
location_info = cls.construct_location_info_async(place).get_result()
score = cls.compute_team_location_score(name, location_info, lat_lng)
score *= pow(0.9, 0 if j < 2 else 1) * pow(0.9, i) # discount by ranking
if score == 1:
return location_info, score
elif score > best_score:
best_location_info = location_info
best_score = score
return best_location_info, best_score
@classmethod
def compute_team_location_score(cls, query_name, location_info, lat_lng):
"""
Score for correctness. 1.0 is perfect.
Not checking for absolute equality in case of existing data errors.
"""
        # Check radius: haversine great-circle distance between query point and candidate
R = 6373.0 # approximate radius of earth in km
lat1 = math.radians(lat_lng[0])
lon1 = math.radians(lat_lng[1])
lat2 = math.radians(location_info['lat'])
lon2 = math.radians(location_info['lng'])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = math.sin(dlat / 2)**2 + math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2)**2
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
distance = R * c
if distance > 100:
return 0
query_name = query_name.lower().replace('school', '').replace('high', '')
result_name = location_info['name'].lower().replace('school', '').replace('high', '')
score = pow(cls.get_similarity(query_name, result_name), 0.7)
if not {'school', 'university'}.intersection(set(location_info.get('types', ''))):
score *= 0.9
return score
@classmethod
@ndb.tasklet
def construct_location_info_async(cls, gmaps_result):
"""
Gets location info given a gmaps result
"""
location_info = {
'place_id': gmaps_result['place_id'],
'lat': gmaps_result['geometry']['location']['lat'],
'lng': gmaps_result['geometry']['location']['lng'],
'name': gmaps_result.get('name'),
'types': gmaps_result['types'],
}
place_details_result = yield cls.google_maps_place_details_async(gmaps_result['place_id'])
if place_details_result:
has_city = False
for component in place_details_result['address_components']:
if 'street_number' in component['types']:
location_info['street_number'] = component['long_name']
elif 'route' in component['types']:
location_info['street'] = component['long_name']
elif 'locality' in component['types']:
location_info['city'] = component['long_name']
has_city = True
elif 'administrative_area_level_1' in component['types']:
location_info['state_prov'] = component['long_name']
location_info['state_prov_short'] = component['short_name']
elif 'country' in component['types']:
location_info['country'] = component['long_name']
location_info['country_short'] = component['short_name']
elif 'postal_code' in component['types']:
location_info['postal_code'] = component['long_name']
# Special case for when there is no city
if not has_city and 'state_prov' in location_info:
location_info['city'] = location_info['state_prov']
location_info['formatted_address'] = place_details_result['formatted_address']
# Save everything just in case
location_info['place_details'] = place_details_result
raise ndb.Return(location_info)
@classmethod
@ndb.tasklet
def google_maps_placesearch_async(cls, query, lat_lng, textsearch=False):
"""
https://developers.google.com/places/web-service/search#nearbysearchRequests
https://developers.google.com/places/web-service/search#TextSearchRequests
"""
if not cls.GOOGLE_API_KEY:
GOOGLE_SECRETS = Sitevar.get_by_id("google.secrets")
if GOOGLE_SECRETS:
cls.GOOGLE_API_KEY = GOOGLE_SECRETS.contents['api_key']
else:
logging.warning("Must have sitevar google.api_key to use Google Maps nearbysearch")
raise ndb.Return([])
search_type = 'textsearch' if textsearch else 'nearbysearch'
results = None
if query:
query = query.encode('ascii', 'ignore')
cache_key = u'google_maps_{}:{}'.format(search_type, query)
results = memcache.get(cache_key)
if results is None:
search_params = {
'key': cls.GOOGLE_API_KEY,
'location': '{},{}'.format(lat_lng[0], lat_lng[1]),
'radius': 25000,
}
if textsearch:
search_params['query'] = query
else:
search_params['keyword'] = query
search_url = 'https://maps.googleapis.com/maps/api/place/{}/json?{}'.format(search_type, urllib.urlencode(search_params))
try:
# Make async urlfetch call
context = ndb.get_context()
search_result = yield context.urlfetch(search_url)
# Parse urlfetch result
if search_result.status_code == 200:
search_dict = json.loads(search_result.content)
if search_dict['status'] == 'ZERO_RESULTS':
logging.info('No {} results for query: {}, lat_lng: {}'.format(search_type, query, lat_lng))
elif search_dict['status'] == 'OK':
results = search_dict['results']
else:
logging.warning(u'{} failed with query: {}, lat_lng: {}'.format(search_type, query, lat_lng))
logging.warning(search_dict)
else:
logging.warning(u'{} failed with query: {}, lat_lng: {}'.format(search_type, query, lat_lng))
                        logging.warning('HTTP status: {}'.format(search_result.status_code))
except Exception, e:
logging.warning(u'urlfetch for {} request failed with query: {}, lat_lng: {}'.format(search_type, query, lat_lng))
logging.warning(e)
memcache.set(cache_key, results if results else [])
raise ndb.Return(results if results else [])
@classmethod
@ndb.tasklet
def google_maps_place_details_async(cls, place_id):
"""
https://developers.google.com/places/web-service/details#PlaceDetailsRequests
"""
if not cls.GOOGLE_API_KEY:
GOOGLE_SECRETS = Sitevar.get_by_id("google.secrets")
if GOOGLE_SECRETS:
cls.GOOGLE_API_KEY = GOOGLE_SECRETS.contents['api_key']
else:
logging.warning("Must have sitevar google.api_key to use Google Maps PlaceDetails")
raise ndb.Return(None)
cache_key = u'google_maps_place_details:{}'.format(place_id)
result = memcache.get(cache_key)
if result is None:
place_details_params = {
'placeid': place_id,
'key': cls.GOOGLE_API_KEY,
}
place_details_url = 'https://maps.googleapis.com/maps/api/place/details/json?%s' % urllib.urlencode(place_details_params)
try:
# Make async urlfetch call
context = ndb.get_context()
place_details_result = yield context.urlfetch(place_details_url)
# Parse urlfetch call
if place_details_result.status_code == 200:
place_details_dict = json.loads(place_details_result.content)
if place_details_dict['status'] == 'ZERO_RESULTS':
logging.info('No place_details result for place_id: {}'.format(place_id))
elif place_details_dict['status'] == 'OK':
result = place_details_dict['result']
else:
logging.warning('Placedetails failed with place_id: {}.'.format(place_id))
logging.warning(place_details_dict)
else:
logging.warning('Placedetails failed with place_id: {}.'.format(place_id))
except Exception, e:
logging.warning('urlfetch for place_details request failed with place_id: {}.'.format(place_id))
logging.warning(e)
if tba_config.CONFIG['memcache']:
memcache.set(cache_key, result)
raise ndb.Return(result)
@classmethod
def get_lat_lng(cls, location):
results = cls.google_maps_geocode_async(location).get_result()
if results:
return results[0]['geometry']['location']['lat'], results[0]['geometry']['location']['lng']
else:
return None
@classmethod
@ndb.tasklet
def google_maps_geocode_async(cls, location):
cache_key = u'google_maps_geocode:{}'.format(location)
results = memcache.get(cache_key)
if results is None:
context = ndb.get_context()
if not location:
raise ndb.Return([])
location = location.encode('utf-8')
google_secrets = Sitevar.get_by_id("google.secrets")
google_api_key = None
if google_secrets is None:
logging.warning("Missing sitevar: google.api_key. API calls rate limited by IP and may be over rate limit.")
else:
google_api_key = google_secrets.contents['api_key']
geocode_params = {
'address': location,
'sensor': 'false',
}
if google_api_key:
geocode_params['key'] = google_api_key
geocode_url = 'https://maps.googleapis.com/maps/api/geocode/json?%s' % urllib.urlencode(geocode_params)
try:
geocode_results = yield context.urlfetch(geocode_url)
if geocode_results.status_code == 200:
geocode_dict = json.loads(geocode_results.content)
if geocode_dict['status'] == 'ZERO_RESULTS':
logging.info('No geocode results for location: {}'.format(location))
elif geocode_dict['status'] == 'OK':
results = geocode_dict['results']
else:
logging.warning('Geocoding failed!')
logging.warning(geocode_dict)
else:
logging.warning('Geocoding failed for location {}.'.format(location))
except Exception, e:
logging.warning('urlfetch for geocode request failed for location {}.'.format(location))
logging.warning(e)
memcache.set(cache_key, results if results else [])
raise ndb.Return(results if results else [])
@classmethod
def get_timezone_id(cls, location, lat_lng=None):
if lat_lng is None:
result = cls.get_lat_lng(location)
if result is None:
return None
else:
lat, lng = result
else:
lat, lng = lat_lng.lat, lat_lng.lon
google_secrets = Sitevar.get_by_id("google.secrets")
google_api_key = None
if google_secrets is None:
logging.warning("Missing sitevar: google.api_key. API calls rate limited by IP and may be over rate limit.")
else:
google_api_key = google_secrets.contents['api_key']
# timezone request
tz_params = {
'location': '%s,%s' % (lat, lng),
'timestamp': 0, # we only care about timeZoneId, which doesn't depend on timestamp
'sensor': 'false',
}
if google_api_key is not None:
tz_params['key'] = google_api_key
tz_url = 'https://maps.googleapis.com/maps/api/timezone/json?%s' % urllib.urlencode(tz_params)
try:
tz_result = urlfetch.fetch(tz_url)
except Exception, e:
logging.warning('urlfetch for timezone request failed: {}'.format(tz_url))
logging.info(e)
return None
if tz_result.status_code != 200:
logging.warning('TZ lookup for (lat, lng) failed! ({}, {})'.format(lat, lng))
return None
tz_dict = json.loads(tz_result.content)
if 'timeZoneId' not in tz_dict:
            logging.warning('No timeZoneId for (lat, lng) ({}, {})'.format(lat, lng))
return None
return tz_dict['timeZoneId']
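
    # Illustrative direct call (coordinates are hypothetical; a point in San
    # Francisco should resolve to 'America/Los_Angeles' via the Google Time
    # Zone API):
    #
    #   LocationHelper.get_timezone_id(None, lat_lng=ndb.GeoPt(37.77, -122.42))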
|
{
"content_hash": "8edc3227161d19bd1dd303ce9da0851c",
"timestamp": "",
"source": "github",
"line_count": 574,
"max_line_length": 137,
"avg_line_length": 43.254355400696866,
"alnum_prop": 0.5579184791364589,
"repo_name": "verycumbersome/the-blue-alliance",
"id": "6c66aa01bd52d205975b3a66fdaa316f87aea8e5",
"size": "24828",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "helpers/location_helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "101"
},
{
"name": "CSS",
"bytes": "374878"
},
{
"name": "HTML",
"bytes": "716180"
},
{
"name": "JavaScript",
"bytes": "404672"
},
{
"name": "PHP",
"bytes": "10727"
},
{
"name": "Python",
"bytes": "2086438"
},
{
"name": "Ruby",
"bytes": "3494"
},
{
"name": "Shell",
"bytes": "45"
}
],
"symlink_target": ""
}
|
import logging
import django
from django.utils.log import AdminEmailHandler
class DbbackupAdminEmailHandler(AdminEmailHandler):
def emit(self, record):
# Monkey patch for old Django versions without send_mail method
if django.VERSION < (1, 8):
from . import utils
django.core.mail.mail_admins = utils.mail_admins
super(DbbackupAdminEmailHandler, self).emit(record)
def send_mail(self, subject, message, *args, **kwargs):
from . import utils
utils.mail_admins(subject, message, *args, connection=self.connection(), **kwargs)
class MailEnabledFilter(logging.Filter):
def filter(self, record):
from .settings import SEND_EMAIL
return SEND_EMAIL
def load():
mail_admins_handler = DbbackupAdminEmailHandler(include_html=True)
mail_admins_handler.setLevel(logging.ERROR)
mail_admins_handler.addFilter(MailEnabledFilter())
logger = logging.getLogger("dbbackup")
logger.setLevel(logging.INFO)
logger.handlers = [mail_admins_handler]
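
# A minimal usage sketch (the error text is hypothetical). After load(), any
# record at ERROR level on the "dbbackup" logger is mailed to ADMINS whenever
# the SEND_EMAIL setting is true:
#
#   load()
#   logging.getLogger("dbbackup").error("backup failed")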
|
{
"content_hash": "fbea79fd85bf978214a52164f2dbdc8a",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 90,
"avg_line_length": 32.78125,
"alnum_prop": 0.7054337464251669,
"repo_name": "django-dbbackup/django-dbbackup",
"id": "9bb94936882c9727ce5c39dc0c7da16c4acbaa87",
"size": "1049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dbbackup/log.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "493"
},
{
"name": "Python",
"bytes": "155224"
},
{
"name": "Shell",
"bytes": "2010"
}
],
"symlink_target": ""
}
|
import logging
from os.path import dirname
import imp
import sys
from six.moves.queue import Queue
from tornado import ioloop
from tornado.web import Application, FallbackHandler
from tornado.wsgi import WSGIContainer
from .settings import settings as server_settings
from ..settings import settings as bokeh_settings
from . import websocket
##bokeh_app is badly named - it's really a blueprint
from .app import bokeh_app, app
from .models import user
from .zmqpub import Publisher
from .zmqsub import Subscriber
from .forwarder import Forwarder
from .server_backends import (
FunctionBackend, HDF5DataBackend, InMemoryServerModelStorage,
MultiUserAuthentication, RedisServerModelStorage, ShelveServerModelStorage,
SingleUserAuthentication,
)
from .serverbb import (
InMemoryBackboneStorage, RedisBackboneStorage, ShelveBackboneStorage
)
REDIS_PORT = 6379
def configure_flask(config_argparse=None, config_file=None, config_dict=None):
if config_argparse:
server_settings.from_args(config_argparse)
if config_dict:
server_settings.from_dict(config_dict)
if config_file:
server_settings.from_file(config_file)
for handler in logging.getLogger().handlers:
handler.addFilter(StaticFilter())
# must import views before running apps
from .views import deps
# this just shuts up pyflakes
deps
backend = server_settings.model_backend
if backend['type'] == 'redis':
import redis
rhost = backend.get('redis_host', '127.0.0.1')
rport = backend.get('redis_port', REDIS_PORT)
bbstorage = RedisBackboneStorage(redis.Redis(host=rhost, port=rport, db=2))
servermodel_storage = RedisServerModelStorage(redis.Redis(host=rhost,
port=rport, db=3))
elif backend['type'] == 'memory':
bbstorage = InMemoryBackboneStorage()
servermodel_storage = InMemoryServerModelStorage()
elif backend['type'] == 'shelve':
bbstorage = ShelveBackboneStorage()
servermodel_storage = ShelveServerModelStorage()
if not server_settings.multi_user:
authentication = SingleUserAuthentication()
else:
authentication = MultiUserAuthentication()
if server_settings.data_directory:
data_manager = HDF5DataBackend(server_settings.data_directory)
else:
data_manager = FunctionBackend()
bokeh_app.url_prefix = server_settings.url_prefix
bokeh_app.publisher = Publisher(server_settings.ctx, server_settings.pub_zmqaddr, Queue())
for script in server_settings.scripts:
script_dir = dirname(script)
if script_dir not in sys.path:
print ("adding %s to python path" % script_dir)
sys.path.append(script_dir)
print ("importing %s" % script)
imp.load_source("_bokeh_app", script)
#todo - push some of this into bokeh_app.setup?
bokeh_app.setup(
backend,
bbstorage,
servermodel_storage,
authentication,
data_manager
)
def register_blueprint():
app.register_blueprint(bokeh_app, url_prefix=server_settings.url_prefix)
class SimpleBokehTornadoApp(Application):
def __init__(self, flask_app, **settings):
self.flask_app = flask_app
tornado_flask = WSGIContainer(flask_app)
url_prefix = server_settings.url_prefix
handlers = [
(url_prefix + "/bokeh/sub", websocket.WebSocketHandler),
(r".*", FallbackHandler, dict(fallback=tornado_flask))
]
super(SimpleBokehTornadoApp, self).__init__(handlers, **settings)
self.wsmanager = websocket.WebSocketManager()
self.subscriber = Subscriber(server_settings.ctx, [server_settings.sub_zmqaddr], self.wsmanager)
if server_settings.run_forwarder:
self.forwarder = Forwarder(server_settings.ctx, server_settings.pub_zmqaddr, server_settings.sub_zmqaddr)
else:
self.forwarder = None
def start_threads(self):
bokeh_app.publisher.start()
self.subscriber.start()
if self.forwarder:
self.forwarder.start()
def stop_threads(self):
bokeh_app.publisher.stop()
self.subscriber.stop()
if self.forwarder:
self.forwarder.stop()
class StaticFilter(logging.Filter):
def filter(self, record):
msg = record.getMessage()
return not (msg.startswith(("200 GET /static", "200 GET /bokehjs/static")))
def make_tornado_app(flask_app=None):
if flask_app is None:
flask_app = app
if server_settings.debug:
flask_app.debug = True
flask_app.secret_key = server_settings.secret_key
tornado_app = SimpleBokehTornadoApp(flask_app, debug=server_settings.debug)
tornado_app.start_threads()
return tornado_app
# Gunicorn startup would look like
# gunicorn bokeh.server.configure.make_tornado_app(config_file=filename) -k tornado
# untested - but should work
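
# A standalone startup sketch (port 5006 and the config dict contents are
# hypothetical; configure_flask must run before the tornado app is built):
#
#   configure_flask(config_dict={...})
#   register_blueprint()
#   make_tornado_app().listen(5006)
#   ioloop.IOLoop.instance().start()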
|
{
"content_hash": "f6ac0d0d5b61cd126436195df1355ba0",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 117,
"avg_line_length": 35.183098591549296,
"alnum_prop": 0.6813450760608487,
"repo_name": "almarklein/bokeh",
"id": "7bf073e3cbe7791981c9566d6ff291c01f2ee44b",
"size": "4996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bokeh/server/configure.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "410607"
},
{
"name": "CoffeeScript",
"bytes": "2138603"
},
{
"name": "JavaScript",
"bytes": "349966"
},
{
"name": "Makefile",
"bytes": "6253"
},
{
"name": "Python",
"bytes": "1543731"
},
{
"name": "Scala",
"bytes": "28963"
},
{
"name": "Shell",
"bytes": "20366"
}
],
"symlink_target": ""
}
|
import logging
import os
from unittest import TestCase
import gevent
from gevent.server import StreamServer
import gevent_dht
import mock
from herd.manager.server import HerdManager
class ManagerTest(TestCase):
def setUp(self):
        real_path = os.path.dirname(os.path.realpath(__file__))
        self.real_path = real_path
        self.config_path = "%s/configs/herd_manage.conf" % (real_path)
self.manager = None
def test_load_config(self):
#Testing fail case
with self.assertRaises(IOError) as err:
HerdManager(config='/etc/etc/sdfsdfasdiff.conf')
missing_exception = err.exception
self.assertEqual(missing_exception.errno, 2)
#Testing load works
self.manager = HerdManager(config=self.config_path)
self.assertEqual(self.manager.address, None)
self.assertEqual(self.manager.port, 8339)
self.assertEqual(self.manager.ip, '127.0.0.1')
self.assertEqual(self.manager.stream_ip, '0.0.0.0')
self.assertEqual(self.manager.stream_port, 8338)
self.manager = HerdManager(port=9999, ip='777.666.555.444', stream_ip='111.222.333.444', stream_port=5555)
self.assertEqual(self.manager.address, None)
self.assertEqual(self.manager.port, 9999)
self.assertEqual(self.manager.ip, '777.666.555.444')
self.assertEqual(self.manager.stream_ip, '111.222.333.444')
self.assertEqual(self.manager.stream_port, 5555)
with self.assertRaises(AttributeError):
HerdManager(config="%s/configs/failed.conf" % (self.real_path))
def test_parse_msg(self):
self.manager = HerdManager(config=self.config_path)
self.manager.table = gevent_dht.distributedHashTable(None)
#Adding things to the table should show up correctly
msg = { 'command': 'ping', 'data': { 'ip': '127.0.0.1', 'groups': ['foo', 'bar'] } }
self.manager._parse_message(msg)
self.assertEqual(self.manager.table['foo'], ['127.0.0.1'])
self.assertEqual(self.manager.table['bar'], ['127.0.0.1'])
self.assertEqual(self.manager.table['group_name'], ['foo', 'bar'])
#You should be able to get the groups
msg2 = { 'command': 'groups', 'data': None }
response2 = self.manager._parse_message(msg2)
self.assertEqual(response2['command'], 'groups')
self.assertEqual(response2['data']['groups'], ['foo', 'bar'])
        #Updating the groups should update the groups
msg3 = { 'command': 'ping', 'data': { 'ip': '127.0.0.2', 'groups': ['baz'] } }
self.manager._parse_message(msg3)
self.assertEqual(self.manager.table['baz'], ['127.0.0.2'])
self.assertEqual(self.manager.table['group_name'], ['foo', 'bar', 'baz'])
response3 = self.manager._parse_message(msg2)
self.assertEqual(response3['command'], 'groups')
self.assertEqual(response3['data']['groups'], ['foo', 'bar', 'baz'])
#Test getting the IPs message
msg4 = { 'command': 'init', 'data': { 'groups': ['foo', 'bar', 'baz'] } }
response4 = self.manager._parse_message(msg4)
self.assertEqual(response4['command'], 'init')
self.assertEqual(response4['data']['groups'][0]['foo'], ['127.0.0.1'])
self.assertEqual(response4['data']['groups'][1]['bar'], ['127.0.0.1'])
self.assertEqual(response4['data']['groups'][2]['baz'], ['127.0.0.2'])
#Test getting managers
msg5 = { 'command': 'managers', 'data': None }
response5 = self.manager._parse_message(msg5)
self.assertEqual(response5['data']['managers'], ['127.0.0.1:8339'])
def test_start_listener(self):
self.manager = HerdManager(port=9999, ip='127.0.0.1', stream_ip='127.0.0.1', stream_port=5555)
with mock.patch('herd.manager.server.StreamServer') as MockClient:
instance = MockClient.return_value
instance.serve_forever.return_value = None
self.manager.start_listener()
self.assertEqual(MockClient.called, True)
self.assertEqual(instance.serve_forever.called, True)
def test_stop_listener(self):
self.manager = HerdManager(port=10000, ip='127.0.0.1', stream_ip='127.0.0.1', stream_port=6666)
with mock.patch('herd.manager.server.StreamServer') as MockClient:
instance = MockClient.return_value
instance.serve_forever.return_value = None
instance.stop.return_value = None
self.manager.start_listener()
self.assertEqual(MockClient.called, True)
self.assertEqual(instance.serve_forever.called, True)
self.manager.stop_listener()
self.assertEqual(instance.stop.called, True)
|
{
"content_hash": "968315294d7837a0be600218973ce5e5",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 114,
"avg_line_length": 45.386792452830186,
"alnum_prop": 0.6185824152982747,
"repo_name": "hoangelos/Herd",
"id": "ebe5c50b6f571adf9c68de9ecf267737a59f15b4",
"size": "5445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_manager_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "92274"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django.db.models import Q
from rest_framework import serializers
from rest_framework.response import Response
from sentry.api.bases.organization import (OrganizationEndpoint, OrganizationPermission)
from sentry.api.exceptions import ResourceDoesNotExist
from sentry.api.serializers import serialize
from sentry.api.serializers.models.team import TeamWithProjectsSerializer
from sentry.auth.superuser import is_active_superuser
from sentry.models import (
AuditLogEntryEvent, OrganizationAccessRequest, OrganizationMember, OrganizationMemberTeam, Team
)
ERR_INSUFFICIENT_ROLE = 'You cannot modify a member other than yourself.'
class OrganizationMemberTeamSerializer(serializers.Serializer):
isActive = serializers.BooleanField()
class RelaxedOrganizationPermission(OrganizationPermission):
_allowed_scopes = [
'org:read',
'org:write',
'org:admin',
'member:read',
'member:write',
'member:admin',
]
scope_map = {
'GET': _allowed_scopes,
'POST': _allowed_scopes,
'PUT': _allowed_scopes,
# DELETE checks for role comparison as you can either remove a member
# with a lower access role, or yourself, without having the req. scope
'DELETE': _allowed_scopes,
}
class OrganizationMemberTeamDetailsEndpoint(OrganizationEndpoint):
permission_classes = [RelaxedOrganizationPermission]
def _can_access(self, request, member):
# TODO(dcramer): ideally org owners/admins could perform these actions
if is_active_superuser(request):
return True
if not request.user.is_authenticated():
return False
if request.user.id == member.user_id:
return True
return False
def _get_member(self, request, organization, member_id):
if member_id == 'me':
queryset = OrganizationMember.objects.filter(
organization=organization,
user__id=request.user.id,
user__is_active=True,
)
else:
queryset = OrganizationMember.objects.filter(
Q(user__is_active=True) | Q(user__isnull=True),
organization=organization,
id=member_id,
)
return queryset.select_related('user').get()
def post(self, request, organization, member_id, team_slug):
"""
Join a team
Join or request access to a team.
If the user is already a member of the team, this will simply return
a 204.
If the user needs permission to join the team, an access request will
be generated and the returned status code will be 202.
"""
try:
om = self._get_member(request, organization, member_id)
except OrganizationMember.DoesNotExist:
raise ResourceDoesNotExist
if not self._can_access(request, om):
return Response({'detail': ERR_INSUFFICIENT_ROLE}, status=400)
try:
team = Team.objects.get(
organization=organization,
slug=team_slug,
)
except Team.DoesNotExist:
raise ResourceDoesNotExist
try:
omt = OrganizationMemberTeam.objects.get(
team=team,
organizationmember=om,
)
except OrganizationMemberTeam.DoesNotExist:
if not (request.access.has_scope('org:write') or organization.flags.allow_joinleave):
omt, created = OrganizationAccessRequest.objects.get_or_create(
team=team,
member=om,
)
if created:
omt.send_request_email()
return Response(status=202)
omt = OrganizationMemberTeam.objects.create(
team=team,
organizationmember=om,
)
else:
return Response(status=204)
self.create_audit_entry(
request=request,
organization=organization,
target_object=omt.id,
target_user=om.user,
event=AuditLogEntryEvent.MEMBER_JOIN_TEAM,
data=omt.get_audit_log_data(),
)
return Response(serialize(team, request.user, TeamWithProjectsSerializer()), status=201)
def delete(self, request, organization, member_id, team_slug):
"""
Leave a team
Leave a team.
"""
try:
om = self._get_member(request, organization, member_id)
except OrganizationMember.DoesNotExist:
raise ResourceDoesNotExist
if not self._can_access(request, om):
return Response({'detail': ERR_INSUFFICIENT_ROLE}, status=400)
try:
team = Team.objects.get(
organization=organization,
slug=team_slug,
)
except Team.DoesNotExist:
raise ResourceDoesNotExist
try:
omt = OrganizationMemberTeam.objects.get(
team=team,
organizationmember=om,
)
except OrganizationMemberTeam.DoesNotExist:
pass
else:
self.create_audit_entry(
request=request,
organization=organization,
target_object=omt.id,
target_user=om.user,
event=AuditLogEntryEvent.MEMBER_LEAVE_TEAM,
data=omt.get_audit_log_data(),
)
omt.delete()
return Response(serialize(team, request.user, TeamWithProjectsSerializer()), status=200)
|
{
"content_hash": "bf2e53734d300e8e13099dec15f21e4a",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 99,
"avg_line_length": 32.42613636363637,
"alnum_prop": 0.5994392850884879,
"repo_name": "gencer/sentry",
"id": "f424c016b73e8bddceed70e8b85fe54acbab203d",
"size": "5707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/api/endpoints/organization_member_team_details.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "318167"
},
{
"name": "HTML",
"bytes": "281885"
},
{
"name": "JavaScript",
"bytes": "2342569"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "8393"
},
{
"name": "Python",
"bytes": "28161647"
},
{
"name": "Ruby",
"bytes": "4233"
},
{
"name": "Shell",
"bytes": "2149"
}
],
"symlink_target": ""
}
|
from dodo_commands import Dodo
from dodo_commands.framework.args_tree import ArgsTreeNode
from dodo_commands.framework.decorator_utils import uses_decorator
class Decorator: # noqa
def add_arguments(self, parser): # noqa
parser.add_argument(
"--use-debugger",
action="store_true",
default=False,
help="Run the command through the debugger",
)
def is_used(self, config, command_name, decorator_name):
return uses_decorator(config, command_name, decorator_name)
def modify_args(self, command_line_args, root_node, cwd): # noqa
if not getattr(command_line_args, "use_debugger", False):
return root_node, cwd
debugger_node = ArgsTreeNode("debugger", args=[Dodo.get("/BUILD/debugger")])
debugger_node.add_child(root_node)
return debugger_node, cwd
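# Illustrative behaviour (hypothetical config value): with "/BUILD/debugger"
# set to e.g. "gdb", passing --use-debugger wraps the original args tree in a
# new root node, so the executed command line is prefixed with "gdb".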
|
{
"content_hash": "55e3068645bc9662169158ecad3affa7",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 84,
"avg_line_length": 36.625,
"alnum_prop": 0.6552901023890785,
"repo_name": "mnieber/dodo_commands",
"id": "c29d06cf8b2c27e4a489487571f6f2d8efc9d494",
"size": "879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dodo_commands/extra/dodo_standard_commands/decorators/debugger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "198633"
}
],
"symlink_target": ""
}
|
"""BIG-IP® system software update module
REST URI
``http://localhost/mgmt/tm/sys/software/update``
GUI Path
``System --> Software Management --> Update Check``
REST Kind
``tm:sys:software:update:updatestate``
"""
from f5.bigip.resource import UnnamedResource
class Update(UnnamedResource):
"""BIG-IP® system software update unnamed resource
.. note::
        This is an unnamed resource so it has no ~Partition~Name pattern
at the end of its URI.
"""
def __init__(self, sys):
super(Update, self).__init__(sys)
self._meta_data['required_load_parameters'] = set()
self._meta_data['required_json_kind'] = \
'tm:sys:software:update:updatestate'
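# Minimal usage sketch (an assumption, following the usual f5-common-python
# pattern for unnamed resources; host and credentials are placeholders):
#
#     from f5.bigip import ManagementRoot
#     mgmt = ManagementRoot('10.0.0.1', 'admin', 'admin')
#     update = mgmt.tm.sys.software.update.load()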
|
{
"content_hash": "cf58e800527ffb47b809572d3a3dd857",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 73,
"avg_line_length": 25.928571428571427,
"alnum_prop": 0.6391184573002755,
"repo_name": "F5Networks/f5-common-python",
"id": "94f23ece3612783c68c085d7628bc1d52f8f9f9e",
"size": "1336",
"binary": false,
"copies": "2",
"ref": "refs/heads/development",
"path": "f5/bigiq/tm/sys/software/update.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "713"
},
{
"name": "Groovy",
"bytes": "4321"
},
{
"name": "Python",
"bytes": "2705690"
},
{
"name": "Shell",
"bytes": "6398"
}
],
"symlink_target": ""
}
|
import httplib
import socket
import urllib2
import sys
import logging
import shelve
from urlparse import urlparse, urljoin
from datetime import datetime
class HTTPRequest:
def __init__(self, url):
self.url = url
self.urlparsed = None
self.HTML = ''
self.headers = {}
self.status = 0
self.fetched = None
self.opener = urllib2.build_opener()
self.opener.addheaders = [('User-agent','Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_7; en-US) AppleWebKit/534.13 (KHTML, like Gecko) Chrome/9.0.597.0 Safari/534.13')]
def __parse_headers(self,headers):
output = {}
for h in headers:
output[h[0]] = h[1]
self.headers = output
def get_location(self):
if not 'location' in self.headers:
            location = ''
else:
if 'http://' in self.headers['location']:
location = self.headers['location']
else:
location = urljoin(self.url,self.headers['location'])
return location
def get_headers(self):
if not self.status:
self.urlparsed = urlparse(self.url)
self.date = datetime.today()
h = httplib.HTTPConnection(self.urlparsed.netloc)
            try:
if self.urlparsed.query != '':
uri = ''.join([self.urlparsed.path,'?',self.urlparsed.query])
else:
uri = self.urlparsed.path
h.request('GET',uri)
res = h.getresponse()
self.__parse_headers(res.getheaders())
self.status = res.status
except socket.gaierror:
logging.warning('No response from server : %s', self.urlparsed.netloc)
except httplib.InvalidURL:
                logging.warning('Invalid URL : %s', self.url)
except:
logging.warning('Unexpected error')
raise
def get_content(self):
if self.HTML == '':
if 'http://' in self.url:
try:
self.HTML = self.opener.open(self.url).read()
except urllib2.HTTPError:
                    logging.warning('HTTP error while fetching %s', self.url)
except urllib2.URLError:
logging.warning('No response from server')
except:
logging.warning('Unexpected error')
else:
try:
self.HTML = open(self.url).read()
except IOError:
logging.warning('File does not exist')
|
{
"content_hash": "1af81f541e370d078ea92a9998cb2618",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 178,
"avg_line_length": 33.52564102564103,
"alnum_prop": 0.5200764818355641,
"repo_name": "ravelsoft/ploufseo",
"id": "bc9fdda8c5584068c923ebd63a938c0dcc909c78",
"size": "2615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plouflib/http.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17432"
}
],
"symlink_target": ""
}
|
import datetime
from datetime import timedelta
from dateutil import rrule
from dateutil.relativedelta import relativedelta
from operator import itemgetter
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.float_utils import float_compare
from openerp.tools.translate import _
import pytz
class resource_calendar(osv.osv):
""" Calendar model for a resource. It has
- attendance_ids: list of resource.calendar.attendance that are a working
interval in a given weekday.
- leave_ids: list of leaves linked to this calendar. A leave can be general
or linked to a specific resource, depending on its resource_id.
All methods in this class use intervals. An interval is a tuple holding
(begin_datetime, end_datetime). A list of intervals is therefore a list of
tuples, holding several intervals of work or leaves. """
_name = "resource.calendar"
_description = "Resource Calendar"
_columns = {
'name': fields.char("Name", required=True),
'company_id': fields.many2one('res.company', 'Company', required=False),
'attendance_ids': fields.one2many('resource.calendar.attendance', 'calendar_id', 'Working Time', copy=True),
'manager': fields.many2one('res.users', 'Workgroup Manager'),
'leave_ids': fields.one2many(
'resource.calendar.leaves', 'calendar_id', 'Leaves',
help=''
),
}
_defaults = {
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'resource.calendar', context=context)
}
# --------------------------------------------------
# Utility methods
# --------------------------------------------------
def interval_clean(self, intervals):
""" Utility method that sorts and removes overlapping inside datetime
intervals. The intervals are sorted based on increasing starting datetime.
Overlapping intervals are merged into a single one.
:param list intervals: list of intervals; each interval is a tuple
(datetime_from, datetime_to)
:return list cleaned: list of sorted intervals without overlap """
intervals = sorted(intervals, key=itemgetter(0)) # sort on first datetime
cleaned = []
working_interval = None
while intervals:
current_interval = intervals.pop(0)
if not working_interval: # init
working_interval = [current_interval[0], current_interval[1]]
elif working_interval[1] < current_interval[0]: # interval is disjoint
cleaned.append(tuple(working_interval))
working_interval = [current_interval[0], current_interval[1]]
elif working_interval[1] < current_interval[1]: # union of greater intervals
working_interval[1] = current_interval[1]
if working_interval: # handle void lists
cleaned.append(tuple(working_interval))
return cleaned
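    # Illustrative example (hypothetical datetimes): the intervals
    # [11:00-13:00, 09:00-12:00] are first sorted on their start, then the
    # overlapping pair is merged, giving [09:00-13:00].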
def interval_remove_leaves(self, interval, leave_intervals):
""" Utility method that remove leave intervals from a base interval:
- clean the leave intervals, to have an ordered list of not-overlapping
intervals
- initiate the current interval to be the base interval
- for each leave interval:
- finishing before the current interval: skip, go to next
- beginning after the current interval: skip and get out of the loop
because we are outside range (leaves are ordered)
- beginning within the current interval: close the current interval
and begin a new current interval that begins at the end of the leave
interval
- ending within the current interval: update the current interval begin
to match the leave interval ending
:param tuple interval: a tuple (beginning datetime, ending datetime) that
is the base interval from which the leave intervals
will be removed
:param list leave_intervals: a list of tuples (beginning datetime, ending datetime)
that are intervals to remove from the base interval
:return list intervals: a list of tuples (begin datetime, end datetime)
that are the remaining valid intervals """
if not interval:
return interval
if leave_intervals is None:
leave_intervals = []
intervals = []
leave_intervals = self.interval_clean(leave_intervals)
current_interval = [interval[0], interval[1]]
for leave in leave_intervals:
if leave[1] <= current_interval[0]:
continue
if leave[0] >= current_interval[1]:
break
if current_interval[0] < leave[0] < current_interval[1]:
current_interval[1] = leave[0]
intervals.append((current_interval[0], current_interval[1]))
current_interval = [leave[1], interval[1]]
# if current_interval[0] <= leave[1] <= current_interval[1]:
if current_interval[0] <= leave[1]:
current_interval[0] = leave[1]
if current_interval and current_interval[0] < interval[1]: # remove intervals moved outside base interval due to leaves
intervals.append((current_interval[0], current_interval[1]))
return intervals
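    # Illustrative example (hypothetical datetimes): removing the leave
    # 10:00-12:00 from the base interval 08:00-17:00 yields the two work
    # intervals [08:00-10:00, 12:00-17:00].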
def interval_schedule_hours(self, intervals, hour, remove_at_end=True):
""" Schedule hours in intervals. The last matching interval is truncated
to match the specified hours.
It is possible to truncate the last interval at its beginning or ending.
However this does nothing on the given interval order that should be
submitted accordingly.
:param list intervals: a list of tuples (beginning datetime, ending datetime)
:param int/float hours: number of hours to schedule. It will be converted
into a timedelta, but should be submitted as an
int or float.
:param boolean remove_at_end: remove extra hours at the end of the last
matching interval. Otherwise, do it at the
beginning.
:return list results: a list of intervals. If the number of hours to schedule
is greater than the possible scheduling in the intervals, no extra-scheduling
is done, and results == intervals. """
results = []
res = datetime.timedelta()
limit = datetime.timedelta(hours=hour)
for interval in intervals:
res += interval[1] - interval[0]
if res > limit and remove_at_end:
interval = (interval[0], interval[1] + relativedelta(seconds=seconds(limit-res)))
elif res > limit:
interval = (interval[0] + relativedelta(seconds=seconds(res-limit)), interval[1])
results.append(interval)
if res > limit:
break
return results
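    # Illustrative example (hypothetical intervals): scheduling 3 hours in
    # [09:00-11:00, 13:00-17:00] keeps the first interval whole (2 hours) and,
    # with remove_at_end=True, truncates the second one to 13:00-14:00.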
# --------------------------------------------------
# Date and hours computation
# --------------------------------------------------
def get_attendances_for_weekdays(self, cr, uid, id, weekdays, context=None):
""" Given a list of weekdays, return matching resource.calendar.attendance"""
calendar = self.browse(cr, uid, id, context=None)
return [att for att in calendar.attendance_ids if int(att.dayofweek) in weekdays]
def get_weekdays(self, cr, uid, id, default_weekdays=None, context=None):
""" Return the list of weekdays that contain at least one working interval.
If no id is given (no calendar), return default weekdays. """
if id is None:
return default_weekdays if default_weekdays is not None else [0, 1, 2, 3, 4]
calendar = self.browse(cr, uid, id, context=None)
weekdays = set()
for attendance in calendar.attendance_ids:
weekdays.add(int(attendance.dayofweek))
return list(weekdays)
def get_next_day(self, cr, uid, id, day_date, context=None):
""" Get following date of day_date, based on resource.calendar. If no
calendar is provided, just return the next day.
:param int id: id of a resource.calendar. If not given, simply add one day
to the submitted date.
:param date day_date: current day as a date
:return date: next day of calendar, or just next day """
if not id:
return day_date + relativedelta(days=1)
weekdays = self.get_weekdays(cr, uid, id, context)
base_index = -1
for weekday in weekdays:
if weekday > day_date.weekday():
break
base_index += 1
new_index = (base_index + 1) % len(weekdays)
days = (weekdays[new_index] - day_date.weekday())
if days < 0:
days = 7 + days
return day_date + relativedelta(days=days)
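    # Illustrative example (hypothetical calendar): with working weekdays
    # Monday-Friday, get_next_day of a Friday skips the weekend and returns
    # the following Monday.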
def get_previous_day(self, cr, uid, id, day_date, context=None):
""" Get previous date of day_date, based on resource.calendar. If no
calendar is provided, just return the previous day.
:param int id: id of a resource.calendar. If not given, simply remove
one day from the submitted date.
:param date day_date: current day as a date
:return date: previous day of calendar, or just previous day """
if not id:
return day_date + relativedelta(days=-1)
weekdays = self.get_weekdays(cr, uid, id, context)
weekdays.reverse()
base_index = -1
for weekday in weekdays:
if weekday < day_date.weekday():
break
base_index += 1
new_index = (base_index + 1) % len(weekdays)
days = (weekdays[new_index] - day_date.weekday())
if days > 0:
days = days - 7
return day_date + relativedelta(days=days)
def get_leave_intervals(self, cr, uid, id, resource_id=None,
start_datetime=None, end_datetime=None,
context=None):
"""Get the leaves of the calendar. Leaves can be filtered on the resource,
the start datetime or the end datetime.
:param int resource_id: the id of the resource to take into account when
computing the leaves. If not set, only general
leaves are computed. If set, generic and
specific leaves are computed.
:param datetime start_datetime: if provided, do not take into account leaves
ending before this date.
:param datetime end_datetime: if provided, do not take into account leaves
beginning after this date.
:return list leaves: list of tuples (start_datetime, end_datetime) of
leave intervals
"""
resource_calendar = self.browse(cr, uid, id, context=context)
leaves = []
for leave in resource_calendar.leave_ids:
if leave.resource_id and not resource_id == leave.resource_id.id:
continue
date_from = datetime.datetime.strptime(leave.date_from, tools.DEFAULT_SERVER_DATETIME_FORMAT)
if end_datetime and date_from > end_datetime:
continue
date_to = datetime.datetime.strptime(leave.date_to, tools.DEFAULT_SERVER_DATETIME_FORMAT)
if start_datetime and date_to < start_datetime:
continue
leaves.append((date_from, date_to))
return leaves
def get_working_intervals_of_day(self, cr, uid, id, start_dt=None, end_dt=None,
leaves=None, compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Get the working intervals of the day based on calendar. This method
handle leaves that come directly from the leaves parameter or can be computed.
:param int id: resource.calendar id; take the first one if is a list
:param datetime start_dt: datetime object that is the beginning hours
for the working intervals computation; any
working interval beginning before start_dt
                                  will be truncated. If not set, defaults to
                                  end_dt (or today() if end_dt is not set) at
                                  00:00:00.
:param datetime end_dt: datetime object that is the ending hour
for the working intervals computation; any
working interval ending after end_dt
                                will be truncated. If not set, defaults to
                                start_dt at 23:59:59.
:param list leaves: a list of tuples(start_datetime, end_datetime) that
represent leaves.
:param boolean compute_leaves: if set and if leaves is None, compute the
leaves based on calendar and resource.
If leaves is None and compute_leaves false
no leaves are taken into account.
:param int resource_id: the id of the resource to take into account when
computing the leaves. If not set, only general
leaves are computed. If set, generic and
specific leaves are computed.
:param tuple default_interval: if no id, try to return a default working
day using default_interval[0] as beginning
hour, and default_interval[1] as ending hour.
Example: default_interval = (8, 16).
Otherwise, a void list of working intervals
is returned when id is None.
:return list intervals: a list of tuples (start_datetime, end_datetime)
of work intervals """
if isinstance(id, (list, tuple)):
id = id[0]
# Computes start_dt, end_dt (with default values if not set) + off-interval work limits
work_limits = []
if start_dt is None and end_dt is not None:
start_dt = end_dt.replace(hour=0, minute=0, second=0)
elif start_dt is None:
start_dt = datetime.datetime.now().replace(hour=0, minute=0, second=0)
else:
work_limits.append((start_dt.replace(hour=0, minute=0, second=0), start_dt))
if end_dt is None:
end_dt = start_dt.replace(hour=23, minute=59, second=59)
else:
work_limits.append((end_dt, end_dt.replace(hour=23, minute=59, second=59)))
assert start_dt.date() == end_dt.date(), 'get_working_intervals_of_day is restricted to one day'
intervals = []
work_dt = start_dt.replace(hour=0, minute=0, second=0)
# no calendar: try to use the default_interval, then return directly
if id is None:
working_interval = []
if default_interval:
working_interval = (start_dt.replace(hour=default_interval[0], minute=0, second=0), start_dt.replace(hour=default_interval[1], minute=0, second=0))
intervals = self.interval_remove_leaves(working_interval, work_limits)
return intervals
working_intervals = []
tz_info = fields.datetime.context_timestamp(cr, uid, work_dt, context=context).tzinfo
for calendar_working_day in self.get_attendances_for_weekdays(cr, uid, id, [start_dt.weekday()], context):
x = work_dt.replace(hour=0, minute=0, second=0) + timedelta(seconds=(calendar_working_day.hour_from * 3600))
y = work_dt.replace(hour=0, minute=0, second=0) + timedelta(seconds=(calendar_working_day.hour_to * 3600))
x = x.replace(tzinfo=tz_info).astimezone(pytz.UTC).replace(tzinfo=None)
y = y.replace(tzinfo=tz_info).astimezone(pytz.UTC).replace(tzinfo=None)
working_interval = (x, y)
working_intervals += self.interval_remove_leaves(working_interval, work_limits)
# find leave intervals
if leaves is None and compute_leaves:
leaves = self.get_leave_intervals(cr, uid, id, resource_id=resource_id, context=None)
# filter according to leaves
for interval in working_intervals:
work_intervals = self.interval_remove_leaves(interval, leaves)
intervals += work_intervals
return intervals
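    # Illustrative call (hypothetical calendar, assuming a UTC user timezone):
    # for a Monday attendance of 08:00-12:00 and 13:00-17:00, asking for
    # Monday 2014-04-07 with start_dt at 10:00 returns the intervals
    # [10:00-12:00, 13:00-17:00] of that day, the first one being truncated
    # by the start_dt work limit.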
def get_working_hours_of_date(self, cr, uid, id, start_dt=None, end_dt=None,
leaves=None, compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Get the working hours of the day based on calendar. This method uses
get_working_intervals_of_day to have the work intervals of the day. It
then calculates the number of hours contained in those intervals. """
res = datetime.timedelta()
intervals = self.get_working_intervals_of_day(
cr, uid, id,
start_dt, end_dt, leaves,
compute_leaves, resource_id,
default_interval, context)
for interval in intervals:
res += interval[1] - interval[0]
return seconds(res) / 3600.0
def get_working_hours(self, cr, uid, id, start_dt, end_dt, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
hours = 0.0
for day in rrule.rrule(rrule.DAILY, dtstart=start_dt,
until=(end_dt + datetime.timedelta(days=1)).replace(hour=0, minute=0, second=0),
byweekday=self.get_weekdays(cr, uid, id, context=context)):
day_start_dt = day.replace(hour=0, minute=0, second=0)
if start_dt and day.date() == start_dt.date():
day_start_dt = start_dt
day_end_dt = day.replace(hour=23, minute=59, second=59)
if end_dt and day.date() == end_dt.date():
day_end_dt = end_dt
hours += self.get_working_hours_of_date(
cr, uid, id, start_dt=day_start_dt, end_dt=day_end_dt,
compute_leaves=compute_leaves, resource_id=resource_id,
default_interval=default_interval,
context=context)
return hours
# --------------------------------------------------
# Hours scheduling
# --------------------------------------------------
def _schedule_hours(self, cr, uid, id, hours, day_dt=None,
compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Schedule hours of work, using a calendar and an optional resource to
compute working and leave days. This method can be used backwards, i.e.
scheduling days before a deadline.
:param int hours: number of hours to schedule. Use a negative number to
compute a backwards scheduling.
:param datetime day_dt: reference date to compute working days. If days is
> 0 date is the starting date. If days is < 0
date is the ending date.
:param boolean compute_leaves: if set, compute the leaves based on calendar
and resource. Otherwise no leaves are taken
into account.
:param int resource_id: the id of the resource to take into account when
computing the leaves. If not set, only general
leaves are computed. If set, generic and
specific leaves are computed.
:param tuple default_interval: if no id, try to return a default working
day using default_interval[0] as beginning
hour, and default_interval[1] as ending hour.
Example: default_interval = (8, 16).
Otherwise, a void list of working intervals
is returned when id is None.
:return tuple (datetime, intervals): datetime is the beginning/ending date
                                             of the scheduling; intervals are the
working intervals of the scheduling.
        Note: rrule.rrule is not used because it does not seem to allow
        going back in time.
"""
if day_dt is None:
day_dt = datetime.datetime.now()
backwards = (hours < 0)
hours = abs(hours)
intervals = []
remaining_hours = hours * 1.0
iterations = 0
current_datetime = day_dt
call_args = dict(compute_leaves=compute_leaves, resource_id=resource_id, default_interval=default_interval, context=context)
while float_compare(remaining_hours, 0.0, precision_digits=2) in (1, 0) and iterations < 1000:
if backwards:
call_args['end_dt'] = current_datetime
else:
call_args['start_dt'] = current_datetime
working_intervals = self.get_working_intervals_of_day(cr, uid, id, **call_args)
if id is None and not working_intervals: # no calendar -> consider working 8 hours
remaining_hours -= 8.0
elif working_intervals:
if backwards:
working_intervals.reverse()
new_working_intervals = self.interval_schedule_hours(working_intervals, remaining_hours, not backwards)
if backwards:
new_working_intervals.reverse()
res = datetime.timedelta()
for interval in working_intervals:
res += interval[1] - interval[0]
remaining_hours -= (seconds(res) / 3600.0)
if backwards:
intervals = new_working_intervals + intervals
else:
intervals = intervals + new_working_intervals
# get next day
if backwards:
current_datetime = datetime.datetime.combine(self.get_previous_day(cr, uid, id, current_datetime, context), datetime.time(23, 59, 59))
else:
current_datetime = datetime.datetime.combine(self.get_next_day(cr, uid, id, current_datetime, context), datetime.time())
# avoid infinite loops
iterations += 1
return intervals
def schedule_hours_get_date(self, cr, uid, id, hours, day_dt=None,
compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Wrapper on _schedule_hours: return the beginning/ending datetime of
an hours scheduling. """
res = self._schedule_hours(cr, uid, id, hours, day_dt, compute_leaves, resource_id, default_interval, context)
return res and res[0][0] or False
def schedule_hours(self, cr, uid, id, hours, day_dt=None,
compute_leaves=False, resource_id=None,
default_interval=None, context=None):
""" Wrapper on _schedule_hours: return the working intervals of an hours
scheduling. """
return self._schedule_hours(cr, uid, id, hours, day_dt, compute_leaves, resource_id, default_interval, context)
# --------------------------------------------------
# Days scheduling
# --------------------------------------------------
def _schedule_days(self, cr, uid, id, days, day_date=None, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
"""Schedule days of work, using a calendar and an optional resource to
compute working and leave days. This method can be used backwards, i.e.
scheduling days before a deadline.
:param int days: number of days to schedule. Use a negative number to
compute a backwards scheduling.
:param date day_date: reference date to compute working days. If days is > 0
date is the starting date. If days is < 0 date is the
ending date.
:param boolean compute_leaves: if set, compute the leaves based on calendar
and resource. Otherwise no leaves are taken
into account.
:param int resource_id: the id of the resource to take into account when
computing the leaves. If not set, only general
leaves are computed. If set, generic and
specific leaves are computed.
:param tuple default_interval: if no id, try to return a default working
day using default_interval[0] as beginning
hour, and default_interval[1] as ending hour.
Example: default_interval = (8, 16).
Otherwise, a void list of working intervals
is returned when id is None.
:return tuple (datetime, intervals): datetime is the beginning/ending date
                                             of the scheduling; intervals are the
working intervals of the scheduling.
        Implementation note: rrule.rrule is not used because it does not seem
        to allow going back in time.
"""
if day_date is None:
day_date = datetime.datetime.now()
backwards = (days < 0)
days = abs(days)
intervals = []
planned_days = 0
iterations = 0
current_datetime = day_date.replace(hour=0, minute=0, second=0)
while planned_days < days and iterations < 1000:
working_intervals = self.get_working_intervals_of_day(
cr, uid, id, current_datetime,
compute_leaves=compute_leaves, resource_id=resource_id,
default_interval=default_interval,
context=context)
if id is None or working_intervals: # no calendar -> no working hours, but day is considered as worked
planned_days += 1
intervals += working_intervals
# get next day
if backwards:
current_datetime = self.get_previous_day(cr, uid, id, current_datetime, context)
else:
current_datetime = self.get_next_day(cr, uid, id, current_datetime, context)
# avoid infinite loops
iterations += 1
return intervals
def schedule_days_get_date(self, cr, uid, id, days, day_date=None, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
""" Wrapper on _schedule_days: return the beginning/ending datetime of
a days scheduling. """
res = self._schedule_days(cr, uid, id, days, day_date, compute_leaves, resource_id, default_interval, context)
return res and res[-1][1] or False
def schedule_days(self, cr, uid, id, days, day_date=None, compute_leaves=False,
resource_id=None, default_interval=None, context=None):
""" Wrapper on _schedule_days: return the working intervals of a days
scheduling. """
return self._schedule_days(cr, uid, id, days, day_date, compute_leaves, resource_id, default_interval, context)
# --------------------------------------------------
# Compatibility / to clean / to remove
# --------------------------------------------------
def working_hours_on_day(self, cr, uid, resource_calendar_id, day, context=None):
""" Used in hr_payroll/hr_payroll.py
        :deprecated: OpenERP saas-3. Use get_working_hours_of_date instead. Note:
        since saas-3, takes hours/minutes into account, not just the whole day."""
if isinstance(day, datetime.datetime):
day = day.replace(hour=0, minute=0)
return self.get_working_hours_of_date(cr, uid, resource_calendar_id.id, start_dt=day, context=None)
def interval_min_get(self, cr, uid, id, dt_from, hours, resource=False):
""" Schedule hours backwards. Used in mrp_operations/mrp_operations.py.
:deprecated: OpenERP saas-3. Use schedule_hours instead. Note: since
saas-3, counts leave hours instead of all-day leaves."""
return self.schedule_hours(
cr, uid, id, hours * -1.0,
day_dt=dt_from.replace(minute=0, second=0),
compute_leaves=True, resource_id=resource,
default_interval=(8, 16)
)
def interval_get_multi(self, cr, uid, date_and_hours_by_cal, resource=False, byday=True):
""" Used in mrp_operations/mrp_operations.py (default parameters) and in
interval_get()
        :deprecated: OpenERP saas-3. Use schedule_hours instead. Note:
        byday was not used. Since saas-3, counts leave hours instead of all-day leaves."""
res = {}
for dt_str, hours, calendar_id in date_and_hours_by_cal:
result = self.schedule_hours(
cr, uid, calendar_id, hours,
day_dt=datetime.datetime.strptime(dt_str, '%Y-%m-%d %H:%M:%S').replace(second=0),
compute_leaves=True, resource_id=resource,
default_interval=(8, 16)
)
res[(dt_str, hours, calendar_id)] = result
return res
def interval_get(self, cr, uid, id, dt_from, hours, resource=False, byday=True):
""" Unifier of interval_get_multi. Used in: mrp_operations/mrp_operations.py,
crm/crm_lead.py (res given).
:deprecated: OpenERP saas-3. Use get_working_hours instead."""
res = self.interval_get_multi(
cr, uid, [(dt_from.strftime('%Y-%m-%d %H:%M:%S'), hours, id)], resource, byday)[(dt_from.strftime('%Y-%m-%d %H:%M:%S'), hours, id)]
return res
def interval_hours_get(self, cr, uid, id, dt_from, dt_to, resource=False):
""" Unused wrapper.
:deprecated: OpenERP saas-3. Use get_working_hours instead."""
return self._interval_hours_get(cr, uid, id, dt_from, dt_to, resource_id=resource)
def _interval_hours_get(self, cr, uid, id, dt_from, dt_to, resource_id=False, timezone_from_uid=None, exclude_leaves=True, context=None):
""" Computes working hours between two dates, taking always same hour/minuts.
:deprecated: OpenERP saas-3. Use get_working_hours instead. Note: since saas-3,
now resets hour/minuts. Now counts leave hours instead of all-day leaves."""
return self.get_working_hours(
cr, uid, id, dt_from, dt_to,
compute_leaves=(not exclude_leaves), resource_id=resource_id,
default_interval=(8, 16), context=context)
class resource_calendar_attendance(osv.osv):
_name = "resource.calendar.attendance"
_description = "Work Detail"
_columns = {
'name' : fields.char("Name", required=True),
'dayofweek': fields.selection([('0','Monday'),('1','Tuesday'),('2','Wednesday'),('3','Thursday'),('4','Friday'),('5','Saturday'),('6','Sunday')], 'Day of Week', required=True, select=True),
'date_from' : fields.date('Starting Date'),
'hour_from' : fields.float('Work from', required=True, help="Start and End time of working.", select=True),
'hour_to' : fields.float("Work to", required=True),
'calendar_id' : fields.many2one("resource.calendar", "Resource's Calendar", required=True),
}
_order = 'dayofweek, hour_from'
_defaults = {
'dayofweek' : '0'
}
def hours_time_string(hours):
""" convert a number of hours (float) into a string with format '%H:%M' """
minutes = int(round(hours * 60))
return "%02d:%02d" % divmod(minutes, 60)
class resource_resource(osv.osv):
_name = "resource.resource"
_description = "Resource Detail"
_columns = {
'name': fields.char("Name", required=True),
'code': fields.char('Code', size=16, copy=False),
'active' : fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the resource record without removing it."),
'company_id' : fields.many2one('res.company', 'Company'),
'resource_type': fields.selection([('user','Human'),('material','Material')], 'Resource Type', required=True),
'user_id' : fields.many2one('res.users', 'User', help='Related user name for the resource to manage its access.'),
        'time_efficiency' : fields.float('Efficiency Factor', size=8, required=True, help="This field depicts the efficiency of the resource to complete tasks. e.g. a resource put alone on a phase of 5 days with 5 tasks assigned to him will show a load of 100% for this phase by default, but if we put an efficiency of 200%, then his load will only be 50%."),
'calendar_id' : fields.many2one("resource.calendar", "Working Time", help="Define the schedule of resource"),
}
_defaults = {
'resource_type' : 'user',
'time_efficiency' : 1,
'active' : True,
'company_id': lambda self, cr, uid, context: self.pool.get('res.company')._company_default_get(cr, uid, 'resource.resource', context=context)
}
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
if not default.get('name', False):
default.update(name=_('%s (copy)') % (self.browse(cr, uid, id, context=context).name))
return super(resource_resource, self).copy(cr, uid, id, default, context)
def generate_resources(self, cr, uid, user_ids, calendar_id, context=None):
"""
Return a list of Resource Class objects for the resources allocated to the phase.
NOTE: Used in project/project.py
"""
resource_objs = {}
user_pool = self.pool.get('res.users')
for user in user_pool.browse(cr, uid, user_ids, context=context):
resource_objs[user.id] = {
'name' : user.name,
'vacation': [],
'efficiency': 1.0,
}
resource_ids = self.search(cr, uid, [('user_id', '=', user.id)], context=context)
if resource_ids:
for resource in self.browse(cr, uid, resource_ids, context=context):
resource_objs[user.id]['efficiency'] = resource.time_efficiency
resource_cal = resource.calendar_id.id
if resource_cal:
leaves = self.compute_vacation(cr, uid, calendar_id, resource.id, resource_cal, context=context)
resource_objs[user.id]['vacation'] += list(leaves)
return resource_objs
def compute_vacation(self, cr, uid, calendar_id, resource_id=False, resource_calendar=False, context=None):
"""
Compute the vacation from the working calendar of the resource.
@param calendar_id : working calendar of the project
@param resource_id : resource working on phase/task
@param resource_calendar : working calendar of the resource
NOTE: used in project/project.py, and in generate_resources
"""
resource_calendar_leaves_pool = self.pool.get('resource.calendar.leaves')
leave_list = []
if resource_id:
leave_ids = resource_calendar_leaves_pool.search(cr, uid, ['|', ('calendar_id', '=', calendar_id),
('calendar_id', '=', resource_calendar),
('resource_id', '=', resource_id)
], context=context)
else:
leave_ids = resource_calendar_leaves_pool.search(cr, uid, [('calendar_id', '=', calendar_id),
('resource_id', '=', False)
], context=context)
leaves = resource_calendar_leaves_pool.read(cr, uid, leave_ids, ['date_from', 'date_to'], context=context)
for i in range(len(leaves)):
dt_start = datetime.datetime.strptime(leaves[i]['date_from'], '%Y-%m-%d %H:%M:%S')
dt_end = datetime.datetime.strptime(leaves[i]['date_to'], '%Y-%m-%d %H:%M:%S')
no = dt_end - dt_start
            for x in range(int(no.days + 1)):
                leave_list.append((dt_start + datetime.timedelta(days=x)).strftime('%Y-%m-%d'))
leave_list.sort()
return leave_list
def compute_working_calendar(self, cr, uid, calendar_id=False, context=None):
"""
        Convert the working calendar from the 'OpenERP' format into the 'Faces' format.
@param calendar_id : working calendar of the project
NOTE: used in project/project.py
"""
if not calendar_id:
            # Calendar is not specified: fall back to a default working week
            # (Mon-Fri, 08:00-12:00 and 13:00-17:00)
return [('fri', '8:0-12:0','13:0-17:0'), ('thu', '8:0-12:0','13:0-17:0'), ('wed', '8:0-12:0','13:0-17:0'),
('mon', '8:0-12:0','13:0-17:0'), ('tue', '8:0-12:0','13:0-17:0')]
resource_attendance_pool = self.pool.get('resource.calendar.attendance')
time_range = "8:00-8:00"
non_working = ""
week_days = {"0": "mon", "1": "tue", "2": "wed","3": "thu", "4": "fri", "5": "sat", "6": "sun"}
wk_days = {}
wk_time = {}
wktime_list = []
wktime_cal = []
week_ids = resource_attendance_pool.search(cr, uid, [('calendar_id', '=', calendar_id)], context=context)
weeks = resource_attendance_pool.read(cr, uid, week_ids, ['dayofweek', 'hour_from', 'hour_to'], context=context)
# Convert time formats into appropriate format required
# and create a list like [('mon', '8:00-12:00'), ('mon', '13:00-18:00')]
for week in weeks:
res_str = ""
day = None
if week_days.get(week['dayofweek'],False):
day = week_days[week['dayofweek']]
wk_days[week['dayofweek']] = week_days[week['dayofweek']]
else:
raise osv.except_osv(_('Configuration Error!'),_('Make sure the Working time has been configured with proper week days!'))
hour_from_str = hours_time_string(week['hour_from'])
hour_to_str = hours_time_string(week['hour_to'])
res_str = hour_from_str + '-' + hour_to_str
wktime_list.append((day, res_str))
# Convert into format like [('mon', '8:00-12:00', '13:00-18:00')]
for item in wktime_list:
if wk_time.has_key(item[0]):
wk_time[item[0]].append(item[1])
else:
wk_time[item[0]] = [item[0]]
wk_time[item[0]].append(item[1])
for k,v in wk_time.items():
wktime_cal.append(tuple(v))
# Add for the non-working days like: [('sat, sun', '8:00-8:00')]
for k, v in wk_days.items():
if week_days.has_key(k):
week_days.pop(k)
for v in week_days.itervalues():
non_working += v + ','
if non_working:
wktime_cal.append((non_working[:-1], time_range))
return wktime_cal
class resource_calendar_leaves(osv.osv):
_name = "resource.calendar.leaves"
_description = "Leave Detail"
_columns = {
'name' : fields.char("Name"),
'company_id' : fields.related('calendar_id','company_id',type='many2one',relation='res.company',string="Company", store=True, readonly=True),
'calendar_id' : fields.many2one("resource.calendar", "Working Time"),
'date_from' : fields.datetime('Start Date', required=True),
'date_to' : fields.datetime('End Date', required=True),
'resource_id' : fields.many2one("resource.resource", "Resource", help="If empty, this is a generic holiday for the company. If a resource is set, the holiday/leave is only for this resource"),
}
def check_dates(self, cr, uid, ids, context=None):
for leave in self.browse(cr, uid, ids, context=context):
if leave.date_from and leave.date_to and leave.date_from > leave.date_to:
return False
return True
_constraints = [
        (check_dates, 'Error! Leave start-date must be lower than leave end-date.', ['date_from', 'date_to'])
]
def onchange_resource(self, cr, uid, ids, resource, context=None):
result = {}
if resource:
resource_pool = self.pool.get('resource.resource')
result['calendar_id'] = resource_pool.browse(cr, uid, resource, context=context).calendar_id.id
return {'value': result}
return {'value': {'calendar_id': []}}
def seconds(td):
assert isinstance(td, datetime.timedelta)
return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10**6) / 10.**6
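# Illustrative example: seconds(datetime.timedelta(hours=1, minutes=30))
# returns 5400.0.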
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
{
"content_hash": "47fc0030a1087d314f24f531153bb214",
"timestamp": "",
"source": "github",
"line_count": 814,
"max_line_length": 356,
"avg_line_length": 51.54914004914005,
"alnum_prop": 0.5734134076880913,
"repo_name": "lbk0116/NTDP",
"id": "dd96c15bce717b877b80cb5e10a0d0fbd4f48627",
"size": "42947",
"binary": false,
"copies": "22",
"ref": "refs/heads/master",
"path": "addons/resource/resource.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "539622"
},
{
"name": "HTML",
"bytes": "46766"
},
{
"name": "JavaScript",
"bytes": "5052395"
},
{
"name": "Makefile",
"bytes": "12757"
},
{
"name": "NSIS",
"bytes": "18524"
},
{
"name": "Python",
"bytes": "3776867"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "4318"
},
{
"name": "XSLT",
"bytes": "27334"
}
],
"symlink_target": ""
}
|
import os
import oss2
from oss2.crypto import BaseCryptoProvider
from oss2.utils import b64encode_as_string, b64decode_from_string, to_bytes
from oss2.headers import *
from Crypto.Cipher import PKCS1_OAEP
from Crypto.PublicKey import RSA
from requests.structures import CaseInsensitiveDict
# The following code shows how to supply your own encryption algorithm for
# client-side encryption of OSS objects, e.g. for object download and upload.
# Note that with client-side encryption, OSS does not yet support multipart
# (resumable) upload/download operations.
# This example provides CustomCryptoProvider, a crypto provider using a local
# asymmetric key, and FakeCrypto for symmetric encryption of the data.
# Custom CryptoProvider
class FakeCrypto:
"""FakeCrypto 加密实现,用户自行提供的一种对称加密算法。
:param str key: 对称加密数据密钥
:param str start: 对称加密初始随机值
.. note::
用户可自行实现对称加密算法,需服务如下规则:
1、提供对称加密算法名,ALGORITHM
2、提供静态方法,返回加密密钥和初始随机值(若算法不需要初始随机值,也需要提供),类型为
3、提供加密解密方法
"""
ALGORITHM = "userdefine"
@staticmethod
def get_key():
return 'fake_key'
@staticmethod
def get_iv():
return 'fake_start'
def __init__(self, key=None, start=None, count=None):
pass
def encrypt(self, raw):
return raw
def decrypt(self, enc):
return enc
class FakeAsymmetric:
def __int__(self):
pass
def get_public_key(self):
return
def get_private_key(self):
return
def encrypt(self, data):
return data
def decrypt(self, data):
return data
class CustomCryptoProvider(BaseCryptoProvider):
"""使用本地自定义FakeAsymmetric加密数据密钥。数据使用公钥加密,私钥解密
:param class cipher: 数据加密,FakeCrypto
"""
def __init__(self, cipher=FakeCrypto):
super(CustomCryptoProvider, self).__init__(cipher=cipher)
self.public_key = FakeAsymmetric()
self.private_key = self.public_key
def build_header(self, headers=None, multipart_context=None):
if not isinstance(headers, CaseInsensitiveDict):
headers = CaseInsensitiveDict(headers)
if 'content-md5' in headers:
headers[OSS_CLIENT_SIDE_ENCRYPTION_UNENCRYPTED_CONTENT_MD5] = headers['content-md5']
del headers['content-md5']
if 'content-length' in headers:
headers[OSS_CLIENT_SIDE_ENCRYPTION_UNENCRYPTED_CONTENT_LENGTH] = headers['content-length']
del headers['content-length']
headers[OSS_CLIENT_SIDE_ENCRYPTION_KEY] = b64encode_as_string(self.public_key.encrypt(self.plain_key))
headers[OSS_CLIENT_SIDE_ENCRYPTION_START] = b64encode_as_string(self.public_key.encrypt(to_bytes(str(self.plain_iv))))
headers[OSS_CLIENT_SIDE_ENCRYPTION_CEK_ALG] = self.cipher.ALGORITHM
headers[OSS_CLIENT_SIDE_ENCRYPTION_WRAP_ALG] = 'custom'
# multipart file build header
if multipart_context:
headers[OSS_CLIENT_SIDE_ENCRYPTION_DATA_SIZE] = str(multipart_context.data_size)
headers[OSS_CLIENT_SIDE_ENCRYPTION_PART_SIZE] = str(multipart_context.part_size)
self.plain_key = None
self.plain_iv = None
return headers
def build_header_for_upload_part(self, headers=None):
if not isinstance(headers, CaseInsensitiveDict):
headers = CaseInsensitiveDict(headers)
if 'content-md5' in headers:
headers[OSS_CLIENT_SIDE_ENCRYPTION_UNENCRYPTED_CONTENT_MD5] = headers['content-md5']
del headers['content-md5']
if 'content-length' in headers:
headers[OSS_CLIENT_SIDE_ENCRYPTION_UNENCRYPTED_CONTENT_LENGTH] = headers['content-length']
del headers['content-length']
self.plain_key = None
self.plain_iv = None
return headers
def get_key(self):
self.plain_key = self.cipher.get_key()
return self.plain_key
def get_iv(self):
self.plain_iv = self.cipher.get_iv()
return self.plain_iv
def decrypt_oss_meta_data(self, headers, key, conv=lambda x:x):
try:
return conv(self.private_key.decrypt(b64decode_from_string(headers[key])))
except:
return None
def decrypt_from_str(self, key, value, conv=lambda x:x):
try:
return conv(self.private_key.decrypt(b64decode_from_string(value)))
except:
return None
# First initialize AccessKeyId, AccessKeySecret, Endpoint and so on.
# They are taken from environment variables, or you can replace placeholders
# such as "<your AccessKeyId>" with the real values.
#
# Taking the Hangzhou region as an example, the endpoint can be:
#   http://oss-cn-hangzhou.aliyuncs.com
#   https://oss-cn-hangzhou.aliyuncs.com
# for access over HTTP and HTTPS respectively.
access_key_id = os.getenv('OSS_TEST_ACCESS_KEY_ID', '<your AccessKeyId>')
access_key_secret = os.getenv('OSS_TEST_ACCESS_KEY_SECRET', '<your AccessKeySecret>')
bucket_name = os.getenv('OSS_TEST_BUCKET', '<your Bucket>')
endpoint = os.getenv('OSS_TEST_ENDPOINT', '<your endpoint>')
# Make sure all the parameters above are filled in correctly.
for param in (access_key_id, access_key_secret, bucket_name, endpoint):
    assert '<' not in param, 'please set the parameter: ' + param
key = 'motto.txt'
content = b'a' * 1024 * 1024
filename = 'download.txt'
# Create a bucket object with client-side data encryption (client-side RSA);
# in this mode only whole-object upload/download operations are provided.
bucket = oss2.CryptoBucket(oss2.Auth(access_key_id, access_key_secret), endpoint, bucket_name, crypto_provider=CustomCryptoProvider())
key1 = 'motto-copy.txt'
# Upload the object
bucket.put_object(key, content, headers={'content-length': str(1024 * 1024)})
"""
文件下载
"""
# 下载文件
# 原文件
result = bucket.get_object(key)
# Verify the content
content_got = b''
for chunk in result:
content_got += chunk
assert content_got == content
# Download the original object to a local file
result = bucket.get_object_to_file(key, filename)
# Verify the content
with open(filename, 'rb') as fileobj:
assert fileobj.read() == content
os.remove(filename)
"""
分片上传
"""
# 初始化上传分片
part_a = b'a' * 1024 * 100
part_b = b'b' * 1024 * 100
part_c = b'c' * 1024 * 100
multi_content = [part_a, part_b, part_c]
parts = []
data_size = 100 * 1024 * 3
part_size = 100 * 1024
multi_key = "test_crypto_multipart"
res = bucket.init_multipart_upload(multi_key, data_size, part_size)
upload_id = res.upload_id
crypto_multipart_context = res.crypto_multipart_context
# Upload the parts
for i in range(3):
result = bucket.upload_part(multi_key, upload_id, i+1, multi_content[i], crypto_multipart_context)
parts.append(oss2.models.PartInfo(i+1, result.etag, size = part_size, part_crc = result.crc))
## If a multipart upload is interrupted and crypto_multipart_context is lost,
## it can be recovered with list_parts.
#for i in range(2):
# result = bucket.upload_part(multi_key, upload_id, i+1, multi_content[i], crypto_multipart_context)
# parts.append(oss2.models.PartInfo(i+1, result.etag, size = part_size, part_crc = result.crc))
#
#res = bucket.list_parts(multi_key, upload_id)
#crypto_multipart_context_new = res.crypto_multipart_context
#
#result = bucket.upload_part(multi_key, upload_id, 3, multi_content[2], crypto_multipart_context_new)
#parts.append(oss2.models.PartInfo(3, result.etag, size = part_size, part_crc = result.crc))
# Complete the upload
result = bucket.complete_multipart_upload(multi_key, upload_id, parts)
# Download the whole object
result = bucket.get_object(multi_key)
# Verify the content
content_got = b''
for chunk in result:
content_got += chunk
assert content_got[0:102400] == part_a
assert content_got[102400:204800] == part_b
assert content_got[204800:307200] == part_c
|
{
"content_hash": "7145e6023b8998b3640fc3ad303e01a3",
"timestamp": "",
"source": "github",
"line_count": 240,
"max_line_length": 134,
"avg_line_length": 29.204166666666666,
"alnum_prop": 0.6785561421030104,
"repo_name": "aliyun/aliyun-oss-python-sdk",
"id": "e9605a893efafa813cccd688db5939415c6fe23f",
"size": "7966",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/custom_crypto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1261534"
}
],
"symlink_target": ""
}
|
import pygame, time
from math import pi
import numpy as np
SAMPLE_RATE = 192000
def split(n):
n1 = n >> 8
n2 = n - (n1 << 8)
    return (n2, n1)  # little endian: low byte first
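# Illustrative example: split(0x1234) returns (0x34, 0x12), low byte first.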
def setSampleRate(n):
    global SAMPLE_RATE  # without the global statement this would only bind a local
    SAMPLE_RATE = n
def genTonePattern(freq, secs, sampleRate=192000):
    # The original body was left unfinished; the completion below is an
    # assumption, generating one 16-bit sine tone the same way getData() does.
    numSamples = int(np.floor(sampleRate * secs))
    secs = numSamples / sampleRate
    samplesPerFreq = int(np.floor(sampleRate / freq))  # samples per sine cycle
    data = np.empty(numSamples, dtype=np.int16)
    for i in range(numSamples):
        data[i] = 32767 * np.sin(pi * 2 * ((i % samplesPerFreq) / samplesPerFreq))
    return data
def getData(freqList, miliSecs):
arrayLen = int(np.floor(len(freqList) * miliSecs / 1000 * SAMPLE_RATE))
data = np.zeros(arrayLen, dtype=np.int16)
dataInsert = 0
freqL = len(freqList)
for freqN in range(freqL):
freq = freqList[freqN]
secs = miliSecs / 1000
numSamples = int(np.floor(secs * SAMPLE_RATE))
numFreqIters = int(np.floor(secs * freq))
samplesPerFreq = int(np.floor(numSamples / numFreqIters))#numSamples / (secs / freq)
tempData = np.zeros(samplesPerFreq, dtype=np.int16)
for i in range(samplesPerFreq):
v = 32767 * np.sin(pi * 2 * (i / samplesPerFreq))
tempData[i] = v
print("Generated tone part {} of {} for tone {} of {}".format(i + 1, samplesPerFreq, freqN + 1, freqL))
cp = range(dataInsert, dataInsert + (numFreqIters - 1) * samplesPerFreq, samplesPerFreq)
print("Inserting {} to {}".format(cp[0], cp[len(cp) - 1]))
data = np.insert(data, cp, tempData)
dataInsert += numSamples
return data
def play(data, doWait):
v = pygame.mixer.get_init()
if v == None:
pygame.mixer.init(frequency = 192000)
v = pygame.mixer.get_init()
transmitSound = pygame.mixer.Sound(data.tobytes())
transmitSound.play()
if doWait:
t = transmitSound.get_length()
print(t)
time.sleep(t)
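# Minimal usage sketch (an assumption; the frequencies and duration are
# placeholders):
#
#     if __name__ == '__main__':
#         tones = getData([440, 880], 250)  # 250 ms of 440 Hz, then 880 Hz
#         play(tones, True)                 # block until playback finishes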
|
{
"content_hash": "a0dfddec1aa676b1f940872631617a49",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 115,
"avg_line_length": 33.464285714285715,
"alnum_prop": 0.6173959445037354,
"repo_name": "powerboat9/PUDT",
"id": "ce5dc3ae6b4d1fdd2ad752154695dd37aea4ebd3",
"size": "1874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plysnd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4665"
},
{
"name": "Shell",
"bytes": "79"
}
],
"symlink_target": ""
}
|
"""Test various command line arguments and configuration file parameters."""
import os
from test_framework.test_framework import BitcoinTestFramework
class ConfArgsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def test_config_file_parser(self):
# Assume node is stopped
inc_conf_file_path = os.path.join(self.nodes[0].datadir, 'include.conf')
with open(os.path.join(self.nodes[0].datadir, 'bitcoin.conf'), 'a', encoding='utf-8') as conf:
conf.write('includeconf={}\n'.format(inc_conf_file_path))
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('-dash=1\n')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error reading configuration file: parse error on line 1: -dash=1, options in configuration file must be specified without leading -')
with open(inc_conf_file_path, 'w', encoding='utf8') as conf:
conf.write("wallet=foo\n")
self.nodes[0].assert_start_raises_init_error(expected_msg='Error: Config setting for -wallet only applied on regtest network when in [regtest] section.')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('nono\n')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error reading configuration file: parse error on line 1: nono, if you intended to specify a negated option, use nono=1 instead')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('server=1\nrpcuser=someuser\nrpcpassword=some#pass')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error reading configuration file: parse error on line 3, using # in rpcpassword can be ambiguous and should be avoided')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('server=1\nrpcuser=someuser\nmain.rpcpassword=some#pass')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error reading configuration file: parse error on line 3, using # in rpcpassword can be ambiguous and should be avoided')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('server=1\nrpcuser=someuser\n[main]\nrpcpassword=some#pass')
self.nodes[0].assert_start_raises_init_error(expected_msg='Error reading configuration file: parse error on line 4, using # in rpcpassword can be ambiguous and should be avoided')
inc_conf_file2_path = os.path.join(self.nodes[0].datadir, 'include2.conf')
with open(os.path.join(self.nodes[0].datadir, 'bitcoin.conf'), 'a', encoding='utf-8') as conf:
conf.write('includeconf={}\n'.format(inc_conf_file2_path))
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('testnot.datadir=1\n')
with open(inc_conf_file2_path, 'w', encoding='utf-8') as conf:
conf.write('[testnet]\n')
self.restart_node(0)
self.nodes[0].stop_node(expected_stderr='Warning: ' + inc_conf_file_path + ':1 Section [testnot] is not recognized.' + os.linesep + 'Warning: ' + inc_conf_file2_path + ':1 Section [testnet] is not recognized.')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('') # clear
with open(inc_conf_file2_path, 'w', encoding='utf-8') as conf:
conf.write('') # clear
def run_test(self):
self.stop_node(0)
self.test_config_file_parser()
# Remove the -datadir argument so it doesn't override the config file
self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")]
default_data_dir = self.nodes[0].datadir
new_data_dir = os.path.join(default_data_dir, 'newdatadir')
new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2')
# Check that using -datadir argument on non-existent directory fails
self.nodes[0].datadir = new_data_dir
self.nodes[0].assert_start_raises_init_error(['-datadir=' + new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.')
# Check that using non-existent datadir in conf file fails
conf_file = os.path.join(default_data_dir, "bitcoin.conf")
# datadir needs to be set before [regtest] section
conf_file_contents = open(conf_file, encoding='utf8').read()
with open(conf_file, 'w', encoding='utf8') as f:
f.write("datadir=" + new_data_dir + "\n")
f.write(conf_file_contents)
# Temporarily disabled, because this test would access the user's home dir (~/.bitcoin)
#self.nodes[0].assert_start_raises_init_error(['-conf=' + conf_file], 'Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.')
# Create the directory and ensure the config file now works
os.mkdir(new_data_dir)
# Temporarily disabled, because this test would access the user's home dir (~/.bitcoin)
#self.start_node(0, ['-conf='+conf_file, '-wallet=w1'])
#self.stop_node(0)
#assert os.path.exists(os.path.join(new_data_dir, 'regtest', 'blocks'))
#if self.is_wallet_compiled():
#assert os.path.exists(os.path.join(new_data_dir, 'regtest', 'wallets', 'w1'))
# Ensure command line argument overrides datadir in conf
os.mkdir(new_data_dir_2)
self.nodes[0].datadir = new_data_dir_2
self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file, '-wallet=w2'])
assert os.path.exists(os.path.join(new_data_dir_2, 'regtest', 'blocks'))
if self.is_wallet_compiled():
assert os.path.exists(os.path.join(new_data_dir_2, 'regtest', 'wallets', 'w2'))
if __name__ == '__main__':
ConfArgsTest().main()
|
{
"content_hash": "5ddbc1025873b15af96d897671f0c851",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 218,
"avg_line_length": 55.02803738317757,
"alnum_prop": 0.649796195652174,
"repo_name": "afk11/bitcoin",
"id": "2b93e3c24d763dd37c5072c49f431f42220e97c3",
"size": "6102",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/feature_config_args.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "695384"
},
{
"name": "C++",
"bytes": "6418867"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "198872"
},
{
"name": "Makefile",
"bytes": "121257"
},
{
"name": "Objective-C",
"bytes": "123749"
},
{
"name": "Objective-C++",
"bytes": "5382"
},
{
"name": "Python",
"bytes": "1603966"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "97840"
}
],
"symlink_target": ""
}
|
from .fprot import FProt
from ..interface import AntivirusPluginInterface
from irma.common.plugins import PluginMetaClass, BinaryDependency, \
PlatformDependency
from irma.common.base.utils import IrmaProbeType
class FProtPlugin(AntivirusPluginInterface, metaclass=PluginMetaClass):
# =================
# plugin metadata
# =================
_plugin_name_ = "FProt"
_plugin_display_name_ = FProt.name
_plugin_author_ = "IRMA (c) Quarkslab"
_plugin_version_ = "1.0.0"
_plugin_category_ = IrmaProbeType.antivirus
_plugin_description_ = "Plugin for FProt Antivirus for Linux"
_plugin_dependencies_ = [
PlatformDependency("linux"),
BinaryDependency("/opt/f-prot/fpscan")
]
# ================
# interface data
# ================
module_cls = FProt
|
{
"content_hash": "3f81e53af2d2ddb85aee4ce0914f4648",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 71,
"avg_line_length": 27.7,
"alnum_prop": 0.6305655836341757,
"repo_name": "quarkslab/irma",
"id": "3e65ceecbc9fe861141a83cdb5ad38e30708fc6c",
"size": "1356",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "probe/modules/antivirus/fprot/plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "79"
},
{
"name": "CSS",
"bytes": "86535"
},
{
"name": "DIGITAL Command Language",
"bytes": "68"
},
{
"name": "Gherkin",
"bytes": "2366"
},
{
"name": "HTML",
"bytes": "26577"
},
{
"name": "JavaScript",
"bytes": "1774854"
},
{
"name": "Jinja",
"bytes": "2672"
},
{
"name": "Less",
"bytes": "13774"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "PowerShell",
"bytes": "15660"
},
{
"name": "Python",
"bytes": "797592"
},
{
"name": "Shell",
"bytes": "61907"
}
],
"symlink_target": ""
}
|
from app import db
from flask.ext.login import LoginManager, UserMixin
class User(db.Model,UserMixin):
id = db.Column(db.Integer, primary_key=True)
nickname = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
social_id = db.Column(db.String(120),index=True,unique=True)
gender = db.Column(db.String(10))
country = db.Column(db.String(100))
phone = db.Column(db.String(10))
timezone = db.Column(db.String(40))
image = db.Column(db.String(300),index=True)
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
try:
return unicode(self.id) # python 2
except NameError:
return str(self.id) # python 3
def __repr__(self):
return '<User %r>' % (self.nickname)
class Connection(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
provider_id = db.Column(db.String(255))
provider_user_id = db.Column(db.String(255))
access_token = db.Column(db.String(255))
secret = db.Column(db.String(255))
display_name = db.Column(db.String(255))
profile_url = db.Column(db.String(512))
image_url = db.Column(db.String(512))
rank = db.Column(db.Integer)
class User_Preferences(db.Model):
nickname = db.Column(db.String(64),index=True,unique=True)
email = db.Column(db.String(200),index=True,primary_key=True)
preferences = db.Column(db.String(1000))
last_accessed = db.Column(db.DateTime)
last_updated = db.Column(db.DateTime)
    def get_id(self):
        # note: this model's primary key is the email column, not an integer id
        try:
            return unicode(self.email)  # python 2
        except NameError:
            return str(self.email)  # python 3
def __repr__(self):
return '<User %r>' % (self.nickname)
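# Illustrative sketch only, not part of the original module: Flask-Login also
# needs a user_loader callback so it can rebuild a User from the id string it
# stores in the session. The LoginManager instance below is an assumption --
# the real app may well construct and initialize it elsewhere.
lm = LoginManager()
@lm.user_loader
def load_user(user_id):
    return User.query.get(int(user_id))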
|
{
"content_hash": "3b1ca02f7c25cd3cf67a8c4e4a7896e5",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 65,
"avg_line_length": 32.03333333333333,
"alnum_prop": 0.63579604578564,
"repo_name": "sandeep6189/Pmp-Webapp",
"id": "863a54a549fc73bd5cc566f57d0066d967c34f36",
"size": "1922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/models.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "57528"
},
{
"name": "JavaScript",
"bytes": "138223"
},
{
"name": "PHP",
"bytes": "35621"
},
{
"name": "Python",
"bytes": "34731"
},
{
"name": "Ruby",
"bytes": "8776"
}
],
"symlink_target": ""
}
|
from OpenGLCffi.GLES3 import params
@params(api='gles3', prms=['mode', 'start', 'count', 'primcount'])
def glDrawArraysInstancedEXT(mode, start, count, primcount):
pass
@params(api='gles3', prms=['mode', 'count', 'type', 'indices', 'primcount'])
def glDrawElementsInstancedEXT(mode, count, type, indices, primcount):
pass
|
{
"content_hash": "944730362f240e63818d1d93a404e102",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 76,
"avg_line_length": 29.818181818181817,
"alnum_prop": 0.7164634146341463,
"repo_name": "cydenix/OpenGLCffi",
"id": "686a3d918218ebaf17cac4992be160be52e01bc8",
"size": "328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "OpenGLCffi/GLES3/EXT/EXT/draw_instanced.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1646"
},
{
"name": "C++",
"bytes": "188"
},
{
"name": "Python",
"bytes": "1853617"
}
],
"symlink_target": ""
}
|
import typing
from pathlib import Path
from typing import List, Text, Union
from ruamel import yaml
if typing.TYPE_CHECKING:
from rasa.shared.core.events import Event
from rasa.shared.core.training_data.structures import StoryStep
class StoryWriter:
@staticmethod
def dumps(
story_steps: List["StoryStep"],
is_appendable: bool = False,
is_test_story: bool = False,
) -> Text:
"""Turns Story steps into an string.
Args:
story_steps: Original story steps to be converted to the YAML.
is_appendable: Specify if result should not contain
high level keys/definitions and can be appended to
the existing story file.
is_test_story: Identifies if the stories should be exported in test stories
format.
Returns:
String with story steps in the desired format.
"""
raise NotImplementedError
@staticmethod
def dump(
target: Union[Text, Path, yaml.StringIO],
story_steps: List["StoryStep"],
is_appendable: bool = False,
is_test_story: bool = False,
) -> None:
"""Writes Story steps into a target file/stream.
Args:
target: name of the target file/stream to write the string to.
story_steps: Original story steps to be converted to the string.
is_appendable: Specify if result should not contain
high level keys/definitions and can be appended to
the existing story file.
is_test_story: Identifies if the stories should be exported in test stories
format.
"""
raise NotImplementedError
@staticmethod
def _filter_event(event: Union["Event", List["Event"]]) -> bool:
"""Identifies if the event should be converted/written.
Args:
event: target event to check.
Returns:
`True` if the event should be converted/written, `False` otherwise.
"""
from rasa.shared.core.training_data.structures import StoryStep
# This is an "OR" statement, so we accept it
if isinstance(event, list):
return True
return (
not StoryStep.is_action_listen(event)
and not StoryStep.is_action_unlikely_intent(event)
and not StoryStep.is_action_session_start(event)
)
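# Usage sketch, kept as a comment because it needs real training data:
# YAMLStoryWriter is the concrete subclass rasa ships for this interface, and
# ``story_steps`` here is a hypothetical list of StoryStep objects.
#
#     from rasa.shared.core.training_data.story_writer.yaml_story_writer \
#         import YAMLStoryWriter
#
#     yaml_text = YAMLStoryWriter.dumps(story_steps, is_test_story=True)
#     YAMLStoryWriter.dump("stories.yml", story_steps)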
|
{
"content_hash": "d22a5bc6503606e863621033644c0966",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 87,
"avg_line_length": 34.3013698630137,
"alnum_prop": 0.597444089456869,
"repo_name": "RasaHQ/rasa_nlu",
"id": "d92f72426787d8fc1e0463ab1e74414da4da3bac",
"size": "2504",
"binary": false,
"copies": "1",
"ref": "refs/heads/emptystring_10504",
"path": "rasa/shared/core/training_data/story_writer/story_writer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "705"
},
{
"name": "HTML",
"bytes": "3462"
},
{
"name": "Makefile",
"bytes": "1044"
},
{
"name": "Python",
"bytes": "1467067"
},
{
"name": "Shell",
"bytes": "941"
}
],
"symlink_target": ""
}
|
import re
import six
from env_tools import load_env
from raven.processors import Processor
from raven.utils import varmap
class SanitizeEnvProcessor(Processor):
"""
Sanitize the environment to prevent leaking data like credit cards and
passwords.
"""
MASK = "*" * 8
FIELDS = []
try:
env = load_env()
if env:
FIELDS = [k for k, _ in env]
except IOError:
pass
VALUES_RE = re.compile(r"^(?:\d[ -]*?){13,16}$")
def sanitize(self, key, value):
if value is None:
return
if not key: # key can be a NoneType
return value
key = key.lower()
        for field in self.FIELDS:
            if field in key:
                # store mask as a fixed length for security
                return self.MASK
        # also mask values that look like card numbers; VALUES_RE was
        # otherwise unused despite the docstring's credit-card promise
        if isinstance(value, six.string_types) and self.VALUES_RE.match(value):
            return self.MASK
        return value
def filter_stacktrace(self, data):
for frame in data.get("frames", []):
if "vars" not in frame:
continue
frame["vars"] = varmap(self.sanitize, frame["vars"])
def filter_http(self, data):
for n in ("data", "cookies", "headers", "env", "query_string"):
if n not in data:
continue
if isinstance(data[n], six.string_types) and "=" in data[n]:
# at this point we've assumed it's a standard HTTP query
querybits = []
for bit in data[n].split("&"):
chunk = bit.split("=")
if len(chunk) == 2:
querybits.append((chunk[0], self.sanitize(*chunk)))
else:
querybits.append(chunk)
data[n] = "&".join("=".join(k) for k in querybits)
else:
data[n] = varmap(self.sanitize, data[n])
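if __name__ == "__main__":
    # Tiny demonstration sketch, not part of the original module. Assuming
    # "password" ended up in FIELDS via the loaded .env file, matching keys
    # are masked while everything else passes through untouched.
    demo = SanitizeEnvProcessor(client=None)
    print(demo.sanitize("db_password", "hunter2"))  # -> ********
    print(demo.sanitize("username", "alice"))  # -> alice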
|
{
"content_hash": "ab4509782b515100bee2de2d1690540f",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 75,
"avg_line_length": 26.014285714285716,
"alnum_prop": 0.5035694673256452,
"repo_name": "PersonalGenomesOrg/open-humans",
"id": "104d7518ed264655830fc87f871b1f50ed18a076",
"size": "1821",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "common/processors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19829"
},
{
"name": "HTML",
"bytes": "296839"
},
{
"name": "JavaScript",
"bytes": "25622"
},
{
"name": "Python",
"bytes": "435909"
},
{
"name": "Shell",
"bytes": "721"
}
],
"symlink_target": ""
}
|
from pylib import constants
from pylib.output import local_output_manager
from pylib.output import remote_output_manager
from pylib.utils import local_utils
def CreateOutputManager(args):
if args.local_output or not local_utils.IsOnSwarming():
return local_output_manager.LocalOutputManager(
output_dir=constants.GetOutDirectory())
else:
return remote_output_manager.RemoteOutputManager(
bucket=args.gs_results_bucket)
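# Hypothetical call sketch, kept as a comment since the pylib imports above
# only resolve inside a chromium checkout. ``args`` is normally the test
# runner's argparse namespace; only these two attributes are consulted here.
#
#   class _FakeArgs(object):
#     local_output = True
#     gs_results_bucket = None
#
#   output_manager = CreateOutputManager(_FakeArgs())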
|
{
"content_hash": "40fb8258ba5565593ea99932fa034291",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 57,
"avg_line_length": 34.53846153846154,
"alnum_prop": 0.7817371937639198,
"repo_name": "endlessm/chromium-browser",
"id": "97a0d88d1ec843f711b386670e3a01075351fed4",
"size": "612",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/android/pylib/base/output_manager_factory.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
class TweepError(Exception):
"""Tweepy exception"""
def __init__(self, reason, response=None):
        self.reason = reason  # was unicode(reason) under Python 2
self.response = response
Exception.__init__(self, reason)
def __str__(self):
return self.reason
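if __name__ == "__main__":
    # Minimal usage sketch, not from the library itself: callers catch a
    # TweepError and can inspect both the reason and the raw API response.
    try:
        raise TweepError("Rate limit exceeded", response=None)
    except TweepError as e:
        print(e.reason, e.response)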
|
{
"content_hash": "ecca1e6dc3870cbf749090924e6be348",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 72,
"avg_line_length": 27.545454545454547,
"alnum_prop": 0.6105610561056105,
"repo_name": "felHR85/Tweepy-3",
"id": "08aa94e97b38fdfcc260818a5515a11d5f5743c3",
"size": "379",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tweepy/error.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "132485"
},
{
"name": "Shell",
"bytes": "4511"
}
],
"symlink_target": ""
}
|
import os
import platform
from subprocess import Popen, STDOUT
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common import utils
import time
class FirefoxBinary(object):
NO_FOCUS_LIBRARY_NAME = "x_ignore_nofocus.so"
def __init__(self, firefox_path=None, log_file=None):
"""
Creates a new instance of Firefox binary.
:Args:
- firefox_path - Path to the Firefox executable. By default, it will be detected from the standard locations.
- log_file - A file object to redirect the firefox process output to. It can be sys.stdout.
Please note that with parallel run the output won't be synchronous.
By default, it will be redirected to /dev/null.
"""
self._start_cmd = firefox_path
# We used to default to subprocess.PIPE instead of /dev/null, but after
# a while the pipe would fill up and Firefox would freeze.
self._log_file = log_file or open(os.devnull, "wb")
self.command_line = None
if self._start_cmd is None:
self._start_cmd = self._get_firefox_start_cmd()
if not self._start_cmd.strip():
raise Exception("Failed to find firefox binary. You can set it by specifying the path to 'firefox_binary':\n\nfrom selenium.webdriver.firefox.firefox_binary import FirefoxBinary\n\n" +
"binary = FirefoxBinary('/path/to/binary')\ndriver = webdriver.Firefox(firefox_binary=binary)")
# Rather than modifying the environment of the calling Python process
# copy it and modify as needed.
self._firefox_env = os.environ.copy()
self._firefox_env["MOZ_CRASHREPORTER_DISABLE"] = "1"
self._firefox_env["MOZ_NO_REMOTE"] = "1"
self._firefox_env["NO_EM_RESTART"] = "1"
def add_command_line_options(self, *args):
self.command_line = args
def launch_browser(self, profile):
"""Launches the browser for the given profile name.
It is assumed the profile already exists.
"""
self.profile = profile
self._start_from_profile_path(self.profile.path)
self._wait_until_connectable()
def kill(self):
"""Kill the browser.
This is useful when the browser is stuck.
"""
if self.process:
self.process.kill()
self.process.wait()
def _start_from_profile_path(self, path):
self._firefox_env["XRE_PROFILE_PATH"] = path
if platform.system().lower() == 'linux':
self._modify_link_library_path()
command = [self._start_cmd, "-foreground"]
if self.command_line is not None:
for cli in self.command_line:
command.append(cli)
self.process = Popen(
command, stdout=self._log_file, stderr=STDOUT,
env=self._firefox_env)
def _wait_until_connectable(self):
"""Blocks until the extension is connectable in the firefox."""
count = 0
while not utils.is_connectable(self.profile.port):
if self.process.poll() is not None:
# Browser has exited
raise WebDriverException("The browser appears to have exited "
"before we could connect. If you specified a log_file in "
"the FirefoxBinary constructor, check it for details.")
if count == 30:
self.kill()
raise WebDriverException("Can't load the profile. Profile "
"Dir: %s If you specified a log_file in the "
"FirefoxBinary constructor, check it for details."
% (self.profile.path))
count += 1
time.sleep(1)
return True
def _find_exe_in_registry(self):
try:
from _winreg import OpenKey, QueryValue, HKEY_LOCAL_MACHINE, HKEY_CURRENT_USER
except ImportError:
from winreg import OpenKey, QueryValue, HKEY_LOCAL_MACHINE, HKEY_CURRENT_USER
import shlex
keys = (
r"SOFTWARE\Classes\FirefoxHTML\shell\open\command",
r"SOFTWARE\Classes\Applications\firefox.exe\shell\open\command"
)
command = ""
for path in keys:
try:
key = OpenKey(HKEY_LOCAL_MACHINE, path)
command = QueryValue(key, "")
break
except OSError:
try:
key = OpenKey(HKEY_CURRENT_USER, path)
command = QueryValue(key, "")
break
except OSError:
pass
else:
return ""
if not command:
return ""
return shlex.split(command)[0]
def _get_firefox_start_cmd(self):
"""Return the command to start firefox."""
start_cmd = ""
if platform.system() == "Darwin":
start_cmd = "/Applications/Firefox.app/Contents/MacOS/firefox-bin"
# fallback to homebrew installation for mac users
if not os.path.exists(start_cmd):
start_cmd = os.path.expanduser("~") + start_cmd
elif platform.system() == "Windows":
start_cmd = (self._find_exe_in_registry() or
self._default_windows_location())
elif platform.system() == 'Java' and os._name == 'nt':
start_cmd = self._default_windows_location()
else:
for ffname in ["firefox", "iceweasel"]:
start_cmd = self.which(ffname)
if start_cmd is not None:
break
else:
# couldn't find firefox on the system path
raise RuntimeError("Could not find firefox in your system PATH." +
" Please specify the firefox binary location or install firefox")
return start_cmd
def _default_windows_location(self):
program_files = [os.getenv("PROGRAMFILES", r"C:\Program Files"),
os.getenv("PROGRAMFILES(X86)", r"C:\Program Files (x86)")]
for path in program_files:
binary_path = os.path.join(path, r"Mozilla Firefox\firefox.exe")
if os.access(binary_path, os.X_OK):
return binary_path
return ""
def _modify_link_library_path(self):
existing_ld_lib_path = os.environ.get('LD_LIBRARY_PATH', '')
new_ld_lib_path = self._extract_and_check(
self.profile, self.NO_FOCUS_LIBRARY_NAME, "x86", "amd64")
new_ld_lib_path += existing_ld_lib_path
self._firefox_env["LD_LIBRARY_PATH"] = new_ld_lib_path
self._firefox_env['LD_PRELOAD'] = self.NO_FOCUS_LIBRARY_NAME
def _extract_and_check(self, profile, no_focus_so_name, x86, amd64):
paths = [x86, amd64]
built_path = ""
for path in paths:
library_path = os.path.join(profile.path, path)
if not os.path.exists(library_path):
os.makedirs(library_path)
import shutil
shutil.copy(os.path.join(os.path.dirname(__file__), path,
self.NO_FOCUS_LIBRARY_NAME),
library_path)
built_path += library_path + ":"
return built_path
def which(self, fname):
"""Returns the fully qualified path by searching Path of the given
name"""
for pe in os.environ['PATH'].split(os.pathsep):
checkname = os.path.join(pe, fname)
if os.access(checkname, os.X_OK) and not os.path.isdir(checkname):
return checkname
return None
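if __name__ == "__main__":
    # Illustrative sketch only, not part of selenium: resolve the firefox
    # binary path the same way the driver would, without launching a browser.
    # Note this raises if no firefox/iceweasel is installed on this machine.
    print(FirefoxBinary()._start_cmd)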
|
{
"content_hash": "4a0b2b5ba299235794cbf982704bc14b",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 194,
"avg_line_length": 40.03664921465968,
"alnum_prop": 0.5760428926376356,
"repo_name": "lukeis/selenium",
"id": "63fdb8a4cdb107c16b932b5996b881a2990cc12c",
"size": "8436",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "py/selenium/webdriver/firefox/firefox_binary.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "825"
},
{
"name": "AppleScript",
"bytes": "2614"
},
{
"name": "Batchfile",
"bytes": "307"
},
{
"name": "C",
"bytes": "62267"
},
{
"name": "C#",
"bytes": "2822383"
},
{
"name": "C++",
"bytes": "1901617"
},
{
"name": "CSS",
"bytes": "25162"
},
{
"name": "HTML",
"bytes": "1874270"
},
{
"name": "Java",
"bytes": "5323974"
},
{
"name": "JavaScript",
"bytes": "5690500"
},
{
"name": "Makefile",
"bytes": "4655"
},
{
"name": "Python",
"bytes": "710910"
},
{
"name": "Ragel in Ruby Host",
"bytes": "3086"
},
{
"name": "Ruby",
"bytes": "1040337"
},
{
"name": "Shell",
"bytes": "1305"
},
{
"name": "XSLT",
"bytes": "1047"
}
],
"symlink_target": ""
}
|
import os
from cookiecutter import repository, exceptions
import pytest
def test_finds_local_repo(tmpdir):
"""A valid local repository should be returned."""
project_dir = repository.determine_repo_dir(
'tests/fake-repo',
abbreviations={},
clone_to_dir=str(tmpdir),
checkout=None,
no_input=True
)
assert 'tests/fake-repo' == project_dir
def test_local_repo_with_no_context_raises(tmpdir):
"""A local repository without a cookiecutter.json should raise a
`RepositoryNotFound` exception.
"""
template_path = os.path.join('tests', 'fake-repo-bad')
with pytest.raises(exceptions.RepositoryNotFound) as err:
repository.determine_repo_dir(
template_path,
abbreviations={},
clone_to_dir=str(tmpdir),
checkout=None,
no_input=True
)
assert str(err.value) == (
'A valid repository for "{}" could not be found in the following '
'locations:\n{}'.format(
template_path,
'\n'.join([
template_path,
str(tmpdir / 'tests/fake-repo-bad')
]),
)
)
def test_local_repo_typo(tmpdir):
"""An unknown local repository should raise a `RepositoryNotFound`
exception.
"""
template_path = os.path.join('tests', 'unknown-repo')
with pytest.raises(exceptions.RepositoryNotFound) as err:
repository.determine_repo_dir(
template_path,
abbreviations={},
clone_to_dir=str(tmpdir),
checkout=None,
no_input=True
)
assert str(err.value) == (
'A valid repository for "{}" could not be found in the following '
'locations:\n{}'.format(
template_path,
'\n'.join([
template_path,
str(tmpdir / 'tests/unknown-repo')
]),
)
)
|
{
"content_hash": "5a0ba02331679e69b08c2c42c7ce9c1c",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 74,
"avg_line_length": 28.115942028985508,
"alnum_prop": 0.565979381443299,
"repo_name": "terryjbates/cookiecutter",
"id": "b2ca4f1d1f17206c882a0aadeefe6d8e0fa6184f",
"size": "1964",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/repository/test_determine_repository_should_use_local_repo.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2581"
},
{
"name": "Python",
"bytes": "188738"
},
{
"name": "Shell",
"bytes": "161"
}
],
"symlink_target": ""
}
|
import unittest
import os
import sys
import shutil
import logging
import errno
import oeqa.utils.ftools as ftools
from oeqa.utils.commands import runCmd, bitbake, get_bb_var, get_test_layer
from oeqa.utils.decorators import LogResults
@LogResults
class oeSelfTest(unittest.TestCase):
log = logging.getLogger("selftest.base")
longMessage = True
def __init__(self, methodName="runTest"):
self.builddir = os.environ.get("BUILDDIR")
self.localconf_path = os.path.join(self.builddir, "conf/local.conf")
self.testinc_path = os.path.join(self.builddir, "conf/selftest.inc")
self.testlayer_path = oeSelfTest.testlayer_path
self._extra_tear_down_commands = []
self._track_for_cleanup = []
super(oeSelfTest, self).__init__(methodName)
def setUp(self):
os.chdir(self.builddir)
# we don't know what the previous test left around in config or inc files
# if it failed so we need a fresh start
try:
os.remove(self.testinc_path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
for root, _, files in os.walk(self.testlayer_path):
for f in files:
if f == 'test_recipe.inc':
os.remove(os.path.join(root, f))
# tests might need their own setup
# but if they overwrite this one they have to call
# super each time, so let's give them an alternative
self.setUpLocal()
def setUpLocal(self):
pass
def tearDown(self):
if self._extra_tear_down_commands:
failed_extra_commands = []
for command in self._extra_tear_down_commands:
result = runCmd(command, ignore_status=True)
if not result.status == 0:
failed_extra_commands.append(command)
if failed_extra_commands:
self.log.warning("tearDown commands have failed: %s" % ', '.join(map(str, failed_extra_commands)))
self.log.debug("Trying to move on.")
self._extra_tear_down_commands = []
if self._track_for_cleanup:
for path in self._track_for_cleanup:
if os.path.isdir(path):
shutil.rmtree(path)
if os.path.isfile(path):
os.remove(path)
self._track_for_cleanup = []
self.tearDownLocal()
def tearDownLocal(self):
pass
# add test specific commands to the tearDown method.
def add_command_to_tearDown(self, command):
self.log.debug("Adding command '%s' to tearDown for this test." % command)
self._extra_tear_down_commands.append(command)
# add test specific files or directories to be removed in the tearDown method
def track_for_cleanup(self, path):
self.log.debug("Adding path '%s' to be cleaned up when test is over" % path)
self._track_for_cleanup.append(path)
# write to <builddir>/conf/selftest.inc
def write_config(self, data):
self.log.debug("Writing to: %s\n%s\n" % (self.testinc_path, data))
ftools.write_file(self.testinc_path, data)
# append to <builddir>/conf/selftest.inc
def append_config(self, data):
self.log.debug("Appending to: %s\n%s\n" % (self.testinc_path, data))
ftools.append_file(self.testinc_path, data)
# remove data from <builddir>/conf/selftest.inc
def remove_config(self, data):
self.log.debug("Removing from: %s\n\%s\n" % (self.testinc_path, data))
ftools.remove_from_file(self.testinc_path, data)
    # write to meta-selftest/recipes-test/<recipe>/test_recipe.inc
def write_recipeinc(self, recipe, data):
inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
self.log.debug("Writing to: %s\n%s\n" % (inc_file, data))
ftools.write_file(inc_file, data)
    # append data to meta-selftest/recipes-test/<recipe>/test_recipe.inc
def append_recipeinc(self, recipe, data):
inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
self.log.debug("Appending to: %s\n%s\n" % (inc_file, data))
ftools.append_file(inc_file, data)
    # remove data from meta-selftest/recipes-test/<recipe>/test_recipe.inc
def remove_recipeinc(self, recipe, data):
inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
self.log.debug("Removing from: %s\n%s\n" % (inc_file, data))
ftools.remove_from_file(inc_file, data)
    # delete meta-selftest/recipes-test/<recipe>/test_recipe.inc file
def delete_recipeinc(self, recipe):
inc_file = os.path.join(self.testlayer_path, 'recipes-test', recipe, 'test_recipe.inc')
self.log.debug("Deleting file: %s" % inc_file)
try:
os.remove(inc_file)
except OSError as e:
if e.errno != errno.ENOENT:
raise
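# Hypothetical subclass sketch (not part of oeqa) showing how the helpers
# above compose: write a config fragment, register paths and commands for
# cleanup, and rely on tearDown to undo everything. The recipe name and the
# directory used here are purely illustrative.
class ExampleSelfTest(oeSelfTest):
    def test_write_and_cleanup(self):
        self.write_config('IMAGE_INSTALL_append = " strace"')
        self.track_for_cleanup(os.path.join(self.builddir, 'tmp-example'))
        self.add_command_to_tearDown('bitbake -c clean strace')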
|
{
"content_hash": "1451ffa67aefce5745265511d8031fa7",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 114,
"avg_line_length": 40.56910569105691,
"alnum_prop": 0.6210420841683367,
"repo_name": "0x000000FF/yocto-mostfun-pro",
"id": "80b9b4b312591bb7551ea1cd33c6dbb17e97f508",
"size": "5159",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/oeqa/selftest/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "49917"
},
{
"name": "BitBake",
"bytes": "1111034"
},
{
"name": "BlitzBasic",
"bytes": "3575"
},
{
"name": "C",
"bytes": "702832"
},
{
"name": "C++",
"bytes": "412087"
},
{
"name": "CMake",
"bytes": "880"
},
{
"name": "Groff",
"bytes": "464798"
},
{
"name": "HTML",
"bytes": "4023"
},
{
"name": "Makefile",
"bytes": "18085"
},
{
"name": "PHP",
"bytes": "424137"
},
{
"name": "Pascal",
"bytes": "10149"
},
{
"name": "Perl",
"bytes": "10266"
},
{
"name": "Python",
"bytes": "463016"
},
{
"name": "Shell",
"bytes": "307888"
},
{
"name": "SourcePawn",
"bytes": "185848"
},
{
"name": "Tcl",
"bytes": "4897"
}
],
"symlink_target": ""
}
|
from django.core.management.base import NoArgsCommand
from basic_profiles.models import Profile
from django.contrib.auth.models import User
from django.conf import settings
class Command(NoArgsCommand):
help = 'Create a profile object for users which do not have one.'
def handle_noargs(self, **options):
for usr in User.objects.all():
profile, is_new = Profile.objects.get_or_create(user=usr)
if is_new: profile.save()
|
{
"content_hash": "b76ea3771c227bf591268ea57024c31a",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 69,
"avg_line_length": 38.5,
"alnum_prop": 0.7186147186147186,
"repo_name": "ericholscher/pinax",
"id": "496f3adc37e05094e4fb59c24c6e1f1a48d8b6dc",
"size": "462",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "pinax/apps/basic_profiles/management/commands/create_profiles_for_users.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""Test for pyinstaller compatibility."""
from __future__ import absolute_import
from unittest import TestCase, SkipTest
from tempfile import mkdtemp, NamedTemporaryFile
from subprocess import check_call, CalledProcessError
import os
from six import PY2
if PY2:
FileNotFoundError = OSError
class PyInstallerTests(TestCase):
"""Make sure PyInstaller doesn't break Eliot."""
def setUp(self):
try:
check_call(["pyinstaller", "--help"])
except (CalledProcessError, FileNotFoundError):
raise SkipTest("Can't find pyinstaller.")
def test_importable(self):
"""The Eliot package can be imported inside a PyInstaller packaged binary."""
output_dir = mkdtemp()
with NamedTemporaryFile(mode="w") as f:
f.write("import eliot; import eliot.prettyprint\n")
f.flush()
check_call(
[
"pyinstaller",
"--distpath",
output_dir,
"-F",
"-n",
"importeliot",
f.name,
]
)
check_call([os.path.join(output_dir, "importeliot")])
|
{
"content_hash": "1bd0252c7932a57d7ba1a218a3f14890",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 85,
"avg_line_length": 28.976190476190474,
"alnum_prop": 0.5595727198027938,
"repo_name": "ClusterHQ/eliot",
"id": "1b07c92af9371b5de579268336567a64b94eebe2",
"size": "1217",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "eliot/tests/test_pyinstaller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "392375"
}
],
"symlink_target": ""
}
|
"""Built-in volume type properties."""
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
import six
from cinder import context
from cinder import db
from cinder import exception
from cinder.i18n import _, _LE
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def create(context,
name,
extra_specs=None,
is_public=True,
projects=None,
description=None):
"""Creates volume types."""
extra_specs = extra_specs or {}
projects = projects or []
try:
type_ref = db.volume_type_create(context,
dict(name=name,
extra_specs=extra_specs,
is_public=is_public,
description=description),
projects=projects)
except db_exc.DBError as e:
LOG.exception(_LE('DB error: %s') % six.text_type(e))
raise exception.VolumeTypeCreateFailed(name=name,
extra_specs=extra_specs)
return type_ref
def update(context, id, name, description):
"""Update volume type by id."""
if id is None:
msg = _("id cannot be None")
raise exception.InvalidVolumeType(reason=msg)
try:
type_updated = db.volume_type_update(context,
id,
dict(name=name,
description=description))
except db_exc.DBError as e:
LOG.exception(_LE('DB error: %s') % six.text_type(e))
raise exception.VolumeTypeUpdateFailed(id=id)
return type_updated
def destroy(context, id):
"""Marks volume types as deleted."""
if id is None:
msg = _("id cannot be None")
raise exception.InvalidVolumeType(reason=msg)
else:
db.volume_type_destroy(context, id)
def get_all_types(context, inactive=0, search_opts=None):
"""Get all non-deleted volume_types.
Pass true as argument if you want deleted volume types returned also.
"""
search_opts = search_opts or {}
filters = {}
if 'is_public' in search_opts:
filters['is_public'] = search_opts['is_public']
del search_opts['is_public']
vol_types = db.volume_type_get_all(context, inactive, filters=filters)
if search_opts:
LOG.debug("Searching by: %s" % search_opts)
def _check_extra_specs_match(vol_type, searchdict):
for k, v in searchdict.iteritems():
if (k not in vol_type['extra_specs'].keys()
or vol_type['extra_specs'][k] != v):
return False
return True
# search_option to filter_name mapping.
filter_mapping = {'extra_specs': _check_extra_specs_match}
result = {}
for type_name, type_args in vol_types.iteritems():
# go over all filters in the list
for opt, values in search_opts.iteritems():
try:
filter_func = filter_mapping[opt]
except KeyError:
# no such filter - ignore it, go to next filter
continue
else:
if filter_func(type_args, values):
result[type_name] = type_args
break
vol_types = result
return vol_types
def get_volume_type(ctxt, id, expected_fields=None):
"""Retrieves single volume type by id."""
if id is None:
msg = _("id cannot be None")
raise exception.InvalidVolumeType(reason=msg)
if ctxt is None:
ctxt = context.get_admin_context()
return db.volume_type_get(ctxt, id, expected_fields=expected_fields)
def get_volume_type_by_name(context, name):
"""Retrieves single volume type by name."""
if name is None:
msg = _("name cannot be None")
raise exception.InvalidVolumeType(reason=msg)
return db.volume_type_get_by_name(context, name)
def get_default_volume_type():
"""Get the default volume type."""
name = CONF.default_volume_type
vol_type = {}
if name is not None:
ctxt = context.get_admin_context()
try:
vol_type = get_volume_type_by_name(ctxt, name)
except exception.VolumeTypeNotFoundByName as e:
# Couldn't find volume type with the name in default_volume_type
# flag, record this issue and move on
# TODO(zhiteng) consider add notification to warn admin
            LOG.exception(_LE('Default volume type is not found, '
                              'please check default_volume_type config: %s') %
                          six.text_type(e))
return vol_type
def get_volume_type_extra_specs(volume_type_id, key=False):
volume_type = get_volume_type(context.get_admin_context(),
volume_type_id)
extra_specs = volume_type['extra_specs']
if key:
if extra_specs.get(key):
return extra_specs.get(key)
else:
return False
else:
return extra_specs
def add_volume_type_access(context, volume_type_id, project_id):
"""Add access to volume type for project_id."""
if volume_type_id is None:
msg = _("volume_type_id cannot be None")
raise exception.InvalidVolumeType(reason=msg)
return db.volume_type_access_add(context, volume_type_id, project_id)
def remove_volume_type_access(context, volume_type_id, project_id):
"""Remove access to volume type for project_id."""
if volume_type_id is None:
msg = _("volume_type_id cannot be None")
raise exception.InvalidVolumeType(reason=msg)
return db.volume_type_access_remove(context, volume_type_id, project_id)
def is_encrypted(context, volume_type_id):
if volume_type_id is None:
return False
encryption = db.volume_type_encryption_get(context, volume_type_id)
return encryption is not None
def get_volume_type_encryption(context, volume_type_id):
if volume_type_id is None:
return None
encryption = db.volume_type_encryption_get(context, volume_type_id)
return encryption
def get_volume_type_qos_specs(volume_type_id):
ctxt = context.get_admin_context()
res = db.volume_type_qos_specs_get(ctxt,
volume_type_id)
return res
def volume_types_diff(context, vol_type_id1, vol_type_id2):
"""Returns a 'diff' of two volume types and whether they are equal.
Returns a tuple of (diff, equal), where 'equal' is a boolean indicating
whether there is any difference, and 'diff' is a dictionary with the
following format:
{'extra_specs': {'key1': (value_in_1st_vol_type, value_in_2nd_vol_type),
'key2': (value_in_1st_vol_type, value_in_2nd_vol_type),
...}
'qos_specs': {'key1': (value_in_1st_vol_type, value_in_2nd_vol_type),
'key2': (value_in_1st_vol_type, value_in_2nd_vol_type),
...}
'encryption': {'cipher': (value_in_1st_vol_type, value_in_2nd_vol_type),
{'key_size': (value_in_1st_vol_type, value_in_2nd_vol_type),
...}
"""
def _fix_qos_specs(qos_specs):
if qos_specs:
qos_specs.pop('id', None)
qos_specs.pop('name', None)
qos_specs.update(qos_specs.pop('specs', {}))
def _fix_encryption_specs(encryption):
if encryption:
encryption = dict(encryption)
for param in ['volume_type_id', 'created_at', 'updated_at',
'deleted_at']:
encryption.pop(param, None)
return encryption
def _dict_diff(dict1, dict2):
res = {}
equal = True
if dict1 is None:
dict1 = {}
if dict2 is None:
dict2 = {}
for k, v in dict1.iteritems():
res[k] = (v, dict2.get(k))
if k not in dict2 or res[k][0] != res[k][1]:
equal = False
for k, v in dict2.iteritems():
res[k] = (dict1.get(k), v)
if k not in dict1 or res[k][0] != res[k][1]:
equal = False
return (res, equal)
all_equal = True
diff = {}
vol_type_data = []
for vol_type_id in (vol_type_id1, vol_type_id2):
if vol_type_id is None:
specs = {'extra_specs': None,
'qos_specs': None,
'encryption': None}
else:
specs = {}
vol_type = get_volume_type(context, vol_type_id)
specs['extra_specs'] = vol_type.get('extra_specs')
qos_specs = get_volume_type_qos_specs(vol_type_id)
specs['qos_specs'] = qos_specs.get('qos_specs')
_fix_qos_specs(specs['qos_specs'])
specs['encryption'] = get_volume_type_encryption(context,
vol_type_id)
specs['encryption'] = _fix_encryption_specs(specs['encryption'])
vol_type_data.append(specs)
diff['extra_specs'], equal = _dict_diff(vol_type_data[0]['extra_specs'],
vol_type_data[1]['extra_specs'])
if not equal:
all_equal = False
diff['qos_specs'], equal = _dict_diff(vol_type_data[0]['qos_specs'],
vol_type_data[1]['qos_specs'])
if not equal:
all_equal = False
diff['encryption'], equal = _dict_diff(vol_type_data[0]['encryption'],
vol_type_data[1]['encryption'])
if not equal:
all_equal = False
return (diff, all_equal)
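# Worked sketch of the structure documented above, with hypothetical values:
# diffing a type whose extra_specs are {'foo': 'bar'} against one with
# {'foo': 'baz'} (neither having qos specs nor encryption) would return
#
#     ({'extra_specs': {'foo': ('bar', 'baz')},
#       'qos_specs': {},
#       'encryption': {}},
#      False)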
|
{
"content_hash": "040cabc77a832e3bf5d8cf492ca63d39",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 79,
"avg_line_length": 34.67368421052632,
"alnum_prop": 0.5521149564865412,
"repo_name": "rakeshmi/cinder",
"id": "1b3cbadb2f7d95600f5fc146f8baae9913e8ec48",
"size": "10744",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "cinder/volume/volume_types.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PLpgSQL",
"bytes": "2511"
},
{
"name": "Python",
"bytes": "10782777"
},
{
"name": "Shell",
"bytes": "8111"
}
],
"symlink_target": ""
}
|
from .. import util
from . import util as import_util
import importlib._bootstrap
import sys
from types import MethodType
import unittest
import warnings
class CallingOrder:
"""Calls to the importers on sys.meta_path happen in order that they are
specified in the sequence, starting with the first importer
[first called], and then continuing on down until one is found that doesn't
return None [continuing]."""
def test_first_called(self):
# [first called]
mod = 'top_level'
with util.mock_spec(mod) as first, util.mock_spec(mod) as second:
with util.import_state(meta_path=[first, second]):
self.assertIs(self.__import__(mod), first.modules[mod])
def test_continuing(self):
# [continuing]
mod_name = 'for_real'
with util.mock_spec('nonexistent') as first, \
util.mock_spec(mod_name) as second:
first.find_spec = lambda self, fullname, path=None, parent=None: None
with util.import_state(meta_path=[first, second]):
self.assertIs(self.__import__(mod_name), second.modules[mod_name])
def test_empty(self):
# Raise an ImportWarning if sys.meta_path is empty.
module_name = 'nothing'
try:
del sys.modules[module_name]
except KeyError:
pass
with util.import_state(meta_path=[]):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
self.assertIsNone(importlib._bootstrap._find_spec('nothing',
None))
self.assertEqual(len(w), 1)
self.assertTrue(issubclass(w[-1].category, ImportWarning))
Frozen_CallingOrder, Source_CallingOrder = util.test_both(
CallingOrder, __import__=import_util.__import__)
class CallSignature:
"""If there is no __path__ entry on the parent module, then 'path' is None
[no path]. Otherwise, the value for __path__ is passed in for the 'path'
argument [path set]."""
def log_finder(self, importer):
fxn = getattr(importer, self.finder_name)
log = []
def wrapper(self, *args, **kwargs):
log.append([args, kwargs])
return fxn(*args, **kwargs)
return log, wrapper
def test_no_path(self):
# [no path]
mod_name = 'top_level'
assert '.' not in mod_name
with self.mock_modules(mod_name) as importer:
log, wrapped_call = self.log_finder(importer)
setattr(importer, self.finder_name, MethodType(wrapped_call, importer))
with util.import_state(meta_path=[importer]):
self.__import__(mod_name)
assert len(log) == 1
args = log[0][0]
kwargs = log[0][1]
# Assuming all arguments are positional.
self.assertEqual(args[0], mod_name)
self.assertIsNone(args[1])
def test_with_path(self):
# [path set]
pkg_name = 'pkg'
mod_name = pkg_name + '.module'
path = [42]
assert '.' in mod_name
with self.mock_modules(pkg_name+'.__init__', mod_name) as importer:
importer.modules[pkg_name].__path__ = path
log, wrapped_call = self.log_finder(importer)
setattr(importer, self.finder_name, MethodType(wrapped_call, importer))
with util.import_state(meta_path=[importer]):
self.__import__(mod_name)
assert len(log) == 2
args = log[1][0]
kwargs = log[1][1]
# Assuming all arguments are positional.
self.assertFalse(kwargs)
self.assertEqual(args[0], mod_name)
self.assertIs(args[1], path)
class CallSignaturePEP302(CallSignature):
mock_modules = util.mock_modules
finder_name = 'find_module'
Frozen_CallSignaturePEP302, Source_CallSignaturePEP302 = util.test_both(
CallSignaturePEP302, __import__=import_util.__import__)
class CallSignaturePEP451(CallSignature):
mock_modules = util.mock_spec
finder_name = 'find_spec'
Frozen_CallSignaturePEP451, Source_CallSignaturePEP451 = util.test_both(
CallSignaturePEP451, __import__=import_util.__import__)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "9a1783c72376721397033986a438d3ab",
"timestamp": "",
"source": "github",
"line_count": 363,
"max_line_length": 83,
"avg_line_length": 36.86501377410468,
"alnum_prop": 0.5894485129278135,
"repo_name": "ArcherSys/ArcherSys",
"id": "76381d1b5e74e542fa5f4f25e0ea24c859abcfdb",
"size": "13382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lib/test/test_importlib/import_/test_meta_path.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""
Holds the formencode validators for debexpo.
"""
__author__ = 'Jonny Lamb'
__copyright__ = 'Copyright © 2008 Jonny Lamb'
__license__ = 'MIT'
import formencode
import logging
from debexpo.lib.base import *
from debexpo.lib.gnupg import GnuPG
from debexpo.model import meta
from debexpo.model.users import User
from debexpo.lib import constants
import debexpo.lib.utils
log = logging.getLogger(__name__)
class GpgKey(formencode.validators.FieldStorageUploadConverter):
"""
Validator for an uploaded GPG key. They must with the 'BEGIN PGP PUBLIC KEY BLOCK'
text.
"""
def __init__(self):
self.gpg_id = None
self.gnupg = GnuPG()
def _to_python(self, value, c):
"""
Validate the GPG key.
``value``
FieldStorage uploaded file.
``c``
"""
if not value.value.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----'):
log.error('GPG key does not start with BEGIN PGP PUBLIC KEY BLOCK')
raise formencode.Invalid(_('Invalid GPG key'), value, c)
if self.gnupg.is_unusable():
log.error('Unable to validate GPG key because gpg is unusable.')
            raise formencode.Invalid(_('Internal error: debexpo is not ' +
                                       'properly configured to handle ' +
                                       'GPG keys'), value, c)
self.gpg_id = self.gnupg.parse_key_id(value.value)
if self.gpg_id is None:
log.error("Failed to parse GPG key")
raise formencode.Invalid(_('Invalid GPG key'), value, c)
return formencode.validators.FieldStorageUploadConverter._to_python(self, value, c)
def key_id(self):
return self.gpg_id
class CurrentPassword(formencode.validators.String):
"""
Validator for a current password depending on the session's user_id.
"""
def _to_python(self, value, c):
"""
Validate the password.
"""
user = meta.session.query(User).get(session['user_id'])
if user.password != debexpo.lib.utils.hash_it(value):
log.error('Incorrect current password')
raise formencode.Invalid(_('Incorrect password'), value, c)
return formencode.validators.String._to_python(self, value, c)
class CheckBox(formencode.validators.Int):
"""
Validator for a checkbox. When not checked, it doesn't send, and formencode
complains.
"""
if_missing = None
class NewEmailToSystem(formencode.validators.Email):
"""
Email validator class to make sure there is not another user with
the same email address already registered.
"""
def _to_python(self, value, c=None):
"""
Validate the email address.
``value``
Address to validate.
``c``
"""
u = meta.session.query(User).filter_by(email=value)
# c.user_id can contain a user_id that should be ignored (i.e. when the user
# wants to keep the same email).
if hasattr(c, 'user_id'):
u = u.filter(User.id != c.user_id)
u = u.first()
if u is not None:
log.error('Email %s already found on system' % value)
raise formencode.Invalid(_('A user with this email address is already registered on the system'), value, c)
return formencode.validators.Email._to_python(self, value, c)
class NewNameToSystem(formencode.FancyValidator):
"""
Name validation class to make sure there is not another user with
the same name already registered.
"""
def _to_python(self, value, c=None):
"""
Validate the name address.
``value``
Name to validate.
``c``
"""
u = meta.session.query(User).filter_by(name=value)
# c.user_id can contain a user_id that should be ignored (i.e. when the user
# wants to keep the same email).
if hasattr(c, 'user_id'):
u = u.filter(User.id != c.user_id)
u = u.first()
if u is not None:
log.error('Name %s already found on system' % value)
raise formencode.Invalid(_('A user with this name is already registered on the system. If it is you, use that account! Otherwise use a different name to register.'), value, c)
return value
def ValidateSponsorEmail(values, state, validator):
if values['sponsor'] == '1' and not values['email'].endswith('@debian.org'):
return {'sponsor': 'A sponsor account must be registered with your @debian.org address' }
class DummyValidator(formencode.FancyValidator):
pass
def ValidatePackagingGuidelines(values, state, validator):
try:
if values['packaging_guidelines'] == constants.SPONSOR_GUIDELINES_TYPE_TEXT:
formencode.validators.String(min=1).to_python(values['packaging_guideline_text'])
elif values['packaging_guidelines'] == constants.SPONSOR_GUIDELINES_TYPE_URL:
formencode.validators.URL(add_http=True).to_python(values['packaging_guideline_text'])
else:
formencode.validators.Empty().to_python(values['packaging_guideline_text'])
except Exception as e:
return {'packaging_guideline_text': e}
return None
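# Hypothetical schema sketch (not part of debexpo) showing how the validators
# above are meant to compose inside a formencode Schema; the field names are
# illustrative only.
class ExampleRegisterForm(formencode.Schema):
    email = NewEmailToSystem(not_empty=True)
    name = NewNameToSystem()
    sponsor = CheckBox()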
|
{
"content_hash": "b89836301d4e27410fb263bbd4a1b4f1",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 187,
"avg_line_length": 32.6583850931677,
"alnum_prop": 0.6215290985165463,
"repo_name": "swvist/Debexpo",
"id": "021b4dddbd7021330570496dc4e00486b698302b",
"size": "6576",
"binary": false,
"copies": "1",
"ref": "refs/heads/gsoc",
"path": "debexpo/lib/validators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "3986"
},
{
"name": "Python",
"bytes": "411992"
},
{
"name": "Shell",
"bytes": "3905"
}
],
"symlink_target": ""
}
|
"""Test data generators producing signals pairs intended to be used to
test the APM module. Each pair consists of a noisy input and a reference signal.
The former is used as APM input and it is generated by adding noise to a
clean audio track. The reference is the expected APM output.
Throughout this file, the following naming convention is used:
- input signal: the clean signal (e.g., speech),
- noise signal: the noise to be summed up to the input signal (e.g., white
noise, Gaussian noise),
- noisy signal: input + noise.
The noise signal may or may not be a function of the clean signal. For
instance, white noise is independently generated, whereas reverberation is
obtained by convolving the input signal with an impulse response.
"""
import logging
import os
import sys
try:
import scipy.io
except ImportError:
logging.critical('Cannot import the third-party Python package scipy')
sys.exit(1)
from . import data_access
from . import exceptions
from . import signal_processing
class TestDataGenerator(object):
"""Abstract class responsible for the generation of noisy signals.
Given a clean signal, it generates two streams named noisy signal and
reference. The former is the clean signal deteriorated by the noise source,
the latter goes through the same deterioration process, but more "gently".
Noisy signal and reference are produced so that the reference is the signal
  expected at the output of the APM module when the latter is fed with the
  noisy signal.
  A test data generator generates one or more pairs.
"""
NAME = None
REGISTERED_CLASSES = {}
def __init__(self):
# Init dictionaries with one entry for each test data generator
# configuration (e.g., different SNRs).
# Noisy audio track files (stored separately in a cache folder).
self._noisy_signal_filepaths = None
# Path to be used for the APM simulation output files.
self._apm_output_paths = None
# Reference audio track files (stored separately in a cache folder).
self._reference_signal_filepaths = None
self.Clear()
@classmethod
def RegisterClass(cls, class_to_register):
"""Registers an TestDataGenerator implementation.
Decorator to automatically register the classes that extend
TestDataGenerator.
Example usage:
@TestDataGenerator.RegisterClass
class IdentityGenerator(TestDataGenerator):
pass
"""
cls.REGISTERED_CLASSES[class_to_register.NAME] = class_to_register
return class_to_register
@property
def config_names(self):
return self._noisy_signal_filepaths.keys()
@property
def noisy_signal_filepaths(self):
return self._noisy_signal_filepaths
@property
def apm_output_paths(self):
return self._apm_output_paths
@property
def reference_signal_filepaths(self):
return self._reference_signal_filepaths
def Generate(
self, input_signal_filepath, input_noise_cache_path, base_output_path):
"""Generates a set of noisy input and reference audiotrack file pairs.
This method initializes an empty set of pairs and calls the _Generate()
method implemented in a concrete class.
Args:
input_signal_filepath: path to the clean input audio track file.
input_noise_cache_path: path to the cache of noisy audio track files.
base_output_path: base path where output is written.
"""
self.Clear()
self._Generate(
input_signal_filepath, input_noise_cache_path, base_output_path)
def Clear(self):
"""Clears the generated output path dictionaries.
"""
self._noisy_signal_filepaths = {}
self._apm_output_paths = {}
self._reference_signal_filepaths = {}
def _Generate(
self, input_signal_filepath, input_noise_cache_path, base_output_path):
"""Abstract method to be implemented in each concrete class.
"""
raise NotImplementedError()
def _AddNoiseSnrPairs(self, base_output_path, noisy_mix_filepaths,
snr_value_pairs):
"""Adds noisy-reference signal pairs.
Args:
base_output_path: noisy tracks base output path.
noisy_mix_filepaths: nested dictionary of noisy signal paths organized
by noisy track name and SNR level.
snr_value_pairs: list of SNR pairs.
"""
for noise_track_name in noisy_mix_filepaths:
      for snr_noisy, snr_reference in snr_value_pairs:
        config_name = '{0}_{1:d}_{2:d}_SNR'.format(
            noise_track_name, snr_noisy, snr_reference)
        output_path = self._MakeDir(base_output_path, config_name)
        self._AddNoiseReferenceFilesPair(
            config_name=config_name,
            noisy_signal_filepath=noisy_mix_filepaths[
                noise_track_name][snr_noisy],
            reference_signal_filepath=noisy_mix_filepaths[
                noise_track_name][snr_reference],
            output_path=output_path)
def _AddNoiseReferenceFilesPair(self, config_name, noisy_signal_filepath,
reference_signal_filepath, output_path):
"""Adds one noisy-reference signal pair.
Args:
config_name: name of the APM configuration.
noisy_signal_filepath: path to noisy audio track file.
reference_signal_filepath: path to reference audio track file.
output_path: APM output path.
"""
assert config_name not in self._noisy_signal_filepaths
self._noisy_signal_filepaths[config_name] = os.path.abspath(
noisy_signal_filepath)
self._apm_output_paths[config_name] = os.path.abspath(output_path)
self._reference_signal_filepaths[config_name] = os.path.abspath(
reference_signal_filepath)
# Save noisy and reference file paths.
data_access.Metadata.SaveAudioTestDataPaths(
output_path=output_path,
audio_in_filepath=self._noisy_signal_filepaths[config_name],
audio_ref_filepath=self._reference_signal_filepaths[config_name])
@classmethod
def _MakeDir(cls, base_output_path, test_data_generator_config_name):
output_path = os.path.join(
base_output_path, test_data_generator_config_name)
data_access.MakeDirectory(output_path)
return output_path
@TestDataGenerator.RegisterClass
class IdentityTestDataGenerator(TestDataGenerator):
"""Generator that adds no noise.
Both the noisy and the reference signals are the input signal.
"""
NAME = 'identity'
def __init__(self):
TestDataGenerator.__init__(self)
def _Generate(
self, input_signal_filepath, input_noise_cache_path, base_output_path):
config_name = 'default'
output_path = self._MakeDir(base_output_path, config_name)
self._AddNoiseReferenceFilesPair(
config_name=config_name,
noisy_signal_filepath=input_signal_filepath,
reference_signal_filepath=input_signal_filepath,
output_path=output_path)
@TestDataGenerator.RegisterClass
class WhiteNoiseTestDataGenerator(TestDataGenerator):
"""Generator that adds white noise.
"""
NAME = 'white_noise'
# Each pair indicates the clean vs. noisy and reference vs. noisy SNRs.
# The reference (second value of each pair) always has a lower amount of noise
# - i.e., the SNR is 10 dB higher.
_SNR_VALUE_PAIRS = [
[20, 30], # Smallest noise.
[10, 20],
[5, 15],
[0, 10], # Largest noise.
]
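  # For example (illustrative): the pair [20, 30] yields the configuration
  # name '20_30_SNR', where the noisy input has 20 dB SNR and the reference
  # has 30 dB SNR.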
_NOISY_SIGNAL_FILENAME_TEMPLATE = 'noise_{0:d}_SNR.wav'
def __init__(self):
TestDataGenerator.__init__(self)
def _Generate(
self, input_signal_filepath, input_noise_cache_path, base_output_path):
# Load the input signal.
input_signal = signal_processing.SignalProcessingUtils.LoadWav(
input_signal_filepath)
input_signal = signal_processing.SignalProcessingUtils.Normalize(
input_signal)
# Create the noise track.
noise_signal = signal_processing.SignalProcessingUtils.GenerateWhiteNoise(
input_signal)
noise_signal = signal_processing.SignalProcessingUtils.Normalize(
noise_signal)
# Create the noisy mixes (once for each unique SNR value).
noisy_mix_filepaths = {}
snr_values = set([snr for pair in self._SNR_VALUE_PAIRS for snr in pair])
for snr in snr_values:
noisy_signal_filepath = os.path.join(
input_noise_cache_path,
self._NOISY_SIGNAL_FILENAME_TEMPLATE.format(snr))
# Create and save if not done.
if not os.path.exists(noisy_signal_filepath):
# Create noisy signal.
noisy_signal = signal_processing.SignalProcessingUtils.MixSignals(
input_signal, noise_signal, snr)
# Save.
signal_processing.SignalProcessingUtils.SaveWav(
noisy_signal_filepath, noisy_signal)
# Add file to the collection of mixes.
noisy_mix_filepaths[snr] = noisy_signal_filepath
# Add all the noisy-reference signal pairs.
    for snr_noisy, snr_reference in self._SNR_VALUE_PAIRS:
      config_name = '{0:d}_{1:d}_SNR'.format(snr_noisy, snr_reference)
      output_path = self._MakeDir(base_output_path, config_name)
      self._AddNoiseReferenceFilesPair(
          config_name=config_name,
          noisy_signal_filepath=noisy_mix_filepaths[snr_noisy],
          reference_signal_filepath=noisy_mix_filepaths[snr_reference],
          output_path=output_path)
# TODO(alessiob): remove comment when class implemented.
# @TestDataGenerator.RegisterClass
class NarrowBandNoiseTestDataGenerator(TestDataGenerator):
"""Generator that adds narrow-band noise.
"""
NAME = 'narrow_band_noise'
def __init__(self):
TestDataGenerator.__init__(self)
def _Generate(
self, input_signal_filepath, input_noise_cache_path, base_output_path):
# TODO(alessiob): implement.
pass
@TestDataGenerator.RegisterClass
class EnvironmentalNoiseTestDataGenerator(TestDataGenerator):
"""Generator that adds environmental noise.
TODO(alessiob): Make the class more generic e.g.,
MixNoiseTrackTestDataGenerator.
"""
NAME = 'environmental_noise'
_NOISY_SIGNAL_FILENAME_TEMPLATE = '{0}_{1:d}_SNR.wav'
# TODO(alessiob): allow the user to store the noise tracks in a custom path.
_NOISE_TRACKS_PATH = os.path.join(
os.path.dirname(__file__), os.pardir, 'noise_tracks')
# TODO(alessiob): Allow the user to have custom noise tracks.
# TODO(alessiob): Exploit TestDataGeneratorFactory.GetInstance().
_NOISE_TRACKS = [
'city.wav'
]
# Each pair indicates the clean vs. noisy and reference vs. noisy SNRs.
# The reference (second value of each pair) always has a lower amount of noise
# - i.e., the SNR is 10 dB higher.
_SNR_VALUE_PAIRS = [
[20, 30], # Smallest noise.
[10, 20],
[5, 15],
[0, 10], # Largest noise.
]
def __init__(self):
TestDataGenerator.__init__(self)
def _Generate(
self, input_signal_filepath, input_noise_cache_path, base_output_path):
"""Generates test data pairs using environmental noise.
For each noise track and pair of SNR values, the following two audio tracks
are created: the noisy signal and the reference signal. The former is
    obtained by mixing the (clean) input signal with the corresponding noise
    track, enforcing the target SNR.
"""
# Init.
snr_values = set([snr for pair in self._SNR_VALUE_PAIRS for snr in pair])
# Load the input signal.
input_signal = signal_processing.SignalProcessingUtils.LoadWav(
input_signal_filepath)
input_signal = signal_processing.SignalProcessingUtils.Normalize(
input_signal)
noisy_mix_filepaths = {}
for noise_track_filename in self._NOISE_TRACKS:
# Load the noise track.
noise_track_name, _ = os.path.splitext(noise_track_filename)
noise_track_filepath = os.path.join(
self._NOISE_TRACKS_PATH, noise_track_filename)
if not os.path.exists(noise_track_filepath):
logging.error('cannot find the <%s> noise track', noise_track_filename)
raise exceptions.FileNotFoundError()
noise_signal = signal_processing.SignalProcessingUtils.LoadWav(
noise_track_filepath)
noise_signal = signal_processing.SignalProcessingUtils.Normalize(
noise_signal)
# Create the noisy mixes (once for each unique SNR value).
noisy_mix_filepaths[noise_track_name] = {}
for snr in snr_values:
noisy_signal_filepath = os.path.join(
input_noise_cache_path,
self._NOISY_SIGNAL_FILENAME_TEMPLATE.format(noise_track_name, snr))
# Create and save if not done.
if not os.path.exists(noisy_signal_filepath):
# Create noisy signal.
noisy_signal = signal_processing.SignalProcessingUtils.MixSignals(
input_signal, noise_signal, snr)
# Save.
signal_processing.SignalProcessingUtils.SaveWav(
noisy_signal_filepath, noisy_signal)
# Add file to the collection of mixes.
noisy_mix_filepaths[noise_track_name][snr] = noisy_signal_filepath
# Add all the noise-SNR pairs.
self._AddNoiseSnrPairs(
base_output_path, noisy_mix_filepaths, self._SNR_VALUE_PAIRS)
@TestDataGenerator.RegisterClass
class ReverberationTestDataGenerator(TestDataGenerator):
"""Generator that adds reverberation noise.
TODO(alessiob): Make this class more generic since the impulse response can be
anything (not just reverberation); call it e.g.,
ConvolutionalNoiseTestDataGenerator.
"""
NAME = 'reverberation'
_IMPULSE_RESPONSES = {
'lecture': 'air_binaural_lecture_0_0_1.mat', # Long echo.
'booth': 'air_binaural_booth_0_0_1.mat', # Short echo.
}
_MAX_IMPULSE_RESPONSE_LENGTH = None
# Each pair indicates the clean vs. noisy and reference vs. noisy SNRs.
# The reference (second value of each pair) always has a lower amount of noise
# - i.e., the SNR is 5 dB higher.
_SNR_VALUE_PAIRS = [
[3, 8], # Smallest noise.
[-3, 2], # Largest noise.
]
_NOISE_TRACK_FILENAME_TEMPLATE = '{0}.wav'
_NOISY_SIGNAL_FILENAME_TEMPLATE = '{0}_{1:d}_SNR.wav'
def __init__(self, aechen_ir_database_path):
TestDataGenerator.__init__(self)
self._aechen_ir_database_path = aechen_ir_database_path
def _Generate(
self, input_signal_filepath, input_noise_cache_path, base_output_path):
"""Generates test data pairs using reverberation noise.
For each impulse response, one noise track is created. For each impulse
    response and pair of SNR values, the following two audio tracks are
    created: the noisy signal and the reference signal. The former is
    obtained by mixing the (clean) input signal with the corresponding noise
    track, enforcing the target SNR.
"""
# Init.
snr_values = set([snr for pair in self._SNR_VALUE_PAIRS for snr in pair])
# Load the input signal.
input_signal = signal_processing.SignalProcessingUtils.LoadWav(
input_signal_filepath)
noisy_mix_filepaths = {}
for impulse_response_name in self._IMPULSE_RESPONSES:
noise_track_filename = self._NOISE_TRACK_FILENAME_TEMPLATE.format(
impulse_response_name)
noise_track_filepath = os.path.join(
input_noise_cache_path, noise_track_filename)
noise_signal = None
try:
# Load noise track.
noise_signal = signal_processing.SignalProcessingUtils.LoadWav(
noise_track_filepath)
except exceptions.FileNotFoundError:
# Generate noise track by applying the impulse response.
impulse_response_filepath = os.path.join(
self._aechen_ir_database_path,
self._IMPULSE_RESPONSES[impulse_response_name])
noise_signal = self._GenerateNoiseTrack(
noise_track_filepath, input_signal, impulse_response_filepath)
assert noise_signal is not None
# Create the noisy mixes (once for each unique SNR value).
noisy_mix_filepaths[impulse_response_name] = {}
for snr in snr_values:
noisy_signal_filepath = os.path.join(
input_noise_cache_path,
self._NOISY_SIGNAL_FILENAME_TEMPLATE.format(
impulse_response_name, snr))
# Create and save if not done.
if not os.path.exists(noisy_signal_filepath):
# Create noisy signal.
noisy_signal = signal_processing.SignalProcessingUtils.MixSignals(
input_signal, noise_signal, snr, bln_pad_shortest=True)
# Save.
signal_processing.SignalProcessingUtils.SaveWav(
noisy_signal_filepath, noisy_signal)
# Add file to the collection of mixes.
noisy_mix_filepaths[impulse_response_name][snr] = noisy_signal_filepath
# Add all the noise-SNR pairs.
self._AddNoiseSnrPairs(base_output_path, noisy_mix_filepaths,
self._SNR_VALUE_PAIRS)
def _GenerateNoiseTrack(self, noise_track_filepath, input_signal,
impulse_response_filepath):
"""Generates noise track.
Generate a signal by convolving input_signal with the impulse response in
impulse_response_filepath; then save to noise_track_filepath.
Args:
noise_track_filepath: output file path for the noise track.
input_signal: (clean) input signal samples.
impulse_response_filepath: impulse response file path.
Returns:
AudioSegment instance.
"""
# Load impulse response.
data = scipy.io.loadmat(impulse_response_filepath)
impulse_response = data['h_air'].flatten()
if self._MAX_IMPULSE_RESPONSE_LENGTH is not None:
logging.info('truncating impulse response from %d to %d samples',
len(impulse_response), self._MAX_IMPULSE_RESPONSE_LENGTH)
impulse_response = impulse_response[:self._MAX_IMPULSE_RESPONSE_LENGTH]
# Apply impulse response.
processed_signal = (
signal_processing.SignalProcessingUtils.ApplyImpulseResponse(
input_signal, impulse_response))
# Save.
signal_processing.SignalProcessingUtils.SaveWav(
noise_track_filepath, processed_signal)
return processed_signal
|
{
"content_hash": "258303ddb1864ccbfd326cb6f7260260",
"timestamp": "",
"source": "github",
"line_count": 498,
"max_line_length": 80,
"avg_line_length": 36.19879518072289,
"alnum_prop": 0.6860265157818828,
"repo_name": "WymanLyu/WYDemo",
"id": "f42944cedd312574e5efbd1e24912609cbaad93b",
"size": "18421",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "FXAudio/FXAudio/FXAudio/webRTC/webrtc/modules/audio_processing/test/py_quality_assessment/quality_assessment/test_data_generation.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "22469"
},
{
"name": "C",
"bytes": "9341283"
},
{
"name": "C++",
"bytes": "25210057"
},
{
"name": "HTML",
"bytes": "33634"
},
{
"name": "Java",
"bytes": "330631"
},
{
"name": "JavaScript",
"bytes": "128054"
},
{
"name": "Makefile",
"bytes": "45840"
},
{
"name": "Matlab",
"bytes": "42078"
},
{
"name": "Objective-C",
"bytes": "4817874"
},
{
"name": "Objective-C++",
"bytes": "539172"
},
{
"name": "Python",
"bytes": "642022"
},
{
"name": "Ruby",
"bytes": "11503"
},
{
"name": "Shell",
"bytes": "321885"
},
{
"name": "Swift",
"bytes": "4973"
}
],
"symlink_target": ""
}
|
from subprocess import call
from os import path
import hitchpostgres
import hitchselenium
import hitchpython
import hitchserve
import hitchredis
import hitchtest
import hitchsmtp
# Get directory above this file
PROJECT_DIRECTORY = path.abspath(path.join(path.dirname(__file__), '..'))
class ExecutionEngine(hitchtest.ExecutionEngine):
"""Engine for orchestating and interacting with the app."""
def set_up(self):
"""Ensure virtualenv present, then run all services."""
python_package = hitchpython.PythonPackage(
python_version=self.preconditions['python_version']
)
python_package.build()
python_package.verify()
call([
python_package.pip, "install", "-r",
path.join(PROJECT_DIRECTORY, "requirements/local.txt")
])
postgres_package = hitchpostgres.PostgresPackage(
version=self.settings["postgres_version"],
)
postgres_package.build()
postgres_package.verify()
redis_package = hitchredis.RedisPackage(version="2.8.4")
redis_package.build()
redis_package.verify()
self.services = hitchserve.ServiceBundle(
project_directory=PROJECT_DIRECTORY,
startup_timeout=float(self.settings["startup_timeout"]),
shutdown_timeout=5.0,
)
postgres_user = hitchpostgres.PostgresUser("megs_project", "password")
self.services['Postgres'] = hitchpostgres.PostgresService(
postgres_package=postgres_package,
users=[postgres_user, ],
databases=[hitchpostgres.PostgresDatabase("megs_project", postgres_user), ]
)
self.services['HitchSMTP'] = hitchsmtp.HitchSMTPService(port=1025)
self.services['Django'] = hitchpython.DjangoService(
python=python_package.python,
port=8000,
version=str(self.settings.get("django_version")),
settings="config.settings.local",
needs=[self.services['Postgres'], ],
env_vars=self.settings['environment_variables'],
)
self.services['Redis'] = hitchredis.RedisService(
redis_package=redis_package,
port=16379,
)
self.services['Firefox'] = hitchselenium.SeleniumService(
xvfb=self.settings.get("quiet", False),
no_libfaketime=True,
)
# import hitchcron
# self.services['Cron'] = hitchcron.CronService(
# run=self.services['Django'].manage("trigger").command,
# every=1,
# needs=[ self.services['Django'], ],
# )
self.services.startup(interactive=False)
# Configure selenium driver
self.driver = self.services['Firefox'].driver
        # Selenium's set_window_size expects (width, height).
        self.driver.set_window_size(self.settings['window_size']['width'], self.settings['window_size']['height'])
self.driver.set_window_position(0, 0)
self.driver.implicitly_wait(2.0)
self.driver.accept_next_alert = True
def pause(self, message=None):
"""Stop. IPython time."""
if hasattr(self, 'services'):
self.services.start_interactive_mode()
self.ipython(message)
if hasattr(self, 'services'):
self.services.stop_interactive_mode()
def load_website(self):
"""Navigate to website in Firefox."""
self.driver.get(self.services['Django'].url())
def click(self, on):
"""Click on HTML id."""
self.driver.find_element_by_id(on).click()
def fill_form(self, **kwargs):
"""Fill in a form with id=value."""
for element, text in kwargs.items():
self.driver.find_element_by_id(element).send_keys(text)
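    # Illustrative example (the element ids below are hypothetical):
    #   self.fill_form(id_email="user@example.com", id_password="hunter2")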
def click_submit(self):
"""Click on a submit button if it exists."""
self.driver.find_element_by_css_selector("button[type=\"submit\"]").click()
def confirm_emails_sent(self, number):
"""Count number of emails sent by app."""
assert len(self.services['HitchSMTP'].logs.json()) == int(number)
def wait_for_email(self, containing=None):
"""Wait for, and return email."""
self.services['HitchSMTP'].logs.out.tail.until_json(
lambda email: containing in email['payload'] or containing in email['subject'],
timeout=25,
lines_back=1,
)
def time_travel(self, days=""):
"""Make all services think that time has skipped forward."""
self.services.time_travel(days=int(days))
def on_failure(self):
"""Stop and IPython."""
if not self.settings['quiet']:
if self.settings.get("pause_on_failure", False):
self.pause(message=self.stacktrace.to_template())
def on_success(self):
"""Pause on success if enabled."""
if self.settings.get("pause_on_success", False):
self.pause(message="SUCCESS")
def tear_down(self):
"""Shut down services required to run your test."""
if hasattr(self, 'services'):
self.services.shutdown()
|
{
"content_hash": "8b905cddc468d42ac1faeef099a2c81d",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 114,
"avg_line_length": 34.37837837837838,
"alnum_prop": 0.6118317610062893,
"repo_name": "megcunningham/django-diesel",
"id": "6350db8a79a044c1118b11683367e91a367ba8b1",
"size": "5088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/engine.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "546"
},
{
"name": "HTML",
"bytes": "20182"
},
{
"name": "JavaScript",
"bytes": "3150"
},
{
"name": "Nginx",
"bytes": "1095"
},
{
"name": "Python",
"bytes": "41628"
},
{
"name": "Shell",
"bytes": "4542"
}
],
"symlink_target": ""
}
|
"""
BenchExec is a framework for reliable benchmarking.
This file is part of BenchExec.
Copyright (C) 2007-2015 Dirk Beyer
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# prepare for Python 3
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import os
import time
import xml.etree.ElementTree as ET
import sys
from . import result
from . import util as util
MEMLIMIT = "memlimit"
TIMELIMIT = "timelimit"
CORELIMIT = "cpuCores"
SOFTTIMELIMIT = 'softtimelimit'
HARDTIMELIMIT = 'hardtimelimit'
PROPERTY_TAG = "propertyfile"
_BYTE_FACTOR = 1000 # byte in kilobyte
def substitute_vars(oldList, runSet, sourcefile=None):
"""
    This method replaces special substrings in a list of strings
    and returns a new list.
"""
benchmark = runSet.benchmark
# list with tuples (key, value): 'key' is replaced by 'value'
keyValueList = [('${benchmark_name}', benchmark.name),
('${benchmark_date}', benchmark.instance),
('${benchmark_path}', benchmark.base_dir or '.'),
('${benchmark_path_abs}', os.path.abspath(benchmark.base_dir)),
('${benchmark_file}', os.path.basename(benchmark.benchmark_file)),
('${benchmark_file_abs}', os.path.abspath(os.path.basename(benchmark.benchmark_file))),
('${logfile_path}', os.path.dirname(runSet.log_folder) or '.'),
('${logfile_path_abs}', os.path.abspath(runSet.log_folder)),
('${rundefinition_name}', runSet.real_name if runSet.real_name else ''),
('${test_name}', runSet.real_name if runSet.real_name else '')]
if sourcefile:
keyValueList.append(('${inputfile_name}', os.path.basename(sourcefile)))
keyValueList.append(('${inputfile_path}', os.path.dirname(sourcefile) or '.'))
keyValueList.append(('${inputfile_path_abs}', os.path.dirname(os.path.abspath(sourcefile))))
keyValueList.append(('${sourcefile_name}', os.path.basename(sourcefile)))
keyValueList.append(('${sourcefile_path}', os.path.dirname(sourcefile) or '.'))
keyValueList.append(('${sourcefile_path_abs}', os.path.dirname(os.path.abspath(sourcefile))))
# do not use keys twice
assert len(set((key for (key, value) in keyValueList))) == len(keyValueList)
newList = []
for oldStr in oldList:
newStr = oldStr
for (key, value) in keyValueList:
newStr = newStr.replace(key, value)
if '${' in newStr:
logging.warning("a variable was not replaced in '{0}'".format(newStr))
newList.append(newStr)
return newList
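# Illustrative sketch (not part of the original module): substitute_vars()
# only reads a handful of attributes, so stand-in namespace objects are
# enough to demonstrate the substitution. All names below are hypothetical.
def _example_substitute_vars():
    from types import SimpleNamespace
    benchmark = SimpleNamespace(
        name='mybench', instance='2015-01-01_0000', base_dir='/tmp/bench',
        benchmark_file='/tmp/bench/mybench.xml')
    run_set = SimpleNamespace(
        benchmark=benchmark, log_folder='/tmp/bench/logs/', real_name='fast')
    # returns ['mybench.fast']
    return substitute_vars(['${benchmark_name}.${rundefinition_name}'], run_set)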
class Benchmark:
"""
The class Benchmark manages the import of source files, options, columns and
the tool from a benchmark_file.
This class represents the <benchmark> tag.
"""
def __init__(self, benchmark_file, config, start_time):
"""
        The constructor of Benchmark reads the source files, options, columns and the tool
        from the XML in the benchmark_file.
"""
logging.debug("I'm loading the benchmark {0}.".format(benchmark_file))
self.config = config
self.benchmark_file = benchmark_file
self.base_dir = os.path.dirname(self.benchmark_file)
# get benchmark-name
self.name = os.path.basename(benchmark_file)[:-4] # remove ending ".xml"
if config.name:
self.name += "."+config.name
self.start_time = start_time
self.instance = time.strftime("%Y-%m-%d_%H%M", self.start_time)
self.output_base_name = config.output_path + self.name + "." + self.instance
self.log_folder = self.output_base_name + ".logfiles" + os.path.sep
# parse XML
try:
rootTag = ET.ElementTree().parse(benchmark_file)
except ET.ParseError as e:
sys.exit('Benchmark file {} is invalid: {}'.format(benchmark_file, e))
if 'benchmark' != rootTag.tag:
sys.exit("Benchmark file {} is invalid: "
+ "It's root element is not named 'benchmark'.".format(benchmark_file))
# get tool
tool_name = rootTag.get('tool')
if not tool_name:
sys.exit('A tool needs to be specified in the benchmark definition file.')
self.tool_module = tool_name if '.' in tool_name else ("benchexec.tools." + tool_name)
try:
self.tool = __import__(self.tool_module, fromlist=['Tool']).Tool()
except ImportError as ie:
sys.exit('Unsupported tool "{0}" specified. ImportError: {1}'.format(tool_name, ie))
except AttributeError:
sys.exit('The module for "{0}" does not define the necessary class.'.format(tool_name))
self.tool_name = self.tool.name()
# will be set from the outside if necessary (may not be the case in SaaS environments)
self.tool_version = None
self.executable = None
logging.debug("The tool to be benchmarked is {0}.".format(str(self.tool_name)))
self.rlimits = {}
keys = list(rootTag.keys())
for limit in [MEMLIMIT, TIMELIMIT, CORELIMIT]:
if limit in keys:
self.rlimits[limit] = int(rootTag.get(limit))
# override limits from XML with values from command line
def override_limit(configVal, limit):
            if configVal is not None:
val = int(configVal)
if val == -1: # infinity
if limit in self.rlimits:
self.rlimits.pop(limit)
else:
self.rlimits[limit] = val
override_limit(config.memorylimit, MEMLIMIT)
override_limit(config.timelimit, TIMELIMIT)
override_limit(config.corelimit, CORELIMIT)
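        # For example (illustrative): passing a command-line time limit of -1
        # removes any timelimit taken from the XML, while a positive value
        # simply replaces it.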
if HARDTIMELIMIT in keys:
hardtimelimit = int(rootTag.get(HARDTIMELIMIT))
if TIMELIMIT in self.rlimits:
if hardtimelimit < self.rlimits[TIMELIMIT]:
logging.warning('Hard timelimit %d is smaller than timelimit %d, ignoring the former.'
% (hardtimelimit, self.rlimits[TIMELIMIT]))
else:
self.rlimits[SOFTTIMELIMIT] = self.rlimits[TIMELIMIT]
self.rlimits[TIMELIMIT] = hardtimelimit
else:
self.rlimits[TIMELIMIT] = hardtimelimit
# get number of threads, default value is 1
self.num_of_threads = int(rootTag.get("threads")) if ("threads" in keys) else 1
        if config.num_of_threads is not None:
self.num_of_threads = config.num_of_threads
if self.num_of_threads < 1:
logging.error("At least ONE thread must be given!")
sys.exit()
# get global options and property file
self.options = util.get_list_from_xml(rootTag)
self.propertyfile = util.text_or_none(util.get_single_child_from_xml(rootTag, PROPERTY_TAG))
# get columns
self.columns = Benchmark.load_columns(rootTag.find("columns"))
# get global source files, they are used in all run sets
globalSourcefilesTags = rootTag.findall("tasks") + rootTag.findall("sourcefiles")
# get required files
self._required_files = set()
for required_files_tag in rootTag.findall('requiredfiles'):
required_files = util.expand_filename_pattern(required_files_tag.text, self.base_dir)
if not required_files:
logging.warning('Pattern {0} in requiredfiles tag did not match any file.'.format(required_files_tag.text))
self._required_files = self._required_files.union(required_files)
# get requirements
self.requirements = Requirements(rootTag.findall("require"), self.rlimits, config)
self.result_files_pattern = None
resultFilesTags = rootTag.findall("resultfiles")
if resultFilesTags:
if len(resultFilesTags) > 1:
logging.warning("Benchmark file {0} has multiple <resultfiles> tags, ignoring all but the first.")
self.result_files_pattern = resultFilesTags[0].text
# get benchmarks
self.run_sets = []
for (i, rundefinitionTag) in enumerate(rootTag.findall("rundefinition")):
self.run_sets.append(RunSet(rundefinitionTag, self, i+1, globalSourcefilesTags))
if not self.run_sets:
for (i, rundefinitionTag) in enumerate(rootTag.findall("test")):
self.run_sets.append(RunSet(rundefinitionTag, self, i+1, globalSourcefilesTags))
if self.run_sets:
logging.warning("Benchmark file {0} uses deprecated <test> tags. Please rename them to <rundefinition>.".format(benchmark_file))
else:
logging.warning("Benchmark file {0} specifies no runs to execute (no <rundefinition> tags found).".format(benchmark_file))
if not any(runSet.should_be_executed() for runSet in self.run_sets):
logging.warning("No runSet selected, nothing will be executed.")
if config.selected_run_definitions:
logging.warning("The selection {0} does not match any runSet of {1}".format(
str(config.selected_run_definitions),
str([runSet.real_name for runSet in self.run_sets])
))
def required_files(self):
return self._required_files.union(self.tool.program_files(self.executable))
def add_required_file(self, filename=None):
if filename is not None:
self._required_files.add(filename)
def working_directory(self):
return self.tool.working_directory(self.executable)
def environment(self):
return self.tool.environment(self.executable)
@staticmethod
def load_columns(columnsTag):
"""
@param columnsTag: the columnsTag from the XML file
@return: a list of Columns()
"""
logging.debug("I'm loading some columns for the outputfile.")
columns = []
        if columnsTag is not None:  # columnsTag is optional in XML file
for columnTag in columnsTag.findall("column"):
pattern = columnTag.text
title = columnTag.get("title", pattern)
number_of_digits = columnTag.get("numberOfDigits") # digits behind comma
column = Column(pattern, title, number_of_digits)
columns.append(column)
logging.debug('Column "{0}" with title "{1}" loaded from XML file.'
.format(column.text, column.title))
return columns
class RunSet:
"""
The class RunSet manages the import of files and options of a run set.
"""
def __init__(self, rundefinitionTag, benchmark, index, globalSourcefilesTags=[]):
"""
The constructor of RunSet reads run-set name and the source files from rundefinitionTag.
Source files can be included or excluded, and imported from a list of
names in another file. Wildcards and variables are expanded.
@param rundefinitionTag: a rundefinitionTag from the XML file
"""
self.benchmark = benchmark
# get name of run set, name is optional, the result can be "None"
self.real_name = rundefinitionTag.get("name")
# index is the number of the run set
self.index = index
self.log_folder = benchmark.log_folder
if self.real_name:
self.log_folder += self.real_name + "."
# get all run-set-specific options from rundefinitionTag
self.options = benchmark.options + util.get_list_from_xml(rundefinitionTag)
self.propertyfile = util.text_or_none(util.get_single_child_from_xml(rundefinitionTag, PROPERTY_TAG)) or benchmark.propertyfile
# get run-set specific required files
required_files_pattern = set(tag.text for tag in rundefinitionTag.findall('requiredfiles'))
# get all runs, a run contains one sourcefile with options
self.blocks = self.extract_runs_from_xml(
globalSourcefilesTags + rundefinitionTag.findall("tasks") + rundefinitionTag.findall("sourcefiles"),
required_files_pattern)
self.runs = [run for block in self.blocks for run in block.runs]
names = [self.real_name]
if len(self.blocks) == 1:
# there is exactly one source-file set to run, append its name to run-set name
names.append(self.blocks[0].real_name)
self.name = '.'.join(filter(None, names))
self.full_name = self.benchmark.name + (("." + self.name) if self.name else "")
# Currently we store logfiles as "basename.log",
        # so we cannot distinguish sourcefiles in different folders with the same basename.
# For a 'local benchmark' this causes overriding of logfiles after reading them,
# so the result is correct, only the logfile is gone.
# For 'cloud-mode' the logfile is overridden before reading it,
# so the result will be wrong and every measured value will be missing.
if self.should_be_executed():
sourcefilesSet = set()
for run in self.runs:
base = os.path.basename(run.identifier)
if base in sourcefilesSet:
logging.warning("sourcefile with basename '" + base +
"' appears twice in runset. This could cause problems with equal logfile-names.")
else:
sourcefilesSet.add(base)
del sourcefilesSet
def should_be_executed(self):
return not self.benchmark.config.selected_run_definitions \
or self.real_name in self.benchmark.config.selected_run_definitions
def extract_runs_from_xml(self, sourcefilesTagList, global_required_files_pattern):
'''
        This function builds a list of SourcefileSets (each containing filenames with options).
The files and their options are taken from the list of sourcefilesTags.
'''
# runs are structured as sourcefile sets, one set represents one sourcefiles tag
blocks = []
for index, sourcefilesTag in enumerate(sourcefilesTagList):
sourcefileSetName = sourcefilesTag.get("name")
matchName = sourcefileSetName or str(index)
if self.benchmark.config.selected_sourcefile_sets \
and matchName not in self.benchmark.config.selected_sourcefile_sets:
continue
required_files_pattern = set(tag.text for tag in sourcefilesTag.findall('requiredfiles'))
# get lists of filenames
sourcefiles = self.get_sourcefiles_from_xml(sourcefilesTag, self.benchmark.base_dir)
# get file-specific options for filenames
fileOptions = util.get_list_from_xml(sourcefilesTag)
propertyfile = util.text_or_none(util.get_single_child_from_xml(sourcefilesTag, PROPERTY_TAG))
currentRuns = []
for sourcefile in sourcefiles:
currentRuns.append(Run(sourcefile, fileOptions, self, propertyfile,
global_required_files_pattern.union(required_files_pattern)))
blocks.append(SourcefileSet(sourcefileSetName, index, currentRuns))
return blocks
def get_sourcefiles_from_xml(self, sourcefilesTag, base_dir):
sourcefiles = []
# get included sourcefiles
for includedFiles in sourcefilesTag.findall("include"):
sourcefiles += self.expand_filename_pattern(includedFiles.text, base_dir)
# get sourcefiles from list in file
for includesFilesFile in sourcefilesTag.findall("includesfile"):
for file in self.expand_filename_pattern(includesFilesFile.text, base_dir):
# check for code (if somebody confuses 'include' and 'includesfile')
if util.is_code(file):
logging.error("'" + file + "' seems to contain code instead of a set of source file names.\n" + \
"Please check your benchmark definition file or remove bracket '{' from this file.")
sys.exit()
# read files from list
fileWithList = open(file, 'rt')
for line in fileWithList:
# strip() removes 'newline' behind the line
line = line.strip()
# ignore comments and empty lines
if not util.is_comment(line):
sourcefiles += self.expand_filename_pattern(line, os.path.dirname(file))
fileWithList.close()
# remove excluded sourcefiles
for excludedFiles in sourcefilesTag.findall("exclude"):
excludedFilesList = self.expand_filename_pattern(excludedFiles.text, base_dir)
for excludedFile in excludedFilesList:
sourcefiles = util.remove_all(sourcefiles, excludedFile)
for excludesFilesFile in sourcefilesTag.findall("excludesfile"):
for file in self.expand_filename_pattern(excludesFilesFile.text, base_dir):
# read files from list
fileWithList = open(file, 'rt')
for line in fileWithList:
# strip() removes 'newline' behind the line
line = line.strip()
# ignore comments and empty lines
if not util.is_comment(line):
excludedFilesList = self.expand_filename_pattern(line, os.path.dirname(file))
for excludedFile in excludedFilesList:
sourcefiles = util.remove_all(sourcefiles, excludedFile)
fileWithList.close()
# add runs for cases without source files
for run in sourcefilesTag.findall("withoutfile"):
sourcefiles.append(run.text)
        # Some runs need more than one sourcefile:
        # the first sourcefile is a normal 'include' file whose name is used as the
        # identifier for the logfile and result category; all other files are 'append'ed.
sourcefilesLists = []
appendFileTags = sourcefilesTag.findall("append")
for sourcefile in sourcefiles:
files = [sourcefile]
for appendFile in appendFileTags:
files.extend(self.expand_filename_pattern(appendFile.text, base_dir, sourcefile=sourcefile))
sourcefilesLists.append(files)
return sourcefilesLists
def expand_filename_pattern(self, pattern, base_dir, sourcefile=None):
"""
The function expand_filename_pattern expands a filename pattern to a sorted list
of filenames. The pattern can contain variables and wildcards.
If base_dir is given and pattern is not absolute, base_dir and pattern are joined.
"""
# replace vars like ${benchmark_path},
# with converting to list and back, we can use the function 'substitute_vars()'
expandedPattern = substitute_vars([pattern], self, sourcefile)
assert len(expandedPattern) == 1
expandedPattern = expandedPattern[0]
if expandedPattern != pattern:
logging.debug("Expanded variables in expression {0} to {1}."
.format(repr(pattern), repr(expandedPattern)))
fileList = util.expand_filename_pattern(expandedPattern, base_dir)
        # sort alphabetically
fileList.sort()
if not fileList:
logging.warning("No files found matching {0}."
.format(repr(pattern)))
return fileList
class SourcefileSet():
"""
A SourcefileSet contains a list of runs and a name.
"""
def __init__(self, name, index, runs):
self.real_name = name # this name is optional
self.name = name or str(index) # this name is always non-empty
self.runs = runs
_logged_missing_property_files = set()
class Run():
"""
A Run contains some sourcefile, some options, propertyfiles and some other stuff, that is needed for the Run.
"""
def __init__(self, sourcefiles, fileOptions, runSet, propertyfile=None, required_files_patterns=[]):
assert sourcefiles
self.identifier = sourcefiles[0] # used for name of logfile, substitution, result-category
self.sourcefiles = util.get_files(sourcefiles) # expand directories to get their sub-files
logging.debug("Creating Run with identifier '{0}' and files {1}".format(self.identifier, self.sourcefiles))
self.runSet = runSet
self.specific_options = fileOptions # options that are specific for this run
self.log_file = runSet.log_folder + os.path.basename(self.identifier) + ".log"
self.required_files = set()
rel_sourcefile = os.path.relpath(self.identifier, runSet.benchmark.base_dir)
for pattern in required_files_patterns:
this_required_files = runSet.expand_filename_pattern(pattern, runSet.benchmark.base_dir, rel_sourcefile)
if not this_required_files:
logging.warning('Pattern {0} in requiredfiles tag did not match any file.'.format(pattern))
self.required_files.update(this_required_files)
self.required_files = list(self.required_files)
        # let's reduce memory consumption: if the two lists are equal, do not use the second one
self.options = runSet.options + fileOptions if fileOptions else runSet.options # all options to be used when executing this run
substitutedOptions = substitute_vars(self.options, runSet, self.identifier)
if substitutedOptions != self.options: self.options = substitutedOptions # for less memory again
self.propertyfile = propertyfile or runSet.propertyfile
def log_property_file_once(msg):
            if self.propertyfile not in _logged_missing_property_files:
_logged_missing_property_files.add(self.propertyfile)
logging.warning(msg)
# replace run-specific stuff in the propertyfile and add it to the set of required files
if self.propertyfile is None:
log_property_file_once('No propertyfile specified. Results for C programs will be handled as UNKNOWN.')
else:
# we check two cases: direct filename or user-defined substitution, one of them must be a 'file'
# TODO: do we need the second case? it is equal to previous used option "-spec ${sourcefile_path}/ALL.prp"
expandedPropertyFiles = util.expand_filename_pattern(self.propertyfile, self.runSet.benchmark.base_dir)
substitutedPropertyfiles = substitute_vars([self.propertyfile], runSet, self.identifier)
assert len(substitutedPropertyfiles) == 1
if expandedPropertyFiles:
if len(expandedPropertyFiles) > 1:
log_property_file_once('Pattern {0} for sourcefile {1} in propertyfile tag matches more than one file. Only {2} will be used.'
.format(self.propertyfile, self.identifier, expandedPropertyFiles[0]))
self.propertyfile = expandedPropertyFiles[0]
elif substitutedPropertyfiles and os.path.isfile(substitutedPropertyfiles[0]):
self.propertyfile = substitutedPropertyfiles[0]
else:
log_property_file_once('Pattern {0} for sourcefile {1} in propertyfile tag did not match any file. It will be ignored.'
.format(self.propertyfile, self.identifier))
self.propertyfile = None
if self.propertyfile:
self.runSet.benchmark.add_required_file(self.propertyfile)
self.properties = result.properties_of_file(self.propertyfile)
else:
self.properties = []
# Copy columns for having own objects in run
# (we need this for storing the results in them).
self.columns = [Column(c.text, c.title, c.number_of_digits) for c in self.runSet.benchmark.columns]
# here we store the optional result values, e.g. memory usage, energy, host name
# keys need to be strings, if first character is "@" the value is marked as hidden (e.g., debug info)
self.values = {}
# dummy values, for output in case of interrupt
self.status = ""
self.cputime = None
self.walltime = None
self.category = result.CATEGORY_UNKNOWN
def cmdline(self):
working_directory = self.runSet.benchmark.tool.working_directory(self.runSet.benchmark.executable)
def relpath(path):
return path if os.path.isabs(path) \
else os.path.relpath(path, working_directory)
executable = relpath(self.runSet.benchmark.executable)
if os.path.sep not in executable:
executable = os.path.join(os.curdir, executable)
args = self.runSet.benchmark.tool.cmdline(
executable, self.options,
list(map(relpath, self.sourcefiles)),
relpath(self.propertyfile) if self.propertyfile else None,
self.runSet.benchmark.rlimits)
args = [os.path.expandvars(arg) for arg in args]
args = [os.path.expanduser(arg) for arg in args]
        return args
def after_execution(self, returnvalue, forceTimeout=False, termination_reason=None):
rlimits = self.runSet.benchmark.rlimits
isTimeout = forceTimeout \
or termination_reason in ['cputime', 'cputime-soft', 'walltime'] \
or self._is_timeout()
# read output
try:
with open(self.log_file, 'rt') as outputFile:
output = outputFile.readlines()
# first 6 lines are for logging, rest is output of subprocess, see runexecutor.py for details
output = output[6:]
except IOError as e:
logging.warning("Cannot read log file: " + e.strerror)
output = []
if returnvalue is not None:
# calculation: returnvalue == (returncode * 256) + returnsignal
# highest bit of returnsignal shows only whether a core file was produced, we clear it
returnsignal = returnvalue & 0x7F
returncode = returnvalue >> 8
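            # Worked example (illustrative): a normal exit with code 1 gives
            # returnvalue == 256 (returncode 1, signal 0), while a process
            # killed by SIGKILL gives returnvalue == 9 (returncode 0, signal 9).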
logging.debug("My subprocess returned {0}, code {1}, signal {2}.".format(returnvalue, returncode, returnsignal))
self.status = self.runSet.benchmark.tool.determine_result(returncode, returnsignal, output, isTimeout)
self.category = result.get_result_category(self.identifier, self.status, self.properties)
for column in self.columns:
substitutedColumnText = substitute_vars([column.text], self.runSet, self.sourcefiles[0])[0]
column.value = self.runSet.benchmark.tool.get_value_from_output(output, substitutedColumnText)
# Tools sometimes produce a result even after a timeout.
        # This should not be counted, so if this is the case we overwrite
        # the result with TIMEOUT here.
# However, we don't want to forget more specific results like SEGFAULT,
# so we do this only if the result is a "normal" one like TRUE.
if self.status in result.RESULT_LIST and isTimeout:
self.status = "TIMEOUT"
self.category = result.CATEGORY_ERROR
# TODO probably this is not necessary anymore
guessed_OOM = returnvalue is not None \
and returnsignal == 9 \
and MEMLIMIT in rlimits \
and 'memUsage' in self.values \
and not self.values['memUsage'] is None \
and int(self.values['memUsage']) >= (rlimits[MEMLIMIT] * _BYTE_FACTOR * _BYTE_FACTOR * 0.99)
if termination_reason == 'memory' or guessed_OOM:
self.status = 'OUT OF MEMORY'
self.category = result.CATEGORY_ERROR
def _is_timeout(self):
''' try to find out whether the tool terminated because of a timeout '''
if self.cputime is None:
return False
rlimits = self.runSet.benchmark.rlimits
if SOFTTIMELIMIT in rlimits:
limit = rlimits[SOFTTIMELIMIT]
elif TIMELIMIT in rlimits:
limit = rlimits[TIMELIMIT]
else:
limit = float('inf')
return self.cputime > limit
class Column:
"""
The class Column contains text, title and number_of_digits of a column.
"""
def __init__(self, text, title, numOfDigits):
self.text = text
self.title = title
self.number_of_digits = numOfDigits
self.value = ""
class Requirements:
'''
    This class wraps the values for the requirements.
It parses the tags from XML to get those values.
If no values are found, at least the limits are used as requirements.
If the user gives a cpu_model in the config, it overrides the previous cpu_model.
'''
def __init__(self, tags, rlimits, config):
self.cpu_model = None
self.memory = None
self.cpu_cores = None
for requireTag in tags:
cpu_model = requireTag.get('cpuModel', None)
if self.cpu_model is None:
self.cpu_model = cpu_model
else:
raise Exception('Double specification of required CPU model.')
cpu_cores = requireTag.get('cpuCores', None)
if self.cpu_cores is None:
if cpu_cores is not None: self.cpu_cores = int(cpu_cores)
else:
raise Exception('Double specification of required CPU cores.')
memory = requireTag.get('memory', None)
if self.memory is None:
if memory is not None: self.memory = int(memory)
else:
raise Exception('Double specification of required memory.')
# TODO check, if we have enough requirements to reach the limits
# TODO is this really enough? we need some overhead!
if self.cpu_cores is None:
self.cpu_cores = rlimits.get(CORELIMIT, None)
if self.memory is None:
self.memory = rlimits.get(MEMLIMIT, None)
if hasattr(config, 'cpu_model') and config.cpu_model is not None:
# user-given model -> override value
self.cpu_model = config.cpu_model
if self.cpu_cores is not None and self.cpu_cores <= 0:
raise Exception('Invalid value {} for required CPU cores.'.format(self.cpu_cores))
if self.memory is not None and self.memory <= 0:
raise Exception('Invalid value {} for required memory.'.format(self.memory))
def __str__(self):
s = ""
if self.cpu_model:
s += " CPU='" + self.cpu_model + "'"
if self.cpu_cores:
s += " Cores=" + str(self.cpu_cores)
if self.memory:
s += " Memory=" + str(self.memory) + "MB"
return "Requirements:" + (s if s else " None")
|
{
"content_hash": "3f8e9b56a3ab2f7d703c9f5d030ffc27",
"timestamp": "",
"source": "github",
"line_count": 727,
"max_line_length": 146,
"avg_line_length": 43.59422283356258,
"alnum_prop": 0.623102893383397,
"repo_name": "bjowac/impara-benchexec",
"id": "8d99149786c1da652d05f2e7dacc4556fa998ff0",
"size": "31693",
"binary": false,
"copies": "2",
"ref": "refs/heads/impara-benchexec",
"path": "benchexec/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gnuplot",
"bytes": "3902"
},
{
"name": "HTML",
"bytes": "52015"
},
{
"name": "Python",
"bytes": "401638"
},
{
"name": "Shell",
"bytes": "668"
}
],
"symlink_target": ""
}
|
"""
This module offers a modified interface for MCMC inference with the following objectives:
- making MCMC independent of Pyro specific trace data structure, to facilitate
integration with other PyTorch based libraries.
- bringing the interface closer to that of NumPyro to make it easier to write
code that works with different backends.
- minimal memory consumption with multiprocessing and CUDA.
"""
import copy
import json
import logging
import queue
import signal
import threading
import warnings
from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Dict
import torch
import torch.multiprocessing as mp
import pyro
import pyro.poutine as poutine
from pyro.infer.mcmc.hmc import HMC
from pyro.infer.mcmc.logger import (
DIAGNOSTIC_MSG,
ProgressBar,
TqdmHandler,
initialize_logger,
)
from pyro.infer.mcmc.nuts import NUTS
from pyro.infer.mcmc.util import (
diagnostics,
diagnostics_from_stats,
print_summary,
select_samples,
)
from pyro.ops.streaming import CountMeanVarianceStats, StatsOfDict, StreamingStats
from pyro.util import optional
MAX_SEED = 2 ** 32 - 1
def logger_thread(
log_queue, warmup_steps, num_samples, num_chains, disable_progbar=False
):
"""
Logging thread that asynchronously consumes logging events from `log_queue`,
and handles them appropriately.
"""
progress_bars = ProgressBar(
warmup_steps, num_samples, disable=disable_progbar, num_bars=num_chains
)
logger = logging.getLogger(__name__)
logger.propagate = False
logger.addHandler(TqdmHandler())
num_samples = [0] * num_chains
try:
while True:
try:
record = log_queue.get(timeout=1)
except queue.Empty:
continue
if record is None:
break
metadata, msg = record.getMessage().split("]", 1)
_, msg_type, logger_id = metadata[1:].split()
if msg_type == DIAGNOSTIC_MSG:
pbar_pos = int(logger_id.split(":")[-1])
num_samples[pbar_pos] += 1
if num_samples[pbar_pos] == warmup_steps:
progress_bars.set_description(
"Sample [{}]".format(pbar_pos + 1), pos=pbar_pos
)
diagnostics = json.loads(msg, object_pairs_hook=OrderedDict)
progress_bars.set_postfix(diagnostics, pos=pbar_pos, refresh=False)
progress_bars.update(pos=pbar_pos)
else:
logger.handle(record)
finally:
progress_bars.close()
class _Worker:
def __init__(
self,
chain_id,
result_queue,
log_queue,
event,
kernel,
num_samples,
warmup_steps,
initial_params=None,
hook=None,
):
self.chain_id = chain_id
self.kernel = kernel
if initial_params is not None:
self.kernel.initial_params = initial_params
self.num_samples = num_samples
self.warmup_steps = warmup_steps
self.rng_seed = (torch.initial_seed() + chain_id) % MAX_SEED
self.log_queue = log_queue
self.result_queue = result_queue
self.default_tensor_type = torch.Tensor().type()
self.hook = hook
self.event = event
def run(self, *args, **kwargs):
pyro.set_rng_seed(self.rng_seed)
torch.set_default_tensor_type(self.default_tensor_type)
logger = logging.getLogger("pyro.infer.mcmc")
logger_id = "CHAIN:{}".format(self.chain_id)
log_queue = self.log_queue
logger = initialize_logger(logger, logger_id, None, log_queue)
logging_hook = _add_logging_hook(logger, None, self.hook)
try:
for sample in _gen_samples(
self.kernel,
self.warmup_steps,
self.num_samples,
logging_hook,
None,
*args,
**kwargs
):
self.result_queue.put_nowait((self.chain_id, sample))
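                # Block until the main process has consumed this sample: the
                # event handshake keeps at most one pending sample per chain
                # in the result queue, which bounds memory use.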
self.event.wait()
self.event.clear()
self.result_queue.put_nowait((self.chain_id, None))
except Exception as e:
logger.exception(e)
self.result_queue.put_nowait((self.chain_id, e))
def _gen_samples(kernel, warmup_steps, num_samples, hook, chain_id, *args, **kwargs):
kernel.setup(warmup_steps, *args, **kwargs)
params = kernel.initial_params
save_params = getattr(kernel, "save_params", sorted(params))
# yield structure (key, value.shape) of params
yield {name: params[name].shape for name in save_params}
for i in range(warmup_steps):
params = kernel.sample(params)
hook(
kernel,
params,
"Warmup [{}]".format(chain_id) if chain_id is not None else "Warmup",
i,
)
for i in range(num_samples):
params = kernel.sample(params)
hook(
kernel,
params,
"Sample [{}]".format(chain_id) if chain_id is not None else "Sample",
i,
)
flat = [params[name].reshape(-1) for name in save_params]
yield (torch.cat if flat else torch.tensor)(flat)
yield kernel.diagnostics()
kernel.cleanup()
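# Illustrative sketch (not part of the original module): the flattening done
# in _gen_samples() is undone in MCMC.run() by slicing the concatenated
# tensor back into the shapes yielded first. A minimal round trip, using
# hypothetical parameter shapes:
def _example_flatten_roundtrip():
    params = {"loc": torch.zeros(2, 3), "scale": torch.ones(4)}
    structure = {name: params[name].shape for name in sorted(params)}
    flat = torch.cat([params[name].reshape(-1) for name in sorted(params)])
    pos, unpacked = 0, {}
    for name in sorted(structure):
        shape = structure[name]
        unpacked[name] = flat[pos:pos + shape.numel()].reshape(shape)
        pos += shape.numel()
    assert all(torch.equal(params[k], unpacked[k]) for k in params)
    return unpacked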
def _add_logging_hook(logger, progress_bar=None, hook=None):
def _add_logging(kernel, params, stage, i):
diagnostics = json.dumps(kernel.logging())
logger.info(diagnostics, extra={"msg_type": DIAGNOSTIC_MSG})
if progress_bar:
progress_bar.set_description(stage, refresh=False)
if hook:
hook(kernel, params, stage, i)
return _add_logging
class _UnarySampler:
"""
    Single-process runner class, optimized for the case where chains are drawn sequentially.
"""
def __init__(
self,
kernel,
num_samples,
warmup_steps,
num_chains,
disable_progbar,
initial_params=None,
hook=None,
):
self.kernel = kernel
self.initial_params = initial_params
self.warmup_steps = warmup_steps
self.num_samples = num_samples
self.num_chains = num_chains
self.logger = None
self.disable_progbar = disable_progbar
self.hook = hook
super().__init__()
def terminate(self, *args, **kwargs):
pass
def run(self, *args, **kwargs):
logger = logging.getLogger("pyro.infer.mcmc")
for i in range(self.num_chains):
if self.initial_params is not None:
initial_params = {k: v[i] for k, v in self.initial_params.items()}
self.kernel.initial_params = initial_params
progress_bar = ProgressBar(
self.warmup_steps, self.num_samples, disable=self.disable_progbar
)
logger = initialize_logger(logger, "", progress_bar)
hook_w_logging = _add_logging_hook(logger, progress_bar, self.hook)
for sample in _gen_samples(
self.kernel,
self.warmup_steps,
self.num_samples,
hook_w_logging,
i if self.num_chains > 1 else None,
*args,
**kwargs
):
yield sample, i # sample, chain_id
self.kernel.cleanup()
progress_bar.close()
class _MultiSampler:
"""
Parallel runner class for running MCMC chains in parallel. This uses the
`torch.multiprocessing` module (itself a light wrapper over the python
`multiprocessing` module) to spin up parallel workers.
"""
def __init__(
self,
kernel,
num_samples,
warmup_steps,
num_chains,
mp_context,
disable_progbar,
initial_params=None,
hook=None,
):
self.kernel = kernel
self.warmup_steps = warmup_steps
self.num_chains = num_chains
self.hook = hook
self.workers = []
self.ctx = mp
if mp_context:
self.ctx = mp.get_context(mp_context)
self.result_queue = self.ctx.Queue()
self.log_queue = self.ctx.Queue()
self.logger = initialize_logger(
logging.getLogger("pyro.infer.mcmc"), "MAIN", log_queue=self.log_queue
)
self.num_samples = num_samples
self.initial_params = initial_params
self.log_thread = threading.Thread(
target=logger_thread,
args=(
self.log_queue,
self.warmup_steps,
self.num_samples,
self.num_chains,
disable_progbar,
),
)
self.log_thread.daemon = True
self.log_thread.start()
self.events = [self.ctx.Event() for _ in range(num_chains)]
def init_workers(self, *args, **kwargs):
self.workers = []
for i in range(self.num_chains):
init_params = (
{k: v[i] for k, v in self.initial_params.items()}
if self.initial_params is not None
else None
)
worker = _Worker(
i,
self.result_queue,
self.log_queue,
self.events[i],
self.kernel,
self.num_samples,
self.warmup_steps,
initial_params=init_params,
hook=self.hook,
)
worker.daemon = True
self.workers.append(
self.ctx.Process(
name=str(i), target=worker.run, args=args, kwargs=kwargs
)
)
def terminate(self, terminate_workers=False):
if self.log_thread.is_alive():
self.log_queue.put_nowait(None)
self.log_thread.join(timeout=1)
        # Only kill workers if an exception was raised; worker processes are
        # daemon processes that will otherwise be terminated with the main
        # process. Note that it is important to not terminate workers during
        # normal operation, since abruptly killing processes that share
        # queues can leave those queues in an inconsistent state.
if terminate_workers:
for w in self.workers:
if w.is_alive():
w.terminate()
def run(self, *args, **kwargs):
# Ignore sigint in worker processes; they will be shut down
# when the main process terminates.
sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
self.init_workers(*args, **kwargs)
# restore original handler
signal.signal(signal.SIGINT, sigint_handler)
active_workers = self.num_chains
exc_raised = True
try:
for w in self.workers:
w.start()
while active_workers:
try:
chain_id, val = self.result_queue.get(timeout=5)
except queue.Empty:
continue
if isinstance(val, Exception):
# Exception trace is already logged by worker.
raise val
if val is not None:
yield val, chain_id
self.events[chain_id].set()
else:
active_workers -= 1
exc_raised = False
finally:
self.terminate(terminate_workers=exc_raised)
class AbstractMCMC(ABC):
"""
Base class for MCMC methods.
"""
def __init__(self, kernel, num_chains, transforms):
self.kernel = kernel
self.num_chains = num_chains
self.transforms = transforms
@abstractmethod
def run(self, *args, **kwargs):
raise NotImplementedError
@abstractmethod
def diagnostics(self):
raise NotImplementedError
def _set_transforms(self, *args, **kwargs):
# Use `kernel.transforms` when available
if getattr(self.kernel, "transforms", None) is not None:
self.transforms = self.kernel.transforms
# Else, get transforms from model (e.g. in multiprocessing).
elif self.kernel.model:
warmup_steps = 0
self.kernel.setup(warmup_steps, *args, **kwargs)
self.transforms = self.kernel.transforms
# Assign default value
else:
self.transforms = {}
def _validate_kernel(self, initial_params):
if (
isinstance(self.kernel, (HMC, NUTS))
and self.kernel.potential_fn is not None
):
if initial_params is None:
raise ValueError(
"Must provide valid initial parameters to begin sampling"
" when using `potential_fn` in HMC/NUTS kernel."
)
def _validate_initial_params(self, initial_params):
for v in initial_params.values():
if v.shape[0] != self.num_chains:
raise ValueError(
"The leading dimension of tensors in `initial_params` "
"must match the number of chains."
)
class MCMC(AbstractMCMC):
"""
Wrapper class for Markov Chain Monte Carlo algorithms. Specific MCMC algorithms
are TraceKernel instances and need to be supplied as a ``kernel`` argument
to the constructor.
.. note:: The case of `num_chains > 1` uses python multiprocessing to
run parallel chains in multiple processes. This goes with the usual
caveats around multiprocessing in python, e.g. the model used to
initialize the ``kernel`` must be serializable via `pickle`, and the
performance / constraints will be platform dependent (e.g. only
the "spawn" context is available in Windows). This has also not
been extensively tested on the Windows platform.
:param kernel: An instance of the ``TraceKernel`` class, which when
given an execution trace returns another sample trace from the target
(posterior) distribution.
:param int num_samples: The number of samples that need to be generated,
excluding the samples discarded during the warmup phase.
:param int warmup_steps: Number of warmup iterations. The samples generated
        during the warmup phase are discarded. If not provided, the default
        is the same as `num_samples`.
:param int num_chains: Number of MCMC chains to run in parallel. Depending on
whether `num_chains` is 1 or more than 1, this class internally dispatches
to either `_UnarySampler` or `_MultiSampler`.
:param dict initial_params: dict containing initial tensors in unconstrained
space to initiate the markov chain. The leading dimension's size must match
that of `num_chains`. If not specified, parameter values will be sampled from
the prior.
:param hook_fn: Python callable that takes in `(kernel, samples, stage, i)`
as arguments. stage is either `sample` or `warmup` and i refers to the
i'th sample for the given stage. This can be used to implement additional
logging, or more generally, run arbitrary code per generated sample.
:param str mp_context: Multiprocessing context to use when `num_chains > 1`.
Only applicable for Python 3.5 and above. Use `mp_context="spawn"` for
CUDA.
:param bool disable_progbar: Disable progress bar and diagnostics update.
:param bool disable_validation: Disables distribution validation check.
Defaults to ``True``, disabling validation, since divergent transitions
will lead to exceptions. Switch to ``False`` to enable validation, or
to ``None`` to preserve existing global values.
:param dict transforms: dictionary that specifies a transform for a sample site
with constrained support to unconstrained space.
:param List[str] save_params: Optional list of a subset of parameter names to
save during sampling and diagnostics. This is useful in models with
large nuisance variables. Defaults to None, saving all params.
"""
def __init__(
self,
kernel,
num_samples,
warmup_steps=None,
initial_params=None,
num_chains=1,
hook_fn=None,
mp_context=None,
disable_progbar=False,
disable_validation=True,
transforms=None,
save_params=None,
):
super().__init__(kernel, num_chains, transforms)
self.warmup_steps = (
num_samples if warmup_steps is None else warmup_steps
) # Stan
self.num_samples = num_samples
self.disable_validation = disable_validation
self._samples = None
self._args = None
self._kwargs = None
if save_params is not None:
kernel.save_params = save_params
self._validate_kernel(initial_params)
parallel = False
if num_chains > 1:
# check that initial_params is different for each chain
if initial_params:
self._validate_initial_params(initial_params)
            # FIXME: we probably want to use the "spawn" method by default to avoid the
            # CUDA initialization error https://github.com/pytorch/pytorch/issues/2517,
            # even when we run MCMC on CPU.
if mp_context is None:
# change multiprocessing context to 'spawn' for CUDA tensors.
if list(initial_params.values())[0].is_cuda:
mp_context = "spawn"
# verify num_chains is compatible with available CPU.
available_cpu = max(
mp.cpu_count() - 1, 1
) # reserving 1 for the main process.
if num_chains <= available_cpu:
parallel = True
else:
warnings.warn(
"num_chains={} is more than available_cpu={}. "
"Chains will be drawn sequentially.".format(
num_chains, available_cpu
)
)
else:
if initial_params:
initial_params = {k: v.unsqueeze(0) for k, v in initial_params.items()}
self._diagnostics = [None] * num_chains
if parallel:
self.sampler = _MultiSampler(
kernel,
num_samples,
self.warmup_steps,
num_chains,
mp_context,
disable_progbar,
initial_params=initial_params,
hook=hook_fn,
)
else:
self.sampler = _UnarySampler(
kernel,
num_samples,
self.warmup_steps,
num_chains,
disable_progbar,
initial_params=initial_params,
hook=hook_fn,
)
@poutine.block
def run(self, *args, **kwargs):
"""
Run MCMC to generate samples and populate `self._samples`.
Example usage:
.. code-block:: python
def model(data):
...
nuts_kernel = NUTS(model)
mcmc = MCMC(nuts_kernel, num_samples=500)
mcmc.run(data)
samples = mcmc.get_samples()
:param args: optional arguments taken by
:meth:`MCMCKernel.setup <pyro.infer.mcmc.mcmc_kernel.MCMCKernel.setup>`.
:param kwargs: optional keywords arguments taken by
:meth:`MCMCKernel.setup <pyro.infer.mcmc.mcmc_kernel.MCMCKernel.setup>`.
"""
self._args, self._kwargs = args, kwargs
num_samples = [0] * self.num_chains
z_flat_acc = [[] for _ in range(self.num_chains)]
with optional(
pyro.validation_enabled(not self.disable_validation),
self.disable_validation is not None,
):
            # XXX we detach CUDA tensor args to resolve the issue "Invalid device pointer"
# at https://github.com/pytorch/pytorch/issues/10375
# This also resolves "RuntimeError: Cowardly refusing to serialize non-leaf tensor which
# requires_grad", which happens with `jit_compile` under PyTorch 1.7
args = [arg.detach() if torch.is_tensor(arg) else arg for arg in args]
for x, chain_id in self.sampler.run(*args, **kwargs):
if num_samples[chain_id] == 0:
num_samples[chain_id] += 1
z_structure = x
elif num_samples[chain_id] == self.num_samples + 1:
self._diagnostics[chain_id] = x
else:
num_samples[chain_id] += 1
if self.num_chains > 1:
x_cloned = x.clone()
del x
else:
x_cloned = x
z_flat_acc[chain_id].append(x_cloned)
z_flat_acc = torch.stack([torch.stack(l) for l in z_flat_acc])
# unpack latent
pos = 0
z_acc = z_structure.copy()
for k in sorted(z_structure):
shape = z_structure[k]
next_pos = pos + shape.numel()
z_acc[k] = z_flat_acc[:, :, pos:next_pos].reshape(
(self.num_chains, self.num_samples) + shape
)
pos = next_pos
assert pos == z_flat_acc.shape[-1]
# If transforms is not explicitly provided, infer automatically using
# model args, kwargs.
if self.transforms is None:
self._set_transforms(*args, **kwargs)
# transform samples back to constrained space
for name, z in z_acc.items():
if name in self.transforms:
z_acc[name] = self.transforms[name].inv(z)
self._samples = z_acc
# terminate the sampler (shut down worker processes)
self.sampler.terminate(True)
def get_samples(self, num_samples=None, group_by_chain=False):
"""
Get samples from the MCMC run, potentially resampling with replacement.
For parameter details see: :meth:`select_samples <pyro.infer.mcmc.util.select_samples>`.
"""
samples = self._samples
return select_samples(samples, num_samples, group_by_chain)
def diagnostics(self):
"""
        Gets diagnostic statistics such as the effective sample size, split
        Gelman-Rubin, or divergent transitions from the sampler.
"""
diag = diagnostics(self._samples)
for diag_name in self._diagnostics[0]:
diag[diag_name] = {
"chain {}".format(i): self._diagnostics[i][diag_name]
for i in range(self.num_chains)
}
return diag
def summary(self, prob=0.9):
"""
Prints a summary table displaying diagnostics of samples obtained from
posterior. The diagnostics displayed are mean, standard deviation, median,
        the credibility interval given by ``prob`` (90% by default), :func:`~pyro.ops.stats.effective_sample_size`,
:func:`~pyro.ops.stats.split_gelman_rubin`.
:param float prob: the probability mass of samples within the credibility interval.
"""
print_summary(self._samples, prob=prob)
if "divergences" in self._diagnostics[0]:
print(
"Number of divergences: {}".format(
sum(
[
len(self._diagnostics[i]["divergences"])
for i in range(self.num_chains)
]
)
)
)
class StreamingMCMC(AbstractMCMC):
"""
    MCMC that computes required statistics in a streaming fashion. No samples are
    retained by this class; only aggregated statistics are kept. This is useful for
    running memory-expensive models where we care only about specific statistics
    (especially useful in memory-constrained environments such as GPUs).
For available streaming ops please see :mod:`~pyro.ops.streaming`.
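
    Example usage (a minimal sketch; ``model`` and ``data`` are placeholders,
    not part of this module):

    .. code-block:: python

        kernel = NUTS(model)
        mcmc = StreamingMCMC(kernel, num_samples=500)
        mcmc.run(data)
        statistics = mcmc.get_statistics()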
"""
def __init__(
self,
kernel,
num_samples,
warmup_steps=None,
initial_params=None,
statistics=None,
num_chains=1,
hook_fn=None,
disable_progbar=False,
disable_validation=True,
transforms=None,
save_params=None,
):
super().__init__(kernel, num_chains, transforms)
self.warmup_steps = (
num_samples if warmup_steps is None else warmup_steps
        )  # default to num_samples, matching Stan's behavior
self.num_samples = num_samples
self.disable_validation = disable_validation
self._samples = None
self._args = None
self._kwargs = None
if statistics is None:
statistics = StatsOfDict(default=CountMeanVarianceStats)
self._statistics = statistics
self._default_statistics = copy.deepcopy(statistics)
if save_params is not None:
kernel.save_params = save_params
self._validate_kernel(initial_params)
if num_chains > 1:
if initial_params:
self._validate_initial_params(initial_params)
else:
if initial_params:
initial_params = {k: v.unsqueeze(0) for k, v in initial_params.items()}
self._diagnostics = [None] * num_chains
self.sampler = _UnarySampler(
kernel,
num_samples,
self.warmup_steps,
num_chains,
disable_progbar,
initial_params=initial_params,
hook=hook_fn,
)
@poutine.block
def run(self, *args, **kwargs):
"""
        Run StreamingMCMC to compute the statistics specified by `self._statistics`.
"""
self._args, self._kwargs = args, kwargs
num_samples = [0] * self.num_chains
with optional(
pyro.validation_enabled(not self.disable_validation),
self.disable_validation is not None,
):
args = [arg.detach() if torch.is_tensor(arg) else arg for arg in args]
for x, chain_id in self.sampler.run(*args, **kwargs):
if num_samples[chain_id] == 0:
# If transforms is not explicitly provided, infer automatically using
# model args, kwargs.
if self.transforms is None:
self._set_transforms(*args, **kwargs)
num_samples[chain_id] += 1
z_structure = x
elif num_samples[chain_id] == self.num_samples + 1:
self._diagnostics[chain_id] = x
else:
num_samples[chain_id] += 1
if self.num_chains > 1:
x_cloned = x.clone()
del x
else:
x_cloned = x
# unpack latent
pos = 0
z_acc = z_structure.copy()
for k in sorted(z_structure):
shape = z_structure[k]
next_pos = pos + shape.numel()
z_acc[k] = x_cloned[pos:next_pos].reshape(shape)
pos = next_pos
for name, z in z_acc.items():
if name in self.transforms:
z_acc[name] = self.transforms[name].inv(z)
self._statistics.update(
{
(chain_id, name): transformed_sample
for name, transformed_sample in z_acc.items()
}
)
# terminate the sampler (shut down worker processes)
self.sampler.terminate(True)
def get_statistics(self, group_by_chain=True):
"""
Returns a dict of statistics defined by those passed to the class constructor.
:param bool group_by_chain: Whether statistics should be chain-wise or merged together.
"""
if group_by_chain:
return self._statistics.get()
else:
# merge all chains with respect to names
merged_dict: Dict[str, StreamingStats] = {}
for (_, name), stat in self._statistics.stats.items():
if name in merged_dict:
merged_dict[name] = merged_dict[name].merge(stat)
else:
merged_dict[name] = stat
return {k: v.get() for k, v in merged_dict.items()}
def diagnostics(self):
"""
        Gets diagnostics. Currently only the split Gelman-Rubin diagnostic is supported;
        it requires the 'mean' and 'variance' streaming statistics to be present.
"""
statistics = self._statistics.get()
diag = diagnostics_from_stats(statistics, self.num_samples, self.num_chains)
for diag_name in self._diagnostics[0]:
diag[diag_name] = {
"chain {}".format(i): self._diagnostics[i][diag_name]
for i in range(self.num_chains)
}
return diag
|
{
"content_hash": "874c98e1b02570c6322c5fe4c9dabcaa",
"timestamp": "",
"source": "github",
"line_count": 789,
"max_line_length": 105,
"avg_line_length": 36.8871989860583,
"alnum_prop": 0.5619846069268829,
"repo_name": "uber/pyro",
"id": "2e6a00288b6bb422446eac4437646554a3cb237c",
"size": "29193",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "pyro/infer/mcmc/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "6121"
},
{
"name": "CSS",
"bytes": "478"
},
{
"name": "Dockerfile",
"bytes": "1635"
},
{
"name": "Makefile",
"bytes": "6857"
},
{
"name": "Python",
"bytes": "3388193"
},
{
"name": "Shell",
"bytes": "6465"
},
{
"name": "TeX",
"bytes": "3649"
}
],
"symlink_target": ""
}
|
from slick.utils.core import get_client
from slick.utils.html import strip_tags
from slick.utils.nested_dict import lookup, multikeysort
from SoftLayer import HardwareManager, SoftLayerAPIError
def all_servers(hw_filter=None):
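    """ Returns a list of simplified hardware records for all servers.

    :param dict hw_filter: Optional keyword arguments passed through to
        HardwareManager.list_hardware() to filter the results.
    """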
systems = []
if not hw_filter:
hw_filter = {}
for hw in get_hardware_manager().list_hardware(**hw_filter):
systems.append(_extract_hw_data(hw))
return systems
#@memoized
def get_available_monthly_server_packages(username=None):
packages = get_hardware_manager().get_available_dedicated_server_packages()
categories = {}
for package in packages:
category = strip_tags(package[2])
if not category:
category = 'General'
if category not in categories:
categories[category] = []
categories[category].append((package[0], package[1]))
return categories
def change_port_speed(hw_id, nic, speed):
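    """ Changes the port speed of one of a server's network interfaces.

    :param int hw_id: The ID of the server to modify.
    :param string nic: The interface name; 'eth0' is treated as the
        private (non-public) interface.
    :param int speed: The new port speed.
    """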
public = True
if 'eth0' == nic:
public = False
try:
get_hardware_manager().change_port_speed(hw_id, public, speed)
success = True
message = "Port speed changed. It may take up to a minute for this " \
"to take effect"
except SoftLayerAPIError as exception:
success = False
message = str(exception)
return (success, message)
#@memoized
def get_hourly_create_options(username):
results = get_hardware_manager().get_bare_metal_create_options()
# Sort locations by their long name
results['locations'] = sorted(results['locations'],
key=lambda x: x['long_name'])
# Sort items within each category by the sort key, then the capacity key
for k, v in results['categories'].items():
items = multikeysort(v['items'], ['sort', 'capacity'])
results['categories'][k]['items'] = items
# Deleting the 'other' category since we don't need it
if results['categories'].get('other'):
del(results['categories']['other'])
return results
#@memoized
def get_monthly_create_options(username, package_id):
mgr = get_hardware_manager()
results = mgr.get_dedicated_server_create_options(package_id)
package = get_client()['Product_Package'].getObject(id=package_id,
mask="mask[id,name]")
results['package_id'] = package_id
results['package_name'] = package['name']
# Sort locations by their long name
results['locations'] = sorted(results['locations'],
key=lambda x: x['long_name'])
groups = {}
# Sort items within each category by the sort key, then the capacity key
for k, v in results['categories'].items():
group = v['group'] or 'Miscellaneous'
if group not in groups:
groups[group] = {}
items = multikeysort(v['items'], ['sort', 'capacity', 'recurring_fee'])
v['items'] = items
groups[group][k] = v
#results['categories'][k]['items'] = items
results['groups'] = groups
return results
def get_hardware_manager():
return HardwareManager(get_client())
def get_server(server_id, full_data=False):
try:
server = get_hardware_manager().get_hardware(server_id)
except SoftLayerAPIError:
return None
if not full_data:
return _extract_hw_data(server)
return server
def reboot_server(server_id, soft=True):
""" Provides a single interface function for rebooting a server.
:param int server_id: The ID of the server to reboot.
:param bool soft: Flag to determine if this should be a soft or hard
reboot. [Default: true (soft)]
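
    Example (illustrative; the server ID is hypothetical)::

        success, message = reboot_server(12345, soft=False)  # hard reboot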
"""
try:
vg = get_client()['Hardware_Server']
if soft:
vg.rebootSoft(id=server_id)
else:
vg.rebootHard(id=server_id)
success = True
message = 'Reboot request sent to instance.'
except SoftLayerAPIError as exception:
success = False
message = str(exception)
return (success, message)
def reload_server(server_id):
""" Wrapper for the HardwareManager's reload() call.
:param int server_id: The ID of the server to reload.
"""
try:
get_hardware_manager().reload(server_id)
success = True
message = 'Reload request issued. You will receive an email when ' \
'the reload is complete.'
except SoftLayerAPIError as exception:
success = False
message = str(exception)
return (success, message)
def place_order(**kwargs):
try:
get_hardware_manager().place_order(**kwargs)
success = True
message = 'Order placed successfully. Check your email for more ' \
'information'
except SoftLayerAPIError as exception:
success = False
message = str(exception)
return (success, message)
def verify_order(**kwargs):
return get_hardware_manager().verify_order(**kwargs)
def _extract_hw_data(hw):
return_data = {
'id': hw.get('id', None),
'hostname': hw.get('hostname'),
'domain': hw.get('domain'),
'fqdn': hw.get('fullyQualifiedDomainName', None),
'datacenter': hw.get('datacenter', {}).get('name', None),
'public': hw.get('primaryIpAddress', None),
'private': hw.get('primaryBackendIpAddress', None),
'cpu': hw.get('processorCoreAmount', None),
'memory': hw.get('memoryCapacity', None),
}
if hw.get('activeTransaction'):
active = False
# print hw['activeTransaction']
status = lookup(hw, 'activeTransaction', 'transactionStatus',
'friendlyName')
if not status:
status = 'Unknown status'
else:
active = True
status = 'Running'
return_data['active'] = active
return_data['status'] = status
# status_id = hw.get('hardwareStatusId')
# if status_id == 5:
# return_data['active'] = True
# else:
# return_data['active'] = False
os_block = lookup(hw, 'operatingSystem', 'softwareLicense',
'softwareDescription')
if os_block:
return_data['os'] = os_block['name'] + ' ' + os_block['version']
if lookup(hw, 'operatingSystem', 'passwords'):
usernames = []
for username in lookup(hw, 'operatingSystem', 'passwords'):
usernames.append(username['username'])
return_data['usernames'] = usernames
if hw.get('networkComponents'):
network = []
for comp in hw.get('networkComponents'):
net = {'status': comp['status'],
'speed': comp['speed'],
'maxSpeed': comp['maxSpeed'],
'name': comp['name'],
'port': comp.get('port'),
}
if comp.get('macAddress'):
net['mac'] = comp.get('macAddress')
elif comp.get('ipmiMacAddress'):
net['mac'] = comp.get('ipmiMacAddress')
if comp.get('primaryIpAddress'):
net['ip'] = comp.get('primaryIpAddress')
elif comp.get('ipmiIpAddress'):
net['ip'] = comp.get('ipmiIpAddress')
if comp.get('primarySubnet'):
subnet = {
'netmask': lookup(comp, 'primarySubnet', 'netmask'),
'broadcast': lookup(comp, 'primarySubnet',
'broadcastAddress'),
'gateway': lookup(comp, 'primarySubnet', 'gateway'),
'network_identifier': lookup(comp, 'primarySubnet',
'networkIdentifier'),
}
net['subnet'] = subnet
network.append(net)
return_data['network'] = network
return return_data
|
{
"content_hash": "ff6f4ab71e61890f9df743a36612def7",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 79,
"avg_line_length": 30.28735632183908,
"alnum_prop": 0.5774826059456041,
"repo_name": "softlayer/slick",
"id": "ad3b71d17add36d71c39c21db176cff7f2b9ddb6",
"size": "7905",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "slick/blueprints/servers/manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7930"
},
{
"name": "HTML",
"bytes": "78502"
},
{
"name": "JavaScript",
"bytes": "28929"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Nginx",
"bytes": "1059"
},
{
"name": "Python",
"bytes": "116400"
}
],
"symlink_target": ""
}
|
"""passlib.utils.compat._ordered_dict -- backport of collections.OrderedDict for py26
taken from stdlib-suggested recipe at http://code.activestate.com/recipes/576693/
this should be imported from passlib.utils.compat.OrderedDict, not here.
"""
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
class OrderedDict(dict):
"""Dictionary that remembers insertion order"""
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
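    # Example (illustrative):
    #   od = OrderedDict([('a', 1), ('b', 2)])
    #   od['c'] = 3
    #   list(od)      # -> ['a', 'b', 'c'] (insertion order preserved)
    #   od.popitem()  # -> ('c', 3) (LIFO by default)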
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
|
{
"content_hash": "3c592b8883138ca493733b97e8e1ecdf",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 87,
"avg_line_length": 34.578512396694215,
"alnum_prop": 0.5424235181644359,
"repo_name": "morreene/tradenews",
"id": "cfd766db3aed5b85c4811c0e6560d988b70e8409",
"size": "8368",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/passlib/utils/compat/_ordered_dict.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1572055"
},
{
"name": "HTML",
"bytes": "464815"
},
{
"name": "JavaScript",
"bytes": "5197624"
},
{
"name": "PHP",
"bytes": "8415"
},
{
"name": "Python",
"bytes": "50512"
},
{
"name": "Shell",
"bytes": "110"
}
],
"symlink_target": ""
}
|
import os
import unittest
from textwrap import dedent
from mock import Mock
from parameterized.parameterized import parameterized
from conans.client.graph.python_requires import ConanPythonRequire
from conans.client.loader import ConanFileLoader
from conans.test.utils.test_files import temp_folder
from conans.test.utils.tools import TestClient, create_profile
from conans.util.files import save, load
base_conanfile = '''
from conans import ConanFile
from conans.tools import patch, replace_in_file
import os
class ConanFileToolsTest(ConanFile):
name = "test"
version = "1.9.10"
'''
class ToolsFilesPatchTest(unittest.TestCase):
@parameterized.expand([(0, ), (1, )])
def test_patch_from_file(self, strip):
if strip:
file_content = base_conanfile + '''
def build(self):
patch(patch_file="file.patch", strip=%s)
''' % strip
patch_content = '''--- %s/text.txt\t2016-01-25 17:57:11.452848309 +0100
+++ %s/text_new.txt\t2016-01-25 17:57:28.839869950 +0100
@@ -1 +1 @@
-ONE TWO THREE
+ONE TWO FOUR''' % ("old_path", "new_path")
else:
file_content = base_conanfile + '''
def build(self):
patch(patch_file="file.patch")
'''
patch_content = '''--- text.txt\t2016-01-25 17:57:11.452848309 +0100
+++ text_new.txt\t2016-01-25 17:57:28.839869950 +0100
@@ -1 +1 @@
-ONE TWO THREE
+ONE TWO FOUR'''
tmp_dir, file_path, text_file = self._save_files(file_content)
patch_file = os.path.join(tmp_dir, "file.patch")
save(patch_file, patch_content)
self._build_and_check(tmp_dir, file_path, text_file, "ONE TWO FOUR")
def test_patch_from_str(self):
file_content = base_conanfile + '''
def build(self):
patch_content = \'''--- text.txt\t2016-01-25 17:57:11.452848309 +0100
+++ text_new.txt\t2016-01-25 17:57:28.839869950 +0100
@@ -1 +1 @@
-ONE TWO THREE
+ONE TWO DOH!\'''
patch(patch_string=patch_content)
'''
tmp_dir, file_path, text_file = self._save_files(file_content)
self._build_and_check(tmp_dir, file_path, text_file, "ONE TWO DOH!")
def test_patch_strip_new(self):
conanfile = dedent("""
from conans import ConanFile, tools
class PatchConan(ConanFile):
def source(self):
tools.patch(self.source_folder, "example.patch", strip=1)""")
patch = dedent("""
--- /dev/null
+++ b/src/newfile
@@ -0,0 +0,1 @@
+New file!""")
client = TestClient()
client.save({"conanfile.py": conanfile,
"example.patch": patch})
client.run("source .")
self.assertEqual(client.load("newfile"), "New file!")
def test_patch_strip_delete(self):
conanfile = dedent("""
from conans import ConanFile, tools
class PatchConan(ConanFile):
def source(self):
tools.patch(self.source_folder, "example.patch", strip=1)""")
patch = dedent("""
--- a\src\oldfile
+++ b/dev/null
@@ -0,1 +0,0 @@
-legacy code""")
client = TestClient()
client.save({"conanfile.py": conanfile,
"example.patch": patch,
"oldfile": "legacy code"})
path = os.path.join(client.current_folder, "oldfile")
self.assertTrue(os.path.exists(path))
client.run("source .")
self.assertFalse(os.path.exists(path))
def test_patch_strip_delete_no_folder(self):
conanfile = dedent("""
from conans import ConanFile, tools
class PatchConan(ConanFile):
def source(self):
tools.patch(self.source_folder, "example.patch", strip=1)""")
patch = dedent("""
--- a/oldfile
+++ b/dev/null
@@ -0,1 +0,0 @@
-legacy code""")
client = TestClient()
client.save({"conanfile.py": conanfile,
"example.patch": patch,
"oldfile": "legacy code"})
path = os.path.join(client.current_folder, "oldfile")
self.assertTrue(os.path.exists(path))
client.run("source .")
self.assertFalse(os.path.exists(path))
def test_patch_new_delete(self):
conanfile = base_conanfile + '''
def build(self):
from conans.tools import load, save
save("oldfile", "legacy code")
assert(os.path.exists("oldfile"))
patch_content = """--- /dev/null
+++ b/newfile
@@ -0,0 +0,3 @@
+New file!
+New file!
+New file!
--- a/oldfile
+++ b/dev/null
@@ -0,1 +0,0 @@
-legacy code
"""
patch(patch_string=patch_content)
self.output.info("NEW FILE=%s" % load("newfile"))
self.output.info("OLD FILE=%s" % os.path.exists("oldfile"))
'''
client = TestClient()
client.save({"conanfile.py": conanfile})
client.run("create . user/testing")
self.assertIn("test/1.9.10@user/testing: NEW FILE=New file!\nNew file!\nNew file!\n",
client.out)
self.assertIn("test/1.9.10@user/testing: OLD FILE=False", client.out)
def test_patch_new_strip(self):
conanfile = base_conanfile + '''
def build(self):
from conans.tools import load, save
patch_content = """--- /dev/null
+++ b/newfile
@@ -0,0 +0,3 @@
+New file!
+New file!
+New file!
"""
patch(patch_string=patch_content, strip=1)
self.output.info("NEW FILE=%s" % load("newfile"))
'''
client = TestClient()
client.save({"conanfile.py": conanfile})
client.run("create . user/testing")
self.assertIn("test/1.9.10@user/testing: NEW FILE=New file!\nNew file!\nNew file!\n",
client.out)
def test_error_patch(self):
file_content = base_conanfile + '''
def build(self):
patch_content = "some corrupted patch"
patch(patch_string=patch_content, output=self.output)
'''
client = TestClient()
client.save({"conanfile.py": file_content})
client.run("install .")
client.run("build .", assert_error=True)
self.assertIn("patch_ng: error: no patch data found!", client.out)
self.assertIn("ERROR: conanfile.py (test/1.9.10): "
"Error in build() method, line 12", client.out)
self.assertIn("Failed to parse patch: string", client.out)
def test_add_new_file(self):
""" Validate issue #5320
"""
conanfile = dedent("""
from conans import ConanFile, tools
import os
class ConanFileToolsTest(ConanFile):
name = "foobar"
version = "0.1.0"
exports_sources = "*"
def build(self):
tools.patch(patch_file="add_files.patch")
assert os.path.isfile("foo.txt")
assert os.path.isfile("bar.txt")
""")
bar = "no creo en brujas"
patch = dedent("""
From c66347c66991b6e617d107b505c18b3115624b8a Mon Sep 17 00:00:00 2001
From: Uilian Ries <uilianries@gmail.com>
Date: Wed, 16 Oct 2019 14:31:34 -0300
Subject: [PATCH] add foo
---
bar.txt | 3 ++-
foo.txt | 3 +++
2 files changed, 5 insertions(+), 1 deletion(-)
create mode 100644 foo.txt
diff --git a/bar.txt b/bar.txt
index 0f4ff3a..0bd3158 100644
--- a/bar.txt
+++ b/bar.txt
@@ -1 +1,2 @@
-no creo en brujas
+Yo no creo en brujas, pero que las hay, las hay
+
diff --git a/foo.txt b/foo.txt
new file mode 100644
index 0000000..91e8c0d
--- /dev/null
+++ b/foo.txt
@@ -0,0 +1,3 @@
+For us, there is no spring.
+Just the wind that smells fresh before the storm.
+
--
2.23.0
""")
client = TestClient()
client.save({"conanfile.py": conanfile,
"add_files.patch": patch,
"bar.txt": bar})
client.run("install .")
client.run("build .")
bar_content = client.load("bar.txt")
self.assertIn(dedent("""Yo no creo en brujas, pero que las hay, las hay
"""), bar_content)
foo_content = client.load("foo.txt")
self.assertIn(dedent("""For us, there is no spring.
Just the wind that smells fresh before the storm."""), foo_content)
self.assertIn("Calling build()", client.out)
self.assertNotIn("Warning", client.out)
def _save_files(self, file_content):
tmp_dir = temp_folder()
file_path = os.path.join(tmp_dir, "conanfile.py")
text_file = os.path.join(tmp_dir, "text.txt")
save(file_path, file_content)
save(text_file, "ONE TWO THREE")
return tmp_dir, file_path, text_file
def _build_and_check(self, tmp_dir, file_path, text_file, msg):
loader = ConanFileLoader(None, Mock(), ConanPythonRequire(None, None))
ret = loader.load_consumer(file_path, create_profile())
curdir = os.path.abspath(os.curdir)
os.chdir(tmp_dir)
try:
ret.build()
finally:
os.chdir(curdir)
content = load(text_file)
self.assertEqual(content, msg)
def test_fuzzy_patch(self):
conanfile = dedent("""
from conans import ConanFile, tools
import os
class ConanFileToolsTest(ConanFile):
name = "fuzz"
version = "0.1.0"
exports_sources = "*"
def build(self):
tools.patch(patch_file="fuzzy.patch", fuzz=True)
""")
source = dedent("""X
Y
Z""")
patch = dedent("""diff --git a/Jamroot b/Jamroot
index a6981dd..0c08f09 100644
--- a/Jamroot
+++ b/Jamroot
@@ -1,3 +1,4 @@
X
YYYY
+V
W""")
expected = dedent("""X
Y
V
Z""")
client = TestClient()
client.save({"conanfile.py": conanfile,
"fuzzy.patch": patch,
"Jamroot": source})
client.run("install .")
client.run("build .")
content = client.load("Jamroot")
self.assertIn(expected, content)
|
{
"content_hash": "7407b684c93e36be639da27d520d4464",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 93,
"avg_line_length": 32.958990536277604,
"alnum_prop": 0.5491960183767228,
"repo_name": "conan-io/conan",
"id": "b123478e9747c1c0a1d938fda8db6706a2420399",
"size": "10448",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/unittests/tools/files_patch_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Python",
"bytes": "8209945"
}
],
"symlink_target": ""
}
|
from verta._swagger.base_type import BaseType
class ModeldbAddExperimentRunTag(BaseType):
def __init__(self, id=None, tag=None):
required = {
"id": False,
"tag": False,
}
self.id = id
self.tag = tag
for k, v in required.items():
if self[k] is None and v:
raise ValueError('attribute {} is required'.format(k))
@staticmethod
def from_json(d):
tmp = d.get('id', None)
if tmp is not None:
d['id'] = tmp
tmp = d.get('tag', None)
if tmp is not None:
d['tag'] = tmp
return ModeldbAddExperimentRunTag(**d)
|
{
"content_hash": "8a208e899e0f7b3c5e27a0cc28490a91",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 62,
"avg_line_length": 21.357142857142858,
"alnum_prop": 0.5769230769230769,
"repo_name": "mitdbg/modeldb",
"id": "fd59019a27598a8b2d5db0a1603d293f472a9da2",
"size": "641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/verta/verta/_swagger/_public/modeldb/model/ModeldbAddExperimentRunTag.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43352"
},
{
"name": "Dockerfile",
"bytes": "235"
},
{
"name": "HTML",
"bytes": "30924"
},
{
"name": "Java",
"bytes": "393927"
},
{
"name": "JavaScript",
"bytes": "1017682"
},
{
"name": "Python",
"bytes": "178774"
},
{
"name": "Scala",
"bytes": "251259"
},
{
"name": "Shell",
"bytes": "16870"
},
{
"name": "Thrift",
"bytes": "55683"
}
],
"symlink_target": ""
}
|
"""Google Translate Module
From https://github.com/EArmour/pyfibot/
"""
import logging
log = logging.getLogger("trans")
try:
import requests
except ImportError:
log.error("Missing requests library. The translate module won't work.")
gturl = "http://translate.google.com/translate_a/t"
gtheaders = {
"User-Agent":"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:27.0)"\
"Gecko/20100101 Firefox/27.0"
}
gtbody = "client=gtranslate&sl=&tl=en&text=%s"
def command_translate(bot, user, channel, args):
"""Transliterates text with Google Translate to English.
Usage: translate <text>."""
gtrans = requests.post(gturl, data=gtbody % args, headers=gtheaders)
json = gtrans.json()
translated = json["sentences"][0]["trans"]
bot.say(channel, "From " + json["src"] + ": " + translated)
def command_transliterate(bot, user, channel, args):
"""Transliterates text with Google Translate to English.
Usage: transliterate <text>."""
gtrans = requests.post(gturl, data=gtbody % args, headers=gtheaders)
json = gtrans.json()
transliterated = json["sentences"][0]["src_translit"]
if transliterated == "":
bot.say(channel, "No transliteration available.")
else:
bot.say(channel, "From " + json["src"] + ": " + transliterated)
|
{
"content_hash": "a9cda222f2122fdd7f3177034bf123ab",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 75,
"avg_line_length": 31.571428571428573,
"alnum_prop": 0.6523378582202112,
"repo_name": "mikar/demibot",
"id": "4e53d2c06e3b07cd5d619f5e2a12d1b6456e7956",
"size": "1350",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "demibot/modules/module_translate.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "72126"
}
],
"symlink_target": ""
}
|
import argparse
import getpass
import os
import urllib
import subprocess
import shutil
import re
import datetime
import time
import atexit
import json
from shutil import copyfile
run_timestamp = time.time()
# -------------------------------------------------------------------------------------------------
zookeeper_version = '3.4.6'
zookeeper_url = 'http://download.nextag.com/apache/zookeeper/' + \
'zookeeper-${VERSION}/zookeeper-${VERSION}.tar.gz'
zookeeper_filename = 'zookeeper-' + zookeeper_version
outdir = '/nscratch/' + getpass.getuser() + '/zookeeper_bits'
download_path = outdir + '/' + zookeeper_filename + '.tar'
download_url = zookeeper_url.replace('${VERSION}', zookeeper_version)
zookeeper_path = outdir + '/' + zookeeper_filename
zookeeper_c_lib_path = zookeeper_path + '/src/c'
workdir = os.getcwd() + '/work'
rundir = workdir + '/zookeeper-' + datetime.datetime.fromtimestamp(run_timestamp). \
strftime('%Y-%m-%d-%H-%M-%S')
network_if = 'eth0'
# XXX number of nodes and specific nodes should be
# in config file or input parameters
zookeeper_num_nodes = 3
zookeeper_nodes = ['16','15','14']
nodes_hash = {} # {'16' : '1', '15' : '2', '14' : '3'}
# -------------------------------------------------------------------------------------------------
def make_dir(mydir):
if not os.path.exists(mydir):
print "Creating " + mydir
os.makedirs(mydir)
else:
print "Directory " + mydir + " already exists"
# -------------------------------------------------------------------------------------------------
def do_install():
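    """Install the compiled Zookeeper C client library and headers (runs 'make install')."""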
print '> Installing Zookeeper libraries and includes'
print '>'
p = subprocess.Popen(['make','install'], cwd = zookeeper_c_lib_path)
p.wait()
print '> DONE'
# -------------------------------------------------------------------------------------------------
def do_setup():
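    """Download, untar and compile Zookeeper, then create per-node config files and data directories."""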
print '> Setting up Zookeeper in directory ' + outdir
print '>'
outdir_data = outdir + '/data'
make_dir(outdir)
make_dir(outdir_data)
if not os.path.exists(download_path):
print '> Entering directory: ' + outdir
print '> Downloading Zookeeper..'
urllib.urlretrieve(download_url, download_path)
print '> Untaring zookeeper..'
p = subprocess.Popen(['tar','-xof',download_path], cwd = outdir)
p.wait()
zookeeper_path = outdir + '/' + zookeeper_filename
print '> Compiling Zookeeper in dir: ' + zookeeper_path + '..'
p = subprocess.Popen(['ant', 'bin-package'], cwd = zookeeper_path)
p.wait()
print '> Compiling Zookeeper libraries (used by RAMCloud) ' \
'in dir: ' + zookeeper_c_lib_path
p = subprocess.Popen(['./configure'], cwd = zookeeper_c_lib_path)
p.wait()
p = subprocess.Popen(['make','-j','10'], cwd = zookeeper_c_lib_path)
p.wait()
# Zookeeper compiled
# copy configuration to the deployed zookeeper
if not os.path.exists('conf/zookeeper1.cfg'):
print 'Should run this script from the BITS-zookeeper folder'
exit(-1)
conf_dir = outdir + '/conf'
make_dir(conf_dir)
print "Copying configuration files.."
for node in zookeeper_nodes:
filename = 'zookeeper' + nodes_hash[node] + '.cfg'
destination_filename = conf_dir + '/' + filename
copyfile('conf/zookeeper1.cfg', destination_filename)
subs_pattern1 = '\'s/${USER}/' + getpass.getuser() + '/\''
subs_pattern2 = '\'s/${NODE_ID}/' + nodes_hash[node] + '/\''
os.system("sed -i " + subs_pattern1 + " " + destination_filename)
os.system("sed -i " + subs_pattern2 + " " + destination_filename)
print "Creating zookeeper conf instances in " + outdir_data
for node in zookeeper_nodes:
instance_data_dir = outdir_data + '/' + 'zookeeper' + nodes_hash[node]
make_dir(instance_data_dir)
fo = open(instance_data_dir + '/myid', "w")
fo.write(node)
fo.close()
print '> DONE'
print '>'
# -------------------------------------------------------------------------------------------------
zookeeper_instances = {}
def check_slurm_allocation():
    if 'SLURM_NODELIST' not in os.environ or not os.environ['SLURM_NODELIST']:
print '[ERROR] Need to run script within SLURM allocation'
exit(1)
def shutdown_zookeeper_launch_processes():
print '> Shutting down Zookeeper launch processes'
for c in zookeeper_instances.values():
c['process'].terminate()
print '> Waiting for processes to terminate...'
    # start pessimistic so the wait loop below actually runs at least once
    all_done = False
    while not all_done:
        all_done = True
        for c in zookeeper_instances.values():
            if c['process'].poll() is None:
                all_done = False
        time.sleep(0.01)
def shutdown_zookeeper_instances():
print '> Shutting down Zookeeper instances'
for node in zookeeper_nodes:
print '> Terminating Zookeeper on ' + node
srun_cmd = ['ssh', 'f' + node]
srun_cmd += [zookeeper_path + '/bin/zkServer.sh', 'stop']
srun_cmd += [zookeeper_path + '/../conf/zookeeper' + \
nodes_hash[node] + '.cfg']
p = subprocess.Popen(srun_cmd)
p.wait()
print '> DONE'
def initialize():
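    """Populate nodes_hash, mapping each node name to a 1-based instance id."""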
counter = 0
for node in zookeeper_nodes:
counter += 1
nodes_hash[node] = str(counter)
def do_start():
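    """Launch a Zookeeper instance on every configured node and block until this process is terminated."""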
print '> Launching Zookeeper cluster...'
print '>'
print '> Output redirected to ' + rundir
print '>'
print '> Master/Worker Nodes: ' + ', '.join(zookeeper_nodes)
print '>'
# Launch Zookeeper Workers
print '> Launching Zookeeper nodes'
print '>'
# Create symlink for latest run
subprocess.call(['ln', '-s', '-f', '-T', rundir, workdir + '/latest'])
for node in zookeeper_nodes:
print '> Launching Zookeeper on ' + node
srun_cmd = ['ssh', 'f' + node]
srun_cmd += [zookeeper_path + '/bin/zkServer.sh', 'start']
srun_cmd += [zookeeper_path + '/../conf/zookeeper' + \
str(nodes_hash[node]) + '.cfg']
myrundir = rundir + '/worker-' + node
make_dir(myrundir)
myoutfile = myrundir + '/stdout'
myerrfile = myrundir + '/stderr'
fout = open(myoutfile, 'w')
ferr = open(myerrfile, 'w')
p = subprocess.Popen(srun_cmd, stdout=fout, stderr=ferr)
zookeeper_instances[node] = {'process': p, 'out': myoutfile, \
'err': myerrfile, 'node': node}
# When exiting, make sure all children are terminated cleanly
atexit.register(shutdown_zookeeper_instances)
print '>'
print '> ALL NODES ARE UP! TERMINATE THIS PROCESS TO SHUT DOWN ZOOKEEPER CLUSTER.'
while True:
time.sleep(0.5)
# -------------------------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Run script for Zookeeper on FireBox-0 cluster.')
parser.add_argument('action', nargs=1, help='the action to perform (setup|start|stop)')
args = parser.parse_args()
print '> ================================================================================'
print '> ZOOKEEPER RUN SCRIPT FOR FIREBOX-0 CLUSTER (VERSION ' + str(zookeeper_version) + ')'
print '> ================================================================================'
print '>'
git_rev = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
print '> GIT revision: ' + git_rev.replace('\n','')
print '>'
print '> COMMAND = ' + str(args.action)
initialize()
if args.action[0] == 'setup':
do_setup()
elif args.action[0] == 'start':
do_start()
elif args.action[0] == 'install':
do_install()
elif args.action[0] == 'stop':
shutdown_zookeeper_instances()
else:
print '[ERROR] Unknown action \'' + args.action[0] + '\''
|
{
"content_hash": "ae99ba8807cb67563d122cb022e901e2",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 99,
"avg_line_length": 32.70564516129032,
"alnum_prop": 0.5338429293551966,
"repo_name": "ucb-bar/bits",
"id": "dfc846782196126275e6aa9a4fab650488a5e483",
"size": "8370",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/zookeeper/zookeeper.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "120667"
},
{
"name": "Shell",
"bytes": "4325"
}
],
"symlink_target": ""
}
|
"""imports checkers for Python code"""
import collections
from distutils import sysconfig
import os
import sys
import copy
import six
import astroid
from astroid import are_exclusive
from astroid.modutils import (get_module_part, is_standard_module)
import isort
from pylint.interfaces import IAstroidChecker
from pylint.utils import get_global_option
from pylint.exceptions import EmptyReportError
from pylint.checkers import BaseChecker
from pylint.checkers.utils import (
check_messages,
node_ignores_exception,
is_from_fallback_block
)
from pylint.graph import get_cycles, DotBackend
from pylint.reporters.ureports.nodes import VerbatimText, Paragraph
def _qualified_names(modname):
"""Split the names of the given module into subparts
For example,
_qualified_names('pylint.checkers.ImportsChecker')
returns
['pylint', 'pylint.checkers', 'pylint.checkers.ImportsChecker']
"""
names = modname.split('.')
return ['.'.join(names[0:i+1]) for i in range(len(names))]
def _get_import_name(importnode, modname):
"""Get a prepared module name from the given import node
In the case of relative imports, this will return the
absolute qualified module name, which might be useful
for debugging. Otherwise, the initial module name
is returned unchanged.
"""
if isinstance(importnode, astroid.ImportFrom):
if importnode.level:
root = importnode.root()
if isinstance(root, astroid.Module):
modname = root.relative_to_absolute_name(
modname, level=importnode.level)
return modname
def _get_first_import(node, context, name, base, level, alias):
"""return the node where [base.]<name> is imported or None if not found
"""
fullname = '%s.%s' % (base, name) if base else name
first = None
found = False
for first in context.body:
if first is node:
continue
if first.scope() is node.scope() and first.fromlineno > node.fromlineno:
continue
if isinstance(first, astroid.Import):
if any(fullname == iname[0] for iname in first.names):
found = True
break
elif isinstance(first, astroid.ImportFrom):
if level == first.level:
for imported_name, imported_alias in first.names:
if fullname == '%s.%s' % (first.modname, imported_name):
found = True
break
if name != '*' and name == imported_name and not (alias or imported_alias):
found = True
break
if found:
break
if found and not are_exclusive(first, node):
return first
return None
def _ignore_import_failure(node, modname, ignored_modules):
for submodule in _qualified_names(modname):
if submodule in ignored_modules:
return True
return node_ignores_exception(node, ImportError)
# utilities to represents import dependencies as tree and dot graph ###########
def _make_tree_defs(mod_files_list):
"""get a list of 2-uple (module, list_of_files_which_import_this_module),
it will return a dictionary to represent this as a tree
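    For example,
    _make_tree_defs([('pylint.checkers', ['a.py'])])
    returns
    {'pylint': [{'checkers': [{}, ['a.py']]}, []]}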
"""
tree_defs = {}
for mod, files in mod_files_list:
node = (tree_defs, ())
for prefix in mod.split('.'):
node = node[0].setdefault(prefix, [{}, []])
node[1] += files
return tree_defs
def _repr_tree_defs(data, indent_str=None):
"""return a string which represents imports as a tree"""
lines = []
nodes = data.items()
for i, (mod, (sub, files)) in enumerate(sorted(nodes, key=lambda x: x[0])):
if not files:
files = ''
else:
files = '(%s)' % ','.join(sorted(files))
if indent_str is None:
lines.append('%s %s' % (mod, files))
sub_indent_str = ' '
else:
lines.append(r'%s\-%s %s' % (indent_str, mod, files))
if i == len(nodes)-1:
sub_indent_str = '%s ' % indent_str
else:
sub_indent_str = '%s| ' % indent_str
if sub:
lines.append(_repr_tree_defs(sub, sub_indent_str))
return '\n'.join(lines)
def _dependencies_graph(filename, dep_info):
"""write dependencies as a dot (graphviz) file
"""
done = {}
printer = DotBackend(filename[:-4], rankdir='LR')
printer.emit('URL="." node[shape="box"]')
for modname, dependencies in sorted(six.iteritems(dep_info)):
done[modname] = 1
printer.emit_node(modname)
for depmodname in dependencies:
if depmodname not in done:
done[depmodname] = 1
printer.emit_node(depmodname)
for depmodname, dependencies in sorted(six.iteritems(dep_info)):
for modname in dependencies:
printer.emit_edge(modname, depmodname)
printer.generate(filename)
def _make_graph(filename, dep_info, sect, gtype):
"""generate a dependencies graph and add some information about it in the
report's section
"""
_dependencies_graph(filename, dep_info)
sect.append(Paragraph('%simports graph has been written to %s'
% (gtype, filename)))
# the import checker itself ###################################################
MSGS = {
'E0401': ('Unable to import %s',
'import-error',
'Used when pylint has been unable to import a module.',
{'old_names': [('F0401', 'import-error')]}),
'E0402': ('Attempted relative import beyond top-level package',
'relative-beyond-top-level',
'Used when a relative import tries to access too many levels '
'in the current package.'),
'R0401': ('Cyclic import (%s)',
'cyclic-import',
'Used when a cyclic import between two or more modules is \
detected.'),
'W0401': ('Wildcard import %s',
'wildcard-import',
'Used when `from module import *` is detected.'),
'W0402': ('Uses of a deprecated module %r',
'deprecated-module',
'Used a module marked as deprecated is imported.'),
'W0403': ('Relative import %r, should be %r',
'relative-import',
'Used when an import relative to the package directory is '
'detected.',
{'maxversion': (3, 0)}),
'W0404': ('Reimport %r (imported line %s)',
'reimported',
'Used when a module is reimported multiple times.'),
'W0406': ('Module import itself',
'import-self',
'Used when a module is importing itself.'),
'W0410': ('__future__ import is not the first non docstring statement',
'misplaced-future',
'Python 2.5 and greater require __future__ import to be the \
first non docstring statement in the module.'),
'C0410': ('Multiple imports on one line (%s)',
'multiple-imports',
'Used when import statement importing multiple modules is '
'detected.'),
'C0411': ('%s should be placed before %s',
'wrong-import-order',
'Used when PEP8 import order is not respected (standard imports '
'first, then third-party libraries, then local imports)'),
'C0412': ('Imports from package %s are not grouped',
'ungrouped-imports',
'Used when imports are not grouped by packages'),
'C0413': ('Import "%s" should be placed at the top of the '
'module',
'wrong-import-position',
'Used when code and imports are mixed'),
}
DEFAULT_STANDARD_LIBRARY = ()
DEFAULT_KNOWN_THIRD_PARTY = ('enchant',)
class ImportsChecker(BaseChecker):
"""checks for
* external modules dependencies
* relative / wildcard imports
* cyclic imports
* uses of deprecated modules
"""
__implements__ = IAstroidChecker
name = 'imports'
msgs = MSGS
priority = -2
if six.PY2:
deprecated_modules = ('regsub', 'TERMIOS', 'Bastion', 'rexec')
elif sys.version_info < (3, 5):
deprecated_modules = ('optparse', )
else:
deprecated_modules = ('optparse', 'tkinter.tix')
options = (('deprecated-modules',
{'default' : deprecated_modules,
'type' : 'csv',
'metavar' : '<modules>',
'help' : 'Deprecated modules which should not be used,'
' separated by a comma'}
),
('import-graph',
{'default' : '',
'type' : 'string',
'metavar' : '<file.dot>',
'help' : 'Create a graph of every (i.e. internal and'
' external) dependencies in the given file'
' (report RP0402 must not be disabled)'}
),
('ext-import-graph',
{'default' : '',
'type' : 'string',
'metavar' : '<file.dot>',
'help' : 'Create a graph of external dependencies in the'
' given file (report RP0402 must not be disabled)'}
),
('int-import-graph',
{'default' : '',
'type' : 'string',
'metavar' : '<file.dot>',
'help' : 'Create a graph of internal dependencies in the'
' given file (report RP0402 must not be disabled)'}
),
('known-standard-library',
{'default': DEFAULT_STANDARD_LIBRARY,
'type': 'csv',
'metavar': '<modules>',
'help': 'Force import order to recognize a module as part of'
' the standard compatibility libraries.'}
),
('known-third-party',
{'default': DEFAULT_KNOWN_THIRD_PARTY,
'type': 'csv',
'metavar': '<modules>',
'help': 'Force import order to recognize a module as part of'
' a third party library.'}
),
('analyse-fallback-blocks',
{'default': False,
'type': 'yn',
'metavar': '<y_or_n>',
'help': 'Analyse import fallback blocks. This can be used to '
'support both Python 2 and 3 compatible code, which means that '
'the block might have code that exists only in one or another '
'interpreter, leading to false positives when analysed.'},
),
('allow-wildcard-with-all',
{'default': False,
'type': 'yn',
'metavar': '<y_or_n>',
'help': 'Allow wildcard imports from modules that define __all__.'}),
)
def __init__(self, linter=None):
BaseChecker.__init__(self, linter)
self.stats = None
self.import_graph = None
self._imports_stack = []
self._first_non_import_node = None
self.__int_dep_info = self.__ext_dep_info = None
self.reports = (('RP0401', 'External dependencies',
self._report_external_dependencies),
('RP0402', 'Modules dependencies graph',
self._report_dependencies_graph),
)
self._site_packages = self._compute_site_packages()
@staticmethod
def _compute_site_packages():
def _normalized_path(path):
return os.path.normcase(os.path.abspath(path))
paths = set()
real_prefix = getattr(sys, 'real_prefix', None)
for prefix in filter(None, (real_prefix, sys.prefix)):
path = sysconfig.get_python_lib(prefix=prefix)
path = _normalized_path(path)
paths.add(path)
# Handle Debian's derivatives /usr/local.
if os.path.isfile("/etc/debian_version"):
for prefix in filter(None, (real_prefix, sys.prefix)):
libpython = os.path.join(prefix, "local", "lib",
"python" + sysconfig.get_python_version(),
"dist-packages")
paths.add(libpython)
return paths
def open(self):
"""called before visiting project (i.e set of modules)"""
self.linter.add_stats(dependencies={})
self.linter.add_stats(cycles=[])
self.stats = self.linter.stats
self.import_graph = collections.defaultdict(set)
self._excluded_edges = collections.defaultdict(set)
self._ignored_modules = get_global_option(
self, 'ignored-modules', default=[])
def _import_graph_without_ignored_edges(self):
filtered_graph = copy.deepcopy(self.import_graph)
for node in filtered_graph:
filtered_graph[node].difference_update(self._excluded_edges[node])
return filtered_graph
def close(self):
"""called before visiting project (i.e set of modules)"""
if self.linter.is_message_enabled('cyclic-import'):
graph = self._import_graph_without_ignored_edges()
vertices = list(graph)
for cycle in get_cycles(graph, vertices=vertices):
self.add_message('cyclic-import', args=' -> '.join(cycle))
@check_messages(*MSGS.keys())
def visit_import(self, node):
"""triggered when an import statement is seen"""
self._check_reimport(node)
modnode = node.root()
names = [name for name, _ in node.names]
if len(names) >= 2:
self.add_message('multiple-imports', args=', '.join(names), node=node)
for name in names:
self._check_deprecated_module(node, name)
imported_module = self._get_imported_module(node, name)
if isinstance(node.parent, astroid.Module):
                # Allow nested imports
self._check_position(node)
if isinstance(node.scope(), astroid.Module):
self._record_import(node, imported_module)
if imported_module is None:
continue
self._check_relative_import(modnode, node, imported_module, name)
self._add_imported_module(node, imported_module.name)
@check_messages(*(MSGS.keys()))
def visit_importfrom(self, node):
"""triggered when a from statement is seen"""
basename = node.modname
imported_module = self._get_imported_module(node, basename)
self._check_misplaced_future(node)
self._check_deprecated_module(node, basename)
self._check_wildcard_imports(node, imported_module)
self._check_same_line_imports(node)
self._check_reimport(node, basename=basename, level=node.level)
if isinstance(node.parent, astroid.Module):
            # Allow nested imports
self._check_position(node)
if isinstance(node.scope(), astroid.Module):
self._record_import(node, imported_module)
if imported_module is None:
return
modnode = node.root()
self._check_relative_import(modnode, node, imported_module, basename)
for name, _ in node.names:
if name != '*':
self._add_imported_module(node, '%s.%s' % (imported_module.name, name))
@check_messages(*(MSGS.keys()))
def leave_module(self, node):
# Check imports are grouped by category (standard, 3rd party, local)
std_imports, ext_imports, loc_imports = self._check_imports_order(node)
# Check imports are grouped by package within a given category
met = set()
current_package = None
for import_node, import_name in std_imports + ext_imports + loc_imports:
if not self.linter.is_message_enabled('ungrouped-imports', import_node.fromlineno):
continue
package, _, _ = import_name.partition('.')
if current_package and current_package != package and package in met:
self.add_message('ungrouped-imports', node=import_node,
args=package)
current_package = package
met.add(package)
self._imports_stack = []
self._first_non_import_node = None
def compute_first_non_import_node(self, node):
if not self.linter.is_message_enabled('wrong-import-position', node.fromlineno):
return
# if the node does not contain an import instruction, and if it is the
        # first node of the module, keep track of it (all the import positions
# of the module will be compared to the position of this first
# instruction)
if self._first_non_import_node:
return
if not isinstance(node.parent, astroid.Module):
return
nested_allowed = [astroid.TryExcept, astroid.TryFinally]
is_nested_allowed = [
allowed for allowed in nested_allowed if isinstance(node, allowed)]
if is_nested_allowed and \
any(node.nodes_of_class((astroid.Import, astroid.ImportFrom))):
return
if isinstance(node, astroid.Assign):
# Add compatibility for module level dunder names
# https://www.python.org/dev/peps/pep-0008/#module-level-dunder-names
valid_targets = [
isinstance(target, astroid.AssignName) and
target.name.startswith('__') and target.name.endswith('__')
for target in node.targets]
if all(valid_targets):
return
self._first_non_import_node = node
visit_tryfinally = visit_tryexcept = visit_assignattr = visit_assign = \
visit_ifexp = visit_comprehension = visit_expr = visit_if = \
compute_first_non_import_node
def visit_functiondef(self, node):
if not self.linter.is_message_enabled('wrong-import-position', node.fromlineno):
return
# If it is the first non import instruction of the module, record it.
if self._first_non_import_node:
return
# Check if the node belongs to an `If` or a `Try` block. If they
# contain imports, skip recording this node.
if not isinstance(node.parent.scope(), astroid.Module):
return
root = node
while not isinstance(root.parent, astroid.Module):
root = root.parent
if isinstance(root, (astroid.If, astroid.TryFinally, astroid.TryExcept)):
if any(root.nodes_of_class((astroid.Import, astroid.ImportFrom))):
return
self._first_non_import_node = node
visit_classdef = visit_for = visit_while = visit_functiondef
def _check_misplaced_future(self, node):
basename = node.modname
if basename == '__future__':
# check if this is the first non-docstring statement in the module
prev = node.previous_sibling()
if prev:
# consecutive future statements are possible
if not (isinstance(prev, astroid.ImportFrom)
and prev.modname == '__future__'):
self.add_message('misplaced-future', node=node)
return
def _check_same_line_imports(self, node):
# Detect duplicate imports on the same line.
names = (name for name, _ in node.names)
counter = collections.Counter(names)
for name, count in counter.items():
if count > 1:
self.add_message('reimported', node=node,
args=(name, node.fromlineno))
def _check_position(self, node):
"""Check `node` import or importfrom node position is correct
Send a message if `node` comes before another instruction
"""
# if a first non-import instruction has already been encountered,
# it means the import comes after it and therefore is not well placed
if self._first_non_import_node:
self.add_message('wrong-import-position', node=node,
args=node.as_string())
def _record_import(self, node, importedmodnode):
"""Record the package `node` imports from"""
importedname = importedmodnode.name if importedmodnode else None
if not importedname:
if isinstance(node, astroid.ImportFrom):
importedname = node.modname
else:
importedname = node.names[0][0].split('.')[0]
if isinstance(node, astroid.ImportFrom) and (node.level or 0) >= 1:
            # We need the imported name with its leading dot to detect a local package
# Example of node:
# 'from .my_package1 import MyClass1'
# the output should be '.my_package1' instead of 'my_package1'
# Example of node:
# 'from . import my_package2'
# the output should be '.my_package2' instead of '{pyfile}'
importedname = '.' + importedname
self._imports_stack.append((node, importedname))
@staticmethod
def _is_fallback_import(node, imports):
imports = [import_node for (import_node, _) in imports]
return any(astroid.are_exclusive(import_node, node)
for import_node in imports)
def _check_imports_order(self, _module_node):
"""Checks imports of module `node` are grouped by category
Imports must follow this order: standard, 3rd party, local
"""
std_imports = []
third_party_imports = []
first_party_imports = []
# need of a list that holds third or first party ordered import
external_imports = []
local_imports = []
third_party_not_ignored = []
first_party_not_ignored = []
local_not_ignored = []
isort_obj = isort.SortImports(
file_contents='', known_third_party=self.config.known_third_party,
known_standard_library=self.config.known_standard_library,
)
for node, modname in self._imports_stack:
if modname.startswith('.'):
package = '.' + modname.split('.')[1]
else:
package = modname.split('.')[0]
nested = not isinstance(node.parent, astroid.Module)
ignore_for_import_order = not self.linter.is_message_enabled('wrong-import-order',
node.fromlineno)
import_category = isort_obj.place_module(package)
node_and_package_import = (node, package)
if import_category in ('FUTURE', 'STDLIB'):
std_imports.append(node_and_package_import)
wrong_import = (third_party_not_ignored or first_party_not_ignored
or local_not_ignored)
if self._is_fallback_import(node, wrong_import):
continue
if wrong_import and not nested:
self.add_message('wrong-import-order', node=node,
args=('standard import "%s"' % node.as_string(),
'"%s"' % wrong_import[0][0].as_string()))
elif import_category == 'THIRDPARTY':
third_party_imports.append(node_and_package_import)
external_imports.append(node_and_package_import)
if not nested and not ignore_for_import_order:
third_party_not_ignored.append(node_and_package_import)
wrong_import = first_party_not_ignored or local_not_ignored
if wrong_import and not nested:
self.add_message('wrong-import-order', node=node,
args=('third party import "%s"' % node.as_string(),
'"%s"' % wrong_import[0][0].as_string()))
elif import_category == 'FIRSTPARTY':
first_party_imports.append(node_and_package_import)
external_imports.append(node_and_package_import)
if not nested and not ignore_for_import_order:
first_party_not_ignored.append(node_and_package_import)
wrong_import = local_not_ignored
if wrong_import and not nested:
self.add_message('wrong-import-order', node=node,
args=('first party import "%s"' % node.as_string(),
'"%s"' % wrong_import[0][0].as_string()))
elif import_category == 'LOCALFOLDER':
local_imports.append((node, package))
if not nested and not ignore_for_import_order:
local_not_ignored.append((node, package))
return std_imports, external_imports, local_imports
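        # For reference, a hedged sketch (module names are hypothetical) of an
        # ordering this check accepts: standard library first, then third
        # party, then local/relative imports:
        #
        #   import os
        #   import sys
        #
        #   import astroid
        #
        #   from . import sibling_module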
def _get_imported_module(self, importnode, modname):
try:
return importnode.do_import_module(modname)
except astroid.TooManyLevelsError:
if _ignore_import_failure(importnode, modname, self._ignored_modules):
return None
self.add_message('relative-beyond-top-level', node=importnode)
except astroid.AstroidBuildingException:
if _ignore_import_failure(importnode, modname, self._ignored_modules):
return None
if not self.config.analyse_fallback_blocks and is_from_fallback_block(importnode):
return None
dotted_modname = _get_import_name(importnode, modname)
self.add_message('import-error', args=repr(dotted_modname),
node=importnode)
def _check_relative_import(self, modnode, importnode, importedmodnode,
importedasname):
"""check relative import. node is either an Import or From node, modname
the imported module name.
"""
if not self.linter.is_message_enabled('relative-import'):
return None
if importedmodnode.file is None:
return False # built-in module
if modnode is importedmodnode:
return False # module importing itself
if modnode.absolute_import_activated() or getattr(importnode, 'level', None):
return False
if importedmodnode.name != importedasname:
# this must be a relative import...
self.add_message('relative-import',
args=(importedasname, importedmodnode.name),
node=importnode)
return None
return None
def _add_imported_module(self, node, importedmodname):
"""notify an imported module, used to analyze dependencies"""
module_file = node.root().file
context_name = node.root().name
base = os.path.splitext(os.path.basename(module_file))[0]
# Determine if we have a `from .something import` in a package's
# __init__. This means the module will never be able to import
        # itself using this condition (the level will be bigger, or if the
        # module is named the same as the package, the name will be
        # different anyway).
if isinstance(node, astroid.ImportFrom):
if node.level and node.level > 0 and base == '__init__':
return
try:
importedmodname = get_module_part(importedmodname,
module_file)
except ImportError:
pass
if context_name == importedmodname:
self.add_message('import-self', node=node)
elif not is_standard_module(importedmodname):
# handle dependencies
importedmodnames = self.stats['dependencies'].setdefault(
importedmodname, set())
if context_name not in importedmodnames:
importedmodnames.add(context_name)
# update import graph
self.import_graph[context_name].add(importedmodname)
if not self.linter.is_message_enabled('cyclic-import', line=node.lineno):
self._excluded_edges[context_name].add(importedmodname)
def _check_deprecated_module(self, node, mod_path):
"""check if the module is deprecated"""
for mod_name in self.config.deprecated_modules:
if mod_path == mod_name or mod_path.startswith(mod_name + '.'):
self.add_message('deprecated-module', node=node, args=mod_path)
def _check_reimport(self, node, basename=None, level=None):
"""check if the import is necessary (i.e. not already done)"""
if not self.linter.is_message_enabled('reimported'):
return
frame = node.frame()
root = node.root()
contexts = [(frame, level)]
if root is not frame:
contexts.append((root, None))
for known_context, known_level in contexts:
for name, alias in node.names:
first = _get_first_import(
node, known_context,
name, basename,
known_level, alias)
if first is not None:
self.add_message('reimported', node=node,
args=(name, first.fromlineno))
def _report_external_dependencies(self, sect, _, _dummy):
"""return a verbatim layout for displaying dependencies"""
dep_info = _make_tree_defs(six.iteritems(self._external_dependencies_info()))
if not dep_info:
raise EmptyReportError()
tree_str = _repr_tree_defs(dep_info)
sect.append(VerbatimText(tree_str))
def _report_dependencies_graph(self, sect, _, _dummy):
"""write dependencies as a dot (graphviz) file"""
dep_info = self.stats['dependencies']
if not dep_info or not (self.config.import_graph
or self.config.ext_import_graph
or self.config.int_import_graph):
raise EmptyReportError()
filename = self.config.import_graph
if filename:
_make_graph(filename, dep_info, sect, '')
filename = self.config.ext_import_graph
if filename:
_make_graph(filename, self._external_dependencies_info(),
sect, 'external ')
filename = self.config.int_import_graph
if filename:
_make_graph(filename, self._internal_dependencies_info(),
sect, 'internal ')
def _external_dependencies_info(self):
"""return cached external dependencies information or build and
cache them
"""
if self.__ext_dep_info is None:
package = self.linter.current_name
self.__ext_dep_info = result = {}
for importee, importers in six.iteritems(self.stats['dependencies']):
if not importee.startswith(package):
result[importee] = importers
return self.__ext_dep_info
def _internal_dependencies_info(self):
"""return cached internal dependencies information or build and
cache them
"""
if self.__int_dep_info is None:
package = self.linter.current_name
self.__int_dep_info = result = {}
for importee, importers in six.iteritems(self.stats['dependencies']):
if importee.startswith(package):
result[importee] = importers
return self.__int_dep_info
def _check_wildcard_imports(self, node, imported_module):
wildcard_import_is_allowed = (
self._wildcard_import_is_allowed(imported_module)
)
for name, _ in node.names:
if name == '*' and not wildcard_import_is_allowed:
self.add_message('wildcard-import', args=node.modname, node=node)
def _wildcard_import_is_allowed(self, imported_module):
return (self.config.allow_wildcard_with_all
and imported_module is not None
and '__all__' in imported_module.locals)
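        # A hedged illustration (module name is hypothetical): with
        # allow-wildcard-with-all enabled, `from utils import *` is accepted
        # only when utils defines __all__, e.g.:
        #   # utils.py
        #   __all__ = ['helper_one', 'helper_two']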
def register(linter):
"""required method to auto register this checker """
linter.register_checker(ImportsChecker(linter))
|
{
"content_hash": "45c502fac8d0312acb57963abd52c16e",
"timestamp": "",
"source": "github",
"line_count": 781,
"max_line_length": 95,
"avg_line_length": 41.79001280409731,
"alnum_prop": 0.5676512041178994,
"repo_name": "lucidmotifs/auto-aoc",
"id": "630090c41314df25ff5f2074a39b9f71b9817146",
"size": "34032",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": ".venv/lib/python3.5/site-packages/pylint/checkers/imports.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "74"
},
{
"name": "C",
"bytes": "41695"
},
{
"name": "C++",
"bytes": "35306"
},
{
"name": "CSS",
"bytes": "96"
},
{
"name": "HTML",
"bytes": "48431"
},
{
"name": "JavaScript",
"bytes": "2043"
},
{
"name": "Python",
"bytes": "4850280"
},
{
"name": "Shell",
"bytes": "3778"
},
{
"name": "Visual Basic",
"bytes": "820"
},
{
"name": "XSLT",
"bytes": "2058"
}
],
"symlink_target": ""
}
|
"""SCons.Tool.gs
Tool-specific initialization for Ghostscript.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons.Action
import SCons.Platform
import SCons.Util
# Ghostscript goes by different names on different platforms...
platform = SCons.Platform.platform_default()
if platform == 'os2':
gs = 'gsos2'
elif platform == 'win32':
gs = 'gswin32c'
else:
gs = 'gs'
GhostscriptAction = None
def generate(env):
"""Add Builders and construction variables for Ghostscript to an
Environment."""
global GhostscriptAction
if GhostscriptAction is None:
GhostscriptAction = SCons.Action.Action('$GSCOM', '$GSCOMSTR')
import pdf
pdf.generate(env)
bld = env['BUILDERS']['PDF']
bld.add_action('.ps', GhostscriptAction)
env['GS'] = gs
env['GSFLAGS'] = SCons.Util.CLVar('-dNOPAUSE -dBATCH -sDEVICE=pdfwrite')
env['GSCOM'] = '$GS $GSFLAGS -sOutputFile=$TARGET $SOURCES'
def exists(env):
if 'PS2PDF' in env:
return env.Detect(env['PS2PDF'])
else:
return env.Detect(gs) or SCons.Util.WhereIs(gs)
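# A hedged usage sketch (hypothetical SConstruct); the PDF builder that
# generate() extends converts PostScript sources via Ghostscript:
#
#   env = Environment(tools=['gs'])
#   env.PDF(target='out.pdf', source='in.ps')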
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
{
"content_hash": "d557caa18292f938644e881cd896a861",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 76,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.71998371998372,
"repo_name": "azatoth/scons",
"id": "ada169ac32cf98232818759379c40275dcc8eda0",
"size": "2457",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/engine/SCons/Tool/gs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "259"
},
{
"name": "JavaScript",
"bytes": "17316"
},
{
"name": "Perl",
"bytes": "45214"
},
{
"name": "Python",
"bytes": "6716123"
},
{
"name": "Shell",
"bytes": "2535"
}
],
"symlink_target": ""
}
|
import json
from datetime import datetime
import pytest
from django.urls import reverse
from rest_framework import viewsets
from rest_framework.serializers import ModelSerializer, SerializerMethodField
from rest_framework_json_api.renderers import JSONRenderer
from example.models import Blog, Comment, Entry
# serializers
class RelatedModelSerializer(ModelSerializer):
class Meta:
model = Comment
fields = ("id",)
class DummyTestSerializer(ModelSerializer):
"""
This serializer is a simple compound document serializer which includes only
a single embedded relation
"""
related_models = RelatedModelSerializer(
source="comments", many=True, read_only=True
)
json_field = SerializerMethodField()
def get_json_field(self, entry):
return {"JsonKey": "JsonValue"}
class Meta:
model = Entry
fields = ("related_models", "json_field")
# views
class DummyTestViewSet(viewsets.ModelViewSet):
queryset = Entry.objects.all()
serializer_class = DummyTestSerializer
def render_dummy_test_serialized_view(view_class, entry):
serializer = DummyTestSerializer(instance=entry)
renderer = JSONRenderer()
return renderer.render(serializer.data, renderer_context={"view": view_class()})
# tests
def test_simple_reverse_relation_included_renderer(db, entry):
"""
Test renderer when a single reverse fk relation is passed.
"""
rendered = render_dummy_test_serialized_view(DummyTestViewSet, entry)
assert rendered
def test_render_format_field_names(db, settings, entry):
"""Test that json field is kept untouched."""
settings.JSON_API_FORMAT_FIELD_NAMES = "dasherize"
rendered = render_dummy_test_serialized_view(DummyTestViewSet, entry)
result = json.loads(rendered.decode())
assert result["data"]["attributes"]["json-field"] == {"JsonKey": "JsonValue"}
@pytest.mark.django_db
def test_blog_create(client):
url = reverse("drf-entry-blog-list")
name = "Dummy Name"
request_data = {
"data": {"attributes": {"name": name}, "type": "blogs"},
}
resp = client.post(url, request_data)
# look for created blog in database
blog = Blog.objects.filter(name=name)
# check if blog exists in database
assert blog.count() == 1
# get created blog from database
blog = blog.first()
expected = {
"data": {
"attributes": {"name": blog.name, "tags": []},
"id": f"{blog.id}",
"links": {"self": f"http://testserver/blogs/{blog.id}"},
"meta": {"copyright": datetime.now().year},
"type": "blogs",
},
"meta": {"apiDocs": "/docs/api/blogs"},
}
assert resp.status_code == 201
assert resp.json() == expected
@pytest.mark.django_db
def test_get_object_gives_correct_blog(client, blog, entry):
url = reverse("drf-entry-blog-detail", kwargs={"entry_pk": entry.id})
resp = client.get(url)
expected = {
"data": {
"attributes": {"name": blog.name, "tags": []},
"id": f"{blog.id}",
"links": {"self": f"http://testserver/blogs/{blog.id}"},
"meta": {"copyright": datetime.now().year},
"type": "blogs",
},
"meta": {"apiDocs": "/docs/api/blogs"},
}
got = resp.json()
assert got == expected
@pytest.mark.django_db
def test_get_object_patches_correct_blog(client, blog, entry):
url = reverse("drf-entry-blog-detail", kwargs={"entry_pk": entry.id})
new_name = blog.name + " update"
    assert new_name != blog.name
request_data = {
"data": {
"attributes": {"name": new_name},
"id": f"{blog.id}",
"links": {"self": f"http://testserver/blogs/{blog.id}"},
"meta": {"copyright": datetime.now().year},
"relationships": {"tags": {"data": []}},
"type": "blogs",
},
"meta": {"apiDocs": "/docs/api/blogs"},
}
resp = client.patch(url, data=request_data)
assert resp.status_code == 200
expected = {
"data": {
"attributes": {"name": new_name, "tags": []},
"id": f"{blog.id}",
"links": {"self": f"http://testserver/blogs/{blog.id}"},
"meta": {"copyright": datetime.now().year},
"type": "blogs",
},
"meta": {"apiDocs": "/docs/api/blogs"},
}
got = resp.json()
assert got == expected
@pytest.mark.django_db
def test_get_object_deletes_correct_blog(client, entry):
url = reverse("drf-entry-blog-detail", kwargs={"entry_pk": entry.id})
resp = client.delete(url)
assert resp.status_code == 204
@pytest.mark.django_db
def test_get_entry_list_with_blogs(client, entry):
url = reverse("drf-entry-suggested", kwargs={"entry_pk": entry.id})
resp = client.get(url)
got = resp.json()
expected = {
"links": {
"first": "http://testserver/drf-entries/1/suggested/?page%5Bnumber%5D=1",
"last": "http://testserver/drf-entries/1/suggested/?page%5Bnumber%5D=1",
"next": None,
"prev": None,
},
"data": [
{
"type": "entries",
"id": "1",
"attributes": {
"tags": [],
},
"links": {"self": "http://testserver/drf-blogs/1"},
}
],
"meta": {"pagination": {"page": 1, "pages": 1, "count": 1}},
}
assert resp.status_code == 200
assert got == expected
|
{
"content_hash": "998b698dca0b5f25d6a4965619d98cfa",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 85,
"avg_line_length": 27.61881188118812,
"alnum_prop": 0.5820039433590249,
"repo_name": "django-json-api/django-rest-framework-json-api",
"id": "d74edac740c2b5c28e8b364cc0beb34af3a10c36",
"size": "5579",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "example/tests/unit/test_default_drf_serializers.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "2888"
},
{
"name": "Python",
"bytes": "421004"
}
],
"symlink_target": ""
}
|
from __future__ import division, absolute_import, print_function
import os
import sys
import subprocess
import shutil
from setuptools import setup
def _read(fn):
path = os.path.join(os.path.dirname(__file__), fn)
return open(path).read()
def build_manpages():
# Go into the docs directory and build the manpage.
docdir = os.path.join(os.path.dirname(__file__), 'docs')
curdir = os.getcwd()
os.chdir(docdir)
try:
subprocess.check_call(['make', 'man'])
except OSError:
print("Could not build manpages (make man failed)!", file=sys.stderr)
return
finally:
os.chdir(curdir)
# Copy resulting manpages.
mandir = os.path.join(os.path.dirname(__file__), 'man')
if os.path.exists(mandir):
shutil.rmtree(mandir)
shutil.copytree(os.path.join(docdir, '_build', 'man'), mandir)
# Build manpages if we're making a source distribution tarball.
if 'sdist' in sys.argv:
build_manpages()
setup(
name='beets',
version='1.4.4',
description='music tagger and library organizer',
author='Adrian Sampson',
author_email='adrian@radbox.org',
url='http://beets.io/',
license='MIT',
platforms='ALL',
long_description=_read('README.rst'),
test_suite='test.testall.suite',
include_package_data=True, # Install plugin resources.
packages=[
'beets',
'beets.ui',
'beets.autotag',
'beets.util',
'beets.dbcore',
'beetsplug',
'beetsplug.bpd',
'beetsplug.web',
'beetsplug.lastgenre',
'beetsplug.metasync',
],
entry_points={
'console_scripts': [
'beet = beets.ui:main',
],
},
install_requires=[
'six>=1.9',
'mutagen>=1.33',
'munkres',
'unidecode',
'musicbrainzngs>=0.4',
'pyyaml',
'jellyfish',
] + (['colorama'] if (sys.platform == 'win32') else []) +
(['enum34>=1.0.4'] if sys.version_info < (3, 4, 0) else []),
tests_require=[
'beautifulsoup4',
'flask',
'mock',
'pylast',
'rarfile',
'responses',
'pyxdg',
'pathlib',
'python-mpd2',
'discogs-client'
],
# Plugin (optional) dependencies:
extras_require={
'absubmit': ['requests'],
'fetchart': ['requests'],
'chroma': ['pyacoustid'],
'discogs': ['discogs-client>=2.2.1'],
'beatport': ['requests-oauthlib>=0.6.1'],
'lastgenre': ['pylast'],
'mpdstats': ['python-mpd2>=0.4.2'],
'web': ['flask', 'flask-cors'],
'import': ['rarfile'],
'thumbnails': ['pyxdg'] +
(['pathlib'] if (sys.version_info < (3, 4, 0)) else []),
'metasync': ['dbus-python'],
},
# Non-Python/non-PyPI plugin dependencies:
# convert: ffmpeg
# bpd: python-gi and GStreamer
# absubmit: extractor binary from http://acousticbrainz.org/download
classifiers=[
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Multimedia :: Sound/Audio :: Players :: MP3',
'License :: OSI Approved :: MIT License',
'Environment :: Console',
'Environment :: Web Environment',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
|
{
"content_hash": "3bca830e586fa8f0ee73c5a6767dac99",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 77,
"avg_line_length": 27.440944881889763,
"alnum_prop": 0.5569583931133429,
"repo_name": "Kraymer/beets",
"id": "14864383755e45fb2a39f182f8834b4d90536043",
"size": "4179",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2951"
},
{
"name": "HTML",
"bytes": "3306"
},
{
"name": "JavaScript",
"bytes": "85947"
},
{
"name": "Python",
"bytes": "1859705"
},
{
"name": "Shell",
"bytes": "7413"
}
],
"symlink_target": ""
}
|
import sys
import recipe_util # pylint: disable=F0401
# This class doesn't need an __init__ method, so we disable the warning
# pylint: disable=W0232
class Infra(recipe_util.Recipe):
"""Basic Recipe class for the Infrastructure repositories."""
@staticmethod
def fetch_spec(_props):
solution = lambda name, path_infix = None: {
'name' : name,
'url' : 'https://chromium.googlesource.com/infra/%s%s.git' % (
path_infix + '/' if path_infix else '', name
),
'deps_file': '.DEPS.git',
'managed' : False,
}
spec = {
'solutions': [
solution('infra'),
solution('expect_tests', 'testing'),
solution('testing_support', 'testing'),
],
}
return {
'type': 'gclient_git',
'gclient_git_spec': spec,
}
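  # For reference, solution('infra') above expands to:
  #   {'name': 'infra',
  #    'url': 'https://chromium.googlesource.com/infra/infra.git',
  #    'deps_file': '.DEPS.git',
  #    'managed': False}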
@staticmethod
def expected_root(_props):
return 'infra'
def main(argv=None):
return Infra().handle_args(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
{
"content_hash": "100de02fa16328ead141d1d13cb0142c",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 73,
"avg_line_length": 23.488372093023255,
"alnum_prop": 0.5752475247524752,
"repo_name": "HackFisher/depot_tools",
"id": "3c65b4420f78aaef05421dfd187a64fbe50e7863",
"size": "1173",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "recipes/infra.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5028"
},
{
"name": "CSS",
"bytes": "136"
},
{
"name": "PHP",
"bytes": "586"
},
{
"name": "Python",
"bytes": "1931090"
},
{
"name": "Shell",
"bytes": "100230"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0038_contentnode_author'),
]
operations = [
migrations.AlterField(
model_name='formatpreset',
name='id',
field=models.CharField(choices=[(b'high_res_video', b'High Resolution'), (b'low_res_video', b'Low Resolution'), (b'vector_video', b'Vectorized'), (b'video_thumbnail', b'Thumbnail'), (b'video_subtitle', b'Subtitle'), (b'audio', b'Audio'), (b'audio_thumbnail', b'Thumbnail'), (b'document', b'Document'), (b'document_thumbnail', b'Thumbnail'), (b'exercise', b'Exercise'), (b'exercise_thumbnail', b'Thumbnail'), (b'exercise_image', b'Exercise Image'), (b'exercise_graphie', b'Exercise Graphie'), (b'channel_thumbnail', b'Channel Thumbnail')], max_length=150, primary_key=True, serialize=False),
),
]
|
{
"content_hash": "4ee2b9b3994f2138e70c8a4bd45e399e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 602,
"avg_line_length": 52.22222222222222,
"alnum_prop": 0.6574468085106383,
"repo_name": "aronasorman/content-curation",
"id": "76765bd24d6c4b9f9cfe908292e7db343a57b15d",
"size": "1012",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contentcuration/contentcuration/migrations/0039_auto_20161101_1555.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "85010"
},
{
"name": "HTML",
"bytes": "364417"
},
{
"name": "JavaScript",
"bytes": "285260"
},
{
"name": "Makefile",
"bytes": "579"
},
{
"name": "Python",
"bytes": "325384"
}
],
"symlink_target": ""
}
|
"""
Settings for the hello scripts.
You most likely need to edit a few of them, e.g. API_HOST and the OAuth
credentials.
"""
OUR_BANK = '00100'
USERNAME = '1000203893'
PASSWORD = '1000203893'
CONSUMER_KEY = 'bvldezvlnqj4mtva4jfktke4xliep0bt1xm44yxi'
CONSUMER_SECRET = 'fgwo35uhkroebasxlqgzjjcc0cf1yaujuynkwodz'
# API server URL
BASE_URL = 'https://socgen2-k-api.openbankproject.com'
API_VERSION = "v2.1.0"
# API server will redirect your browser to this URL, should be non-functional
# You will paste the redirect location here when running the script
CALLBACK_URI = 'http://127.0.0.1/cb'
# Our COUNTERPARTY account id (of the same currency)
OUR_COUNTERPARTY = '3806441b-bbdf-3c60-b2b3-14e2f645635f'
COUNTERPARTY_BANK = '00100'
# the following two fields are only used in v2.1.0
OUR_COUNTERPARTY_ID = ''
OUR_COUNTERPARTY_IBAN = ''
# Our currency to use
OUR_CURRENCY = 'XAF'
# Our value to transfer
# values below 1000 do not require a challenge request
OUR_VALUE = '0.01'
OUR_VALUE_LARGE = '1000.00'
PAYMENT_DESCRIPTION = 'Hello Payments v2.1!'
|
{
"content_hash": "543cdcc6e480f95ce51b2468ed689132",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 77,
"avg_line_length": 29.18421052631579,
"alnum_prop": 0.7105500450856628,
"repo_name": "OpenBankProject/Hello-OBP-DirectLogin-Python",
"id": "c70d22124fd8a0e9d65f0e2df32fff53b4ce36ea",
"size": "1133",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "props/socgen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "82679"
}
],
"symlink_target": ""
}
|
import sys
import csv
import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
if len(sys.argv) != 2:
print "Please specify a file to read"
sys.exit(1)
fileToPlot = open(sys.argv[1], "r")
csvReader = csv.DictReader(fileToPlot)
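# The input file must provide "timestamp" and "data" columns; a hypothetical
# sample matching the strptime format used below:
#   timestamp,data
#   2015-01-01 12:00:00,42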
timestamps = []
data = []
for row in csvReader:
timestamps.append(datetime.datetime.strptime(row["timestamp"], "%Y-%m-%d %H:%M:%S"))
data.append(int(row["data"]))
fig, ax = plt.subplots()
ax.plot(timestamps, data)
ax.xaxis.set_major_locator(mdates.DayLocator())
ax.xaxis.set_major_formatter(mdates.DateFormatter("%m-%d"))
ax.grid(True)
fig.suptitle(sys.argv[1])
fig.autofmt_xdate()
plt.show()
|
{
"content_hash": "2748545496620eaebe7652533d202483",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 88,
"avg_line_length": 22.161290322580644,
"alnum_prop": 0.7088791848617176,
"repo_name": "boztalay/LappyLogger",
"id": "4ea49dc0f352c02761e809388f82cb02add8001f",
"size": "687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Analysis/basicGraph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7479"
},
{
"name": "DTrace",
"bytes": "346"
},
{
"name": "Objective-C",
"bytes": "2047159"
},
{
"name": "Python",
"bytes": "14195"
},
{
"name": "Shell",
"bytes": "977"
}
],
"symlink_target": ""
}
|
from datetime import datetime # noqa
from datetime import timedelta # noqa
from django.utils.translation import ugettext_lazy as _ # noqa
from horizon import exceptions
from horizon import tabs
from openstack_dashboard import api
from openstack_dashboard.api import ceilometer
from openstack_dashboard.dashboards.admin.metering import tables
def make_tenant_queries(request, days_before=30):
try:
tenants, more = api.keystone.tenant_list(
request,
domain=None,
paginate=True,
marker="tenant_marker")
except Exception:
tenants = []
exceptions.handle(request,
_('Unable to retrieve tenant list.'))
queries = {}
for tenant in tenants:
tenant_query = [{
"field": "project_id",
"op": "eq",
"value": tenant.id}]
queries[tenant.name] = tenant_query
# TODO(lsmola) Just show last 30 days, should be switchable somewhere
# above the table.
date_from = datetime.now() - timedelta(days_before)
date_to = datetime.now()
additional_query = [{'field': 'timestamp',
'op': 'ge',
'value': date_from},
{'field': 'timestamp',
'op': 'le',
'value': date_to}]
return queries, additional_query
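# A hedged example of the returned `queries` mapping (tenant name and id are
# hypothetical):
#   {'demo': [{'field': 'project_id', 'op': 'eq', 'value': 'abc123'}]}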
def list_of_resource_aggregates(request, meters, stats_attr="avg"):
queries, additional_query = make_tenant_queries(request)
ceilometer_usage = ceilometer.CeilometerUsage(request)
try:
resource_aggregates = ceilometer_usage.\
resource_aggregates_with_statistics(
queries, meters, stats_attr="avg",
additional_query=additional_query)
except Exception:
resource_aggregates = []
exceptions.handle(request,
_('Unable to retrieve statistics.'))
return resource_aggregates
class GlobalDiskUsageTab(tabs.TableTab):
table_classes = (tables.GlobalDiskUsageTable,)
name = _("Global Disk Usage")
slug = "global_disk_usage"
template_name = ("horizon/common/_detail_table.html")
preload = False
def get_global_disk_usage_data(self):
""" Disk usage table data aggregated by project """
request = self.tab_group.request
return list_of_resource_aggregates(request,
ceilometer.GlobalDiskUsage.meters)
class GlobalNetworkTrafficUsageTab(tabs.TableTab):
table_classes = (tables.GlobalNetworkTrafficUsageTable,)
name = _("Global Network Traffic Usage")
slug = "global_network_traffic_usage"
template_name = ("horizon/common/_detail_table.html")
preload = False
def get_global_network_traffic_usage_data(self):
request = self.tab_group.request
return list_of_resource_aggregates(request,
ceilometer.GlobalNetworkTrafficUsage.meters)
class GlobalNetworkUsageTab(tabs.TableTab):
table_classes = (tables.GlobalNetworkUsageTable,)
name = _("Global Network Usage")
slug = "global_network_usage"
template_name = ("horizon/common/_detail_table.html")
preload = False
def get_global_network_usage_data(self):
request = self.tab_group.request
return list_of_resource_aggregates(request,
ceilometer.GlobalNetworkUsage.meters)
def allowed(self, request):
permissions = ("openstack.services.network",)
return request.user.has_perms(permissions)
class GlobalObjectStoreUsageTab(tabs.TableTab):
table_classes = (tables.GlobalObjectStoreUsageTable,)
name = _("Global Object Store Usage")
slug = "global_object_store_usage"
template_name = ("horizon/common/_detail_table.html")
preload = False
def get_global_object_store_usage_data(self):
request = self.tab_group.request
ceilometer_usage = ceilometer.CeilometerUsage(request)
date_from = datetime.now() - timedelta(30)
date_to = datetime.now()
additional_query = [{'field': 'timestamp',
'op': 'ge',
'value': date_from},
{'field': 'timestamp',
'op': 'le',
'value': date_to}]
try:
result = ceilometer_usage.global_object_store_usage(
with_statistics=True, additional_query=additional_query)
except Exception:
result = []
exceptions.handle(request,
_('Unable to retrieve statistics.'))
return result
def allowed(self, request):
permissions = ("openstack.services.object-store",)
return request.user.has_perms(permissions)
class GlobalStatsTab(tabs.Tab):
name = _("Stats")
slug = "stats"
template_name = ("admin/metering/stats.html")
preload = False
def get_context_data(self, request):
query = [{"field": "metadata.OS-EXT-AZ:availability_zone",
"op": "eq",
"value": "nova"}]
try:
resources = ceilometer.resource_list(request, query,
ceilometer_usage_object=None)
except Exception:
resources = []
exceptions.handle(request,
_('Unable to retrieve Nova Ceilometer '
'resources.'))
try:
resource = resources[0]
meters = [link['rel'] for link in resource.links
if link['rel'] != "self"]
except IndexError:
resource = None
meters = []
meter_titles = {"instance": _("Duration of instance"),
"instance:<type>": _("Duration of instance <type>"
" (openstack types)"),
"memory": _("Volume of RAM in MB"),
"cpu": _("CPU time used"),
"cpu_util": _("Average CPU utilisation"),
"vcpus": _("Number of VCPUs"),
"disk.read.requests": _("Number of read requests"),
"disk.write.requests": _("Number of write requests"),
"disk.read.bytes": _("Volume of reads in B"),
"disk.write.bytes": _("Volume of writes in B"),
"disk.root.size": _("Size of root disk in GB"),
"disk.ephemeral.size": _("Size of ephemeral disk "
"in GB"),
"network.incoming.bytes": _("Number of incoming bytes "
"on the network for a VM interface"),
"network.outgoing.bytes": _("Number of outgoing bytes "
"on the network for a VM interface"),
"network.incoming.packets": _("Number of incoming "
"packets for a VM interface"),
"network.outgoing.packets": _("Number of outgoing "
"packets for a VM interface")}
class MetersWrap(object):
""" A quick wrapper for meter and associated titles. """
def __init__(self, meter, meter_titles):
self.name = meter
self.title = meter_titles.get(meter, "")
meters_objs = []
for meter in meters:
meters_objs.append(MetersWrap(meter, meter_titles))
context = {'meters': meters_objs}
return context
class CeilometerOverviewTabs(tabs.TabGroup):
slug = "ceilometer_overview"
tabs = (GlobalDiskUsageTab, GlobalNetworkTrafficUsageTab,
GlobalObjectStoreUsageTab, GlobalNetworkUsageTab, GlobalStatsTab,)
sticky = True
|
{
"content_hash": "24a3ef3e5ab6601df51c6700ad167374",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 79,
"avg_line_length": 37.464114832535884,
"alnum_prop": 0.5627075351213282,
"repo_name": "r-icarus/openstack_microserver",
"id": "6539a62e2d29ec4c880dfe82be70cac1f00352fb",
"size": "8422",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/admin/metering/tabs.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "160741"
},
{
"name": "JavaScript",
"bytes": "359957"
},
{
"name": "Python",
"bytes": "2753685"
},
{
"name": "Shell",
"bytes": "12912"
}
],
"symlink_target": ""
}
|
from sef import *
from sef2 import *
from helpers import *
import time
import traceback
# from AnimalChecker import get_actual_neighbor
def other_player(player, game):
''' return the opponent player, used for alphaBetaMinimax'''
for p in game.players:
if player != p:
return p
def _sef(board, player, opponent):
'''
    F(s) = g(s) + h(s) + k(s)
    g = linear distance from the den
    k = distance from capturable enemies
    a = number of pieces alive
    For each measure, the actual value is the value for the current player
    minus the value for the opponent.
    Positive metrics:
    - how close is the enemy to my den?
    - can I capture anyone?
    Negative metrics:
    - can I be captured?
'''
res = 0
# # good for me
# dist = distance_from_den(player, opponent)
# print "\tdistance_from_den = %s" % dist
# rem = remaining_pieces(player, None)
# print "\tremaining_pieces = %s" % rem
# cap = can_capture(player, opponent)
# print "\tcan_capture = %s" % cap
# res -= dist
# res += rem
# res += cap
# # good for opponent
# # res -= 5*in_danger(player, opponent, "tiger")
# # res += distance_from_den(opponent, player)
# # res -= remaining_pieces(opponent, None)
# # res += can_capture(player, opponent)
# can_cap = can_be_captured(player, opponent)
# print "\tcan_be_captured = %s" % can_cap
# res -= can_cap # return netagive number
res += piece_importance(player, opponent, "tiger")
res += piece_importance(player, opponent, "elephant")
res += piece_importance(player, opponent, "mouse")
res += piece_importance(player, opponent, "wolf")
# res -= can_capture(opponent, player)
# res += 10 * apply_to_both(board, [player, opponent], remaining_pieces)
# # sum of the linear distance from each piece to the opponent den
# res += apply_to_both(board, [player, opponent], distance_from_den)
# res += 5*in_danger(player, opponent, "elephant")
# res += 2*in_danger(player, opponent, "mouse")
# res += 2*in_danger(player, opponent, "wolf")
# # sum of the linear distance from each piece to their respective pray
# res += 15 * apply_to_both(board, [player, opponent], distance_from_all_prays)
# res += 2 * hunted(player, opponent)
# res += 2 * hunting_mode(player, opponent, 'mouse')
# _res += 2 * hunting_mode(player, opponent, 'wolf')
# _res += 5 * hunting_mode(player, opponent, 'tiger')
# _res += 5 * hunting_mode(player, opponent, 'elephant')
print "INSIDE SEF>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> = ", res
return res
def available_moves(game, player, str_coordinate=None):
moves = []
for _animal in player:
if _animal not in ['name', 'den']:
_ani = player.__dict__['' + _animal]
if str_coordinate:
if not _ani.is_dead:
moves += [(_animal,) + move for move in get_actual_neighbor(*_ani._row_col_location) if _ani > game._board[move[0] - 1][move[1] - 1]]
else:
if not _ani.is_dead:
moves += [(_animal,) + move for move in get_actual_neighbor(str_loc=_ani.location) if _ani > game._board[move[0] - 1][move[1] - 1]]
# print moves
# moves += [move for move in _moves if _ani > game._board[move[][0] - 1][item[1] - 1]]]
# print moves
moves.sort(key=lambda tup: tup[0], reverse=False)
moves.sort(key=lambda tup: tup[1], reverse=False) # prioritize going down
# print moves.sort(key=lambda tup: tup[2], reverse=False)
return moves
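# For reference, each generated move is an (animal, row, col) tuple such as
# ('tiger', 4, 3), later applied via
# game._move(player[move[0]], new_row=move[1], new_col=move[2]).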
def get_actual_neighbor(cur_row=0, cur_col=0, str_loc=None):
if str_loc:
cur_row, cur_col = get_xy_coordinates(str_loc)
_neighbor = {
'left': (cur_row, cur_col - 1) if in_board(cur_row, cur_col - 1) else None,
'up': (cur_row - 1, cur_col) if in_board(cur_row - 1, cur_col) else None,
'right': (cur_row, cur_col + 1) if in_board(cur_row, cur_col + 1) else None,
'down': (cur_row + 1, cur_col) if in_board(cur_row + 1, cur_col) else None,
}
neighbor = []
[neighbor.append(item) for item in _neighbor.values() if item]
# print neighbor
neighbor.sort(key=lambda tup: tup[0], reverse=False)
return neighbor
count = 0
TRANSPOSITION_TABLE = {}
def alphaBetaMinimax(game, alpha, beta, level, depth, player, currentMove):
'''
game = node - current board state
'''
str_state = get_board_string_state(game)
    if str_state in TRANSPOSITION_TABLE:
# print TRANSPOSITION_TABLE[str_state]
if currentMove != None:
# print "STATE FOUND IN TRANSPOSITION_TABLE", str_state
return TRANSPOSITION_TABLE[str_state][0], currentMove
try:
global count
count += 1
final_children = None
value = None
rval = None
# rmov = None
children = available_moves(game, player)
# time.sleep(2)
# print children, level
# game.display_board()
# print "%s at level %s - after last move %s \n\n available moves(children) : \n\t\t%s" %(player.name, level, currentMove, children)
if level == depth:
print "LEAF LEVEL"
_sef_ref = _sef(game, player, other_player(player, game))
# print "CURRENT PLAYER = ", player.name,"SEF =%s" % _sef_ref
TRANSPOSITION_TABLE[get_board_string_state(game)] = (_sef_ref, currentMove)
return _sef_ref, currentMove
elif level%2 == 0: # MAXIMIZING NODE
value = -1000
for idx, child in enumerate(children): # child is a move for the current player ('tiger', 4, 3)
# print "CURRENT PLAYER = ", game._find_whose_turn()
# print "******************=START Exploring Child = %s(sef val = %s) GAME PLY= %s =======================>" % (child, value, game.plys)
status = game._move(player[child[0]], new_row=child[1], new_col=child[2])
# print 'status' , status
if status:
rval, rmov = alphaBetaMinimax(game, value, 1000,level + 1, depth, other_player(player, game), child)
children[idx] += (rval,)
if (rval >= value):
value = rval
final_children = children[idx]
TRANSPOSITION_TABLE[get_board_string_state(game)] = (value, children[idx])
game.undo()
else:
print "Child %s [%s] skiped - This animal can't perform that move " % (child, player.name)
# print "******************=DONE Exploring Child = %s(sef val = %s) GAME PLY= %s =======================>" % (child, value, game.plys)
print "MAXIMIZING Node @ lvl %s=> %s\n\t %s" %(level, children, final_children)
else: # MINIMIZING NODE
value = 1000
for idx, child in enumerate(children):
# print "odd level", player.name, child
# print "CURRENT PLAYER = ", game._find_whose_turn()
# print "******************=START Exploring Child = %s(sef val = %s)=======================>" % (child, value)
status = game._move(player[child[0]], new_row=child[1], new_col=child[2])
# print 'status' , status
if status:
rval, rmov = alphaBetaMinimax(game, -1000 , value, level + 1, depth, other_player(player, game), child)
children[idx] += (rval,)
if (rval <= value):
value = rval
final_children = children[idx]
# TRANSPOSITION_TABLE[get_board_string_state(game)] = (value, children[idx])
# __move = rmov
game.undo()
else:
print "Child %s [%s] skiped - This animal can't perform that move " % (child, player)
# print "******************=DONE Exploring Child = %s(sef val = %s)=======================>" % (child, value)
print "MINIMIZING Node @ lvl %s=> %s\n\t %s" %(level,children, final_children)
game.is_gameover = False
# print "value = %s - child = %s" % (level, player.name)
        print 'number of nodes visited in tree', count # (14^(6)-1)/(14-1) = 579,195 nodes to check
return value, final_children
except Exception, e:
print traceback.format_exc(e)
# print "node examined ", count
if __name__ == '__main__':
from AnimalChecker import *
# from helpers import get_actual_neighbor, in_board
# from sef import _sef, StageEvaluationBoard
game = AnimalChecker(rows=9, cols=7, starting_player=1)
game.setup()
p1, p2 = game.get_players()
game.display_board()
    _, bestMove = alphaBetaMinimax(game, -1000, 1000, 0, 3, p1, None)  # alpha, beta, level, depth
bestMove = (p1[bestMove[0]], None,)+bestMove[1:-1]
print "Best move = ",bestMove
print game._move(*bestMove)
    _, bestMove = alphaBetaMinimax(game, -1000, 1000, 0, 4, p2, None)
bestMove = (p2[bestMove[0]], None,)+bestMove[1:-1]
print bestMove
print game._move(*bestMove)
# bestMove = (p2[bestMove[0]], None,)+bestMove[1:]
# print game._move(*bestMove)
game.display_board()
# print alphaBetaMinimax(game, 0, 2, p1)
    print 'number of nodes in tree', count # (14^(6)-1)/(14-1) = 579,195 nodes to check
|
{
"content_hash": "bd89fa726cdb48421242696b19905716",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 153,
"avg_line_length": 40.86695278969957,
"alnum_prop": 0.552930056710775,
"repo_name": "Mimieam/CS540_AI",
"id": "c6e7777454f50638ff4218cc9ebcf55ce2705ac0",
"size": "9522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project1/alphaBetaMinimax.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74914"
},
{
"name": "Swift",
"bytes": "57502"
}
],
"symlink_target": ""
}
|
'''
Author : DORIAN JAVA BROWN
Version : N/A
Copyright : All Rights Reserve; You may use, distribute and modify this code.
Description : This Guessing Game tries to guess the user's number by generating
              a random number between 0 and 5
'''
# import the random module
import random
# title of game
print ('\n\n')
print('\tGuessing Game')
print('--------------------------')
print ('')
# question for user
print('Think of a number between 0 - 5')
print('')
# declaration of the limit of saying no
answerLimit = 0
# random number between 0 and 5
numGuess = random.randint(0,5)
# conditional statements
while answerLimit >= 0:
answerReady = raw_input('READY ? [YES/NO] : ')
if answerReady == 'YES' or answerReady == 'yes' or answerReady == 'Yes':
print('\n\n')
print(' ********************')
print(' * *')
print(' * I GUESS ' + str(numGuess) + ' *')
print(' * *')
print(' ********************')
print('\n')
answerLimit = -1
elif answerReady == 'NO' or answerReady == 'no' or answerReady == 'No':
print('\nKeep thinking......\n\n')
print('___________________________________________________\n')
answerLimit = 1
else:
print('\n \n ERROR: invalid input... Please try again. \n\n')
answerLimit = 1
# declaration of reset to ask the user the question again
reset = 0
# conditional statements
while reset >= 0:
# input data from the user
answerCorrect = raw_input('\nCORRECT ? [YES/NO] : ')
# conditional statements
if answerCorrect == 'YES' or answerCorrect == 'yes' or answerCorrect == 'Yes':
print('\n\n :' + ')' )
print('\n\n')
reset = -1
elif answerCorrect == 'NO' or answerCorrect == 'no' or answerCorrect == 'No':
print('\n\n :' + '(' )
print('\n\n')
reset = -1
else:
print('\n\nERROR: invalid input... CODE #0E53')
print('\nPlease enter \"Yes or No" try again.\n')
reset = 1
|
{
"content_hash": "1d1b8097e42f7896968f9db752bfb051",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 81,
"avg_line_length": 27.064935064935064,
"alnum_prop": 0.5283109404990403,
"repo_name": "ZEUSOFCS/Python",
"id": "1580a1d70e2e4c1fece420eb1927172d41ac0894",
"size": "2084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Projects/RandomNumberGame.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7292"
}
],
"symlink_target": ""
}
|
"""
Tests for Posix backup driver.
"""
import os
import mock
from six.moves import builtins
from cinder.backup.drivers import posix
from cinder import context
from cinder import test
FAKE_FILE_SIZE = 52428800
FAKE_SHA_BLOCK_SIZE_BYTES = 1024
FAKE_BACKUP_ENABLE_PROGRESS_TIMER = True
FAKE_CONTAINER = 'fake/container'
FAKE_BACKUP_ID_PART1 = 'de'
FAKE_BACKUP_ID_PART2 = 'ad'
FAKE_BACKUP_ID_REST = 'beef-whatever'
FAKE_BACKUP_ID = (FAKE_BACKUP_ID_PART1 + FAKE_BACKUP_ID_PART2 +
FAKE_BACKUP_ID_REST)
FAKE_BACKUP = {'id': FAKE_BACKUP_ID, 'container': None}
UPDATED_CONTAINER_NAME = os.path.join(FAKE_BACKUP_ID_PART1,
FAKE_BACKUP_ID_PART2,
FAKE_BACKUP_ID)
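# i.e. 'de/ad/deadbeef-whatever' on POSIX -- the container path the driver is
# expected to derive from the backup id when none is supplied (see the
# update_container_name tests below).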
FAKE_BACKUP_MOUNT_POINT_BASE = '/fake/mount-point-base'
FAKE_EXPORT_PATH = 'fake/export/path'
FAKE_BACKUP_POSIX_PATH = os.path.join(FAKE_BACKUP_MOUNT_POINT_BASE,
FAKE_EXPORT_PATH)
FAKE_PREFIX = 'prefix-'
FAKE_CONTAINER_ENTRIES = [FAKE_PREFIX + 'one', FAKE_PREFIX + 'two', 'three']
EXPECTED_CONTAINER_ENTRIES = [FAKE_PREFIX + 'one', FAKE_PREFIX + 'two']
FAKE_OBJECT_NAME = 'fake-object-name'
FAKE_OBJECT_PATH = os.path.join(FAKE_BACKUP_POSIX_PATH, FAKE_CONTAINER,
FAKE_OBJECT_NAME)
class PosixBackupDriverTestCase(test.TestCase):
def setUp(self):
super(PosixBackupDriverTestCase, self).setUp()
self.ctxt = context.get_admin_context()
self.override_config('backup_file_size',
FAKE_FILE_SIZE)
self.override_config('backup_sha_block_size_bytes',
FAKE_SHA_BLOCK_SIZE_BYTES)
self.override_config('backup_enable_progress_timer',
FAKE_BACKUP_ENABLE_PROGRESS_TIMER)
self.override_config('backup_posix_path',
FAKE_BACKUP_POSIX_PATH)
self.mock_object(posix, 'LOG')
self.driver = posix.PosixBackupDriver(self.ctxt)
def test_init(self):
drv = posix.PosixBackupDriver(self.ctxt)
self.assertEqual(FAKE_BACKUP_POSIX_PATH,
drv.backup_path)
def test_update_container_name_container_passed(self):
result = self.driver.update_container_name(FAKE_BACKUP, FAKE_CONTAINER)
self.assertEqual(FAKE_CONTAINER, result)
    def test_update_container_name_no_container_passed(self):
result = self.driver.update_container_name(FAKE_BACKUP, None)
self.assertEqual(UPDATED_CONTAINER_NAME, result)
def test_put_container(self):
self.mock_object(os.path, 'exists', mock.Mock(return_value=False))
self.mock_object(os, 'makedirs')
self.mock_object(os, 'chmod')
path = os.path.join(self.driver.backup_path, FAKE_CONTAINER)
self.driver.put_container(FAKE_CONTAINER)
os.path.exists.assert_called_once_with(path)
os.makedirs.assert_called_once_with(path)
os.chmod.assert_called_once_with(path, 0o770)
def test_put_container_already_exists(self):
self.mock_object(os.path, 'exists', mock.Mock(return_value=True))
self.mock_object(os, 'makedirs')
self.mock_object(os, 'chmod')
path = os.path.join(self.driver.backup_path, FAKE_CONTAINER)
self.driver.put_container(FAKE_CONTAINER)
os.path.exists.assert_called_once_with(path)
self.assertEqual(0, os.makedirs.call_count)
self.assertEqual(0, os.chmod.call_count)
def test_put_container_exception(self):
self.mock_object(os.path, 'exists', mock.Mock(return_value=False))
self.mock_object(os, 'makedirs', mock.Mock(
side_effect=OSError))
self.mock_object(os, 'chmod')
path = os.path.join(self.driver.backup_path, FAKE_CONTAINER)
self.assertRaises(OSError, self.driver.put_container,
FAKE_CONTAINER)
os.path.exists.assert_called_once_with(path)
os.makedirs.assert_called_once_with(path)
self.assertEqual(0, os.chmod.call_count)
def test_get_container_entries(self):
self.mock_object(os, 'listdir', mock.Mock(
return_value=FAKE_CONTAINER_ENTRIES))
result = self.driver.get_container_entries(FAKE_CONTAINER, FAKE_PREFIX)
self.assertEqual(EXPECTED_CONTAINER_ENTRIES, result)
def test_get_container_entries_no_list(self):
self.mock_object(os, 'listdir', mock.Mock(
return_value=[]))
result = self.driver.get_container_entries(FAKE_CONTAINER, FAKE_PREFIX)
self.assertEqual([], result)
def test_get_container_entries_no_match(self):
self.mock_object(os, 'listdir', mock.Mock(
return_value=FAKE_CONTAINER_ENTRIES))
result = self.driver.get_container_entries(FAKE_CONTAINER,
FAKE_PREFIX + 'garbage')
self.assertEqual([], result)
def test_get_object_writer(self):
self.mock_object(builtins, 'open', mock.mock_open())
self.mock_object(os, 'chmod')
self.driver.get_object_writer(FAKE_CONTAINER, FAKE_OBJECT_NAME)
os.chmod.assert_called_once_with(FAKE_OBJECT_PATH, 0o660)
builtins.open.assert_called_once_with(FAKE_OBJECT_PATH, 'wb')
def test_get_object_reader(self):
self.mock_object(builtins, 'open', mock.mock_open())
self.driver.get_object_reader(FAKE_CONTAINER, FAKE_OBJECT_NAME)
builtins.open.assert_called_once_with(FAKE_OBJECT_PATH, 'rb')
def test_delete_object(self):
self.mock_object(os, 'remove')
self.driver.delete_object(FAKE_CONTAINER, FAKE_OBJECT_NAME)
def test_delete_nonexistent_object(self):
self.mock_object(os, 'remove', mock.Mock(
side_effect=OSError))
self.assertRaises(OSError,
self.driver.delete_object, FAKE_CONTAINER,
FAKE_OBJECT_NAME)
|
{
"content_hash": "92278f99501fc2ff84a89efef19b7826",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 79,
"avg_line_length": 35.48809523809524,
"alnum_prop": 0.6343508889634351,
"repo_name": "dims/cinder",
"id": "5e1138e9438e848516a4268843e73e27cd0c4e54",
"size": "6594",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/backup/drivers/test_backup_posix.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "14784553"
},
{
"name": "Shell",
"bytes": "8222"
}
],
"symlink_target": ""
}
|
import base64
import os
from flask import Flask, request
app = Flask(__name__)
# [END eventarc_pubsub_server]
# [START eventarc_pubsub_handler]
@app.route('/', methods=['POST'])
def index():
data = request.get_json()
if not data:
msg = 'no Pub/Sub message received'
print(f'error: {msg}')
return f'Bad Request: {msg}', 400
if not isinstance(data, dict) or 'message' not in data:
msg = 'invalid Pub/Sub message format'
print(f'error: {msg}')
return f'Bad Request: {msg}', 400
pubsub_message = data['message']
name = 'World'
if isinstance(pubsub_message, dict) and 'data' in pubsub_message:
name = base64.b64decode(pubsub_message['data']).decode('utf-8').strip()
resp = f"Hello, {name}! ID: {request.headers.get('ce-id')}"
print(resp)
return (resp, 200)
# [END eventarc_pubsub_handler]
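# A hedged local smoke test (hypothetical values; 'V29ybGQ=' is base64 for
# 'World'), assuming the server listens on port 8080 -- POST a JSON body like
#   {"message": {"data": "V29ybGQ="}}
# with a 'ce-id' header, and the handler responds 'Hello, World! ID: ...'.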
# [START eventarc_pubsub_server]
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0', port=int(os.environ.get('PORT', 8080)))
# [END eventarc_pubsub_server]
|
{
"content_hash": "27f924f3beaa3bebe2f35f0c48f17398",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 79,
"avg_line_length": 25.829268292682926,
"alnum_prop": 0.6213408876298395,
"repo_name": "GoogleCloudPlatform/python-docs-samples",
"id": "614dd5bc97134c60d498dee0ec2ffb9ad3923b6e",
"size": "1668",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "eventarc/pubsub/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8008"
},
{
"name": "Dockerfile",
"bytes": "62031"
},
{
"name": "HTML",
"bytes": "69878"
},
{
"name": "JavaScript",
"bytes": "26494"
},
{
"name": "Jinja",
"bytes": "1892"
},
{
"name": "Jupyter Notebook",
"bytes": "47951698"
},
{
"name": "Makefile",
"bytes": "932"
},
{
"name": "Procfile",
"bytes": "138"
},
{
"name": "PureBasic",
"bytes": "11115"
},
{
"name": "Python",
"bytes": "5323502"
},
{
"name": "Shell",
"bytes": "78261"
}
],
"symlink_target": ""
}
|
from app import settings
from tests.integration.integration_test_case import IntegrationTestCase
class TestApplicationVariablesNegative(IntegrationTestCase):
def setUp(self):
settings.EQ_DEV_MODE = True
settings.EQ_ENABLE_LIVE_RELOAD = False
super().setUp()
def test_flask_toolbar_is_not_displayed(self):
self.launchSurvey('0', 'star_wars')
self.assertStatusOK()
self.assertNotInBody('flDebugToolbarHandle')
def test_livereload_script_not_rendered(self):
self.launchSurvey('0', 'star_wars')
self.assertStatusOK()
self.assertFalse('__bs_script__' in self.getResponseData())
    def test_google_analytics_code_is_not_present(self):
self.launchSurvey('0', 'star_wars')
self.assertStatusOK()
self.assertNotInHead('GoogleTagManagerObject')
|
{
"content_hash": "48893970c690c4aeb174340fa6f4641f",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 71,
"avg_line_length": 33.8,
"alnum_prop": 0.6887573964497041,
"repo_name": "ONSdigital/eq-survey-runner",
"id": "3b37e2f7190337be8b03b7955f584062a306142f",
"size": "845",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/test_application_variables_negative.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "520"
},
{
"name": "HTML",
"bytes": "236859"
},
{
"name": "JavaScript",
"bytes": "423942"
},
{
"name": "Python",
"bytes": "1409591"
},
{
"name": "SCSS",
"bytes": "25858"
},
{
"name": "Shell",
"bytes": "10196"
}
],
"symlink_target": ""
}
|
import turtle
# Get user's input
line_length = input('Enter length: ')
degree = input('Enter degree: ')
# Validate user's input
try:
line_length = int(line_length)
if line_length == 0:
raise Exception('Length cannot be equal to 0.')
degree = int(degree)
except ValueError:
print('Invalid input!')
exit()
except Exception as error:
print(error)
exit()
# Configure the turtle
turtle.speed('fastest')
turtle.color('green')
# Infinite drawing loop
while True:
turtle.right(degree)
turtle.forward(line_length)
|
{
"content_hash": "c154d60394f5f465a3956a5121ddbcf6",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 55,
"avg_line_length": 18.333333333333332,
"alnum_prop": 0.6727272727272727,
"repo_name": "Valka7a/python-playground",
"id": "3fe4b8a49094d5fed8c2cc2d35442f960fb390b6",
"size": "550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python-course-softuni/introduction-python3/lecture-one-excercises/ex4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "797"
},
{
"name": "HTML",
"bytes": "9695"
},
{
"name": "JavaScript",
"bytes": "6573"
},
{
"name": "Python",
"bytes": "1789753"
}
],
"symlink_target": ""
}
|
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# print('os.path.abs : ', os.path.abspath('../..'))
# sys.path.append(
# "/home/nacass/workspace/bigdatatrade/DataAPIsamplePy")
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'AlgorithmicTradingApiPython'
copyright = '2015, Nacass Tommy'
author = 'Nacass Tommy'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'DataAPIsamplePydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DataAPIsamplePy.tex', 'DataAPIsamplePy Documentation',
'Nacass Tommy', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'dataapisamplepy', 'DataAPIsamplePy Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DataAPIsamplePy', 'DataAPIsamplePy Documentation',
author, 'DataAPIsamplePy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
{
"content_hash": "6e68c5aa5855b88ba54eb777bdd3e45e",
"timestamp": "",
"source": "github",
"line_count": 278,
"max_line_length": 79,
"avg_line_length": 32.719424460431654,
"alnum_prop": 0.7023966578715919,
"repo_name": "MoneyPush/AlgorithmicTradingApiPython",
"id": "4f5501e6b306a2c541f6aec9197adf612a8dc316",
"size": "9547",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5244"
}
],
"symlink_target": ""
}
|
import subprocess
import sys
import setup_util
import os
from os.path import expanduser
home = expanduser("~")
def start(args, logfile, errfile):
setup_util.replace_text("nawak/model_postgre.nim", "host=.* port=5432",
"host=" + args.database_host + " port=5432")
# compile the app
subprocess.check_call(
"nimrod c --threads:on -d:release -d:postgre_model --path:../installs/nawak/nawak -o:nawak_postgre app.nim",
shell=True, cwd="nawak", stderr=errfile, stdout=logfile)
# launch mongrel2
subprocess.check_call("mkdir -p run logs tmp", shell=True, cwd="nawak/conf", stderr=errfile, stdout=logfile)
subprocess.check_call("sudo m2sh load -config mongrel2.conf", shell=True, cwd="nawak/conf", stderr=errfile, stdout=logfile)
subprocess.check_call("sudo m2sh start -name test", shell=True, cwd="nawak/conf", stderr=errfile, stdout=logfile)
# launch workers
subprocess.Popen("./nawak_postgre", shell=True, cwd="nawak", stderr=errfile, stdout=logfile)
return 0
def stop(logfile, errfile):
ret = 0
try:
subprocess.check_call("sudo m2sh stop -every", shell=True, cwd="nawak/conf", stderr=errfile, stdout=logfile)
    except Exception:
ret = 1
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'nawak_postgre' in line:
try:
pid = int(line.split(None, 2)[1])
os.kill(pid, 15)
except OSError:
ret = 1
return ret
|
{
"content_hash": "b9186721bae21b3efd5a7a3ec63bbe6b",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 125,
"avg_line_length": 34.53488372093023,
"alnum_prop": 0.6727272727272727,
"repo_name": "ratpack/FrameworkBenchmarks",
"id": "99b461fafedd42be032d0b2e0ab335038b0bed5f",
"size": "1485",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "frameworks/Nimrod/nawak/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "838"
},
{
"name": "C",
"bytes": "39732"
},
{
"name": "C#",
"bytes": "128703"
},
{
"name": "C++",
"bytes": "402630"
},
{
"name": "CSS",
"bytes": "234858"
},
{
"name": "Clojure",
"bytes": "18787"
},
{
"name": "Dart",
"bytes": "35750"
},
{
"name": "Elixir",
"bytes": "1912"
},
{
"name": "Erlang",
"bytes": "7670"
},
{
"name": "Go",
"bytes": "35314"
},
{
"name": "Groovy",
"bytes": "15587"
},
{
"name": "Haskell",
"bytes": "8771"
},
{
"name": "Java",
"bytes": "264212"
},
{
"name": "JavaScript",
"bytes": "395155"
},
{
"name": "Lua",
"bytes": "7463"
},
{
"name": "MoonScript",
"bytes": "2204"
},
{
"name": "Nim",
"bytes": "32032"
},
{
"name": "PHP",
"bytes": "17587921"
},
{
"name": "Perl",
"bytes": "18774"
},
{
"name": "PowerShell",
"bytes": "35514"
},
{
"name": "Prolog",
"bytes": "317"
},
{
"name": "Python",
"bytes": "413446"
},
{
"name": "Racket",
"bytes": "5298"
},
{
"name": "Ruby",
"bytes": "73849"
},
{
"name": "Scala",
"bytes": "62267"
},
{
"name": "Shell",
"bytes": "114520"
},
{
"name": "Volt",
"bytes": "677"
}
],
"symlink_target": ""
}
|
class ZiplineError(Exception):
msg = None
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.message = str(self)
def __str__(self):
msg = self.msg.format(**self.kwargs)
return msg
__unicode__ = __str__
__repr__ = __str__
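# Illustrative sketch (not part of the original module): subclasses only set
# the `msg` class attribute with str.format-style placeholders; the keyword
# arguments passed at raise time are interpolated by __str__ via
# self.msg.format(**self.kwargs). For a hypothetical subclass:
#
#   class ExampleError(ZiplineError):
#       msg = "Something went wrong with {thing}."
#
#   str(ExampleError(thing="orders"))  # -> "Something went wrong with orders."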
class WrongDataForTransform(ZiplineError):
"""
Raised whenever a rolling transform is called on an event that
does not have the necessary properties.
"""
msg = "{transform} requires {fields}. Event cannot be processed."
class UnsupportedSlippageModel(ZiplineError):
"""
    Raised if a user script calls the override_slippage magic
    with a slippage object that isn't a VolumeShareSlippage or
    FixedSlippage.
"""
msg = """
You attempted to override slippage with an unsupported class. \
Please use VolumeShareSlippage or FixedSlippage.
""".strip()
class OverrideSlippagePostInit(ZiplineError):
    # Raised if a user's script calls the override_slippage magic
# after the initialize method has returned.
msg = """
You attempted to override slippage outside of `initialize`. \
You may only call override_slippage in your initialize method.
""".strip()
class RegisterTradingControlPostInit(ZiplineError):
    # Raised if a user's script registers a trading control after initialize
# has been run.
msg = """
You attempted to set a trading control outside of `initialize`. \
Trading controls may only be set in your initialize method.
""".strip()
class RegisterAccountControlPostInit(ZiplineError):
    # Raised if a user's script registers an account control after initialize
# has been run.
msg = """
You attempted to set an account control outside of `initialize`. \
Account controls may only be set in your initialize method.
""".strip()
class UnsupportedCommissionModel(ZiplineError):
"""
Raised if a user script calls the override_commission magic
with a commission object that isn't a PerShare, PerTrade or
PerDollar commission
"""
msg = """
You attempted to override commission with an unsupported class. \
Please use PerShare or PerTrade.
""".strip()
class OverrideCommissionPostInit(ZiplineError):
"""
    Raised if a user's script calls the override_commission magic
after the initialize method has returned.
"""
msg = """
You attempted to override commission outside of `initialize`. \
You may only call override_commission in your initialize method.
""".strip()
class TransactionWithNoVolume(ZiplineError):
"""
Raised if a transact call returns a transaction with zero volume.
"""
msg = """
Transaction {txn} has a volume of zero.
""".strip()
class TransactionWithWrongDirection(ZiplineError):
"""
Raised if a transact call returns a transaction with a direction that
does not match the order.
"""
msg = """
Transaction {txn} not in same direction as corresponding order {order}.
""".strip()
class TransactionWithNoAmount(ZiplineError):
"""
Raised if a transact call returns a transaction with zero amount.
"""
msg = """
Transaction {txn} has an amount of zero.
""".strip()
class TransactionVolumeExceedsOrder(ZiplineError):
"""
Raised if a transact call returns a transaction with a volume greater than
the corresponding order.
"""
msg = """
Transaction volume of {txn} exceeds the order volume of {order}.
""".strip()
class UnsupportedOrderParameters(ZiplineError):
"""
Raised if a set of mutually exclusive parameters are passed to an order
call.
"""
msg = "{msg}"
class BadOrderParameters(ZiplineError):
"""
Raised if any impossible parameters (nan, negative limit/stop)
are passed to an order call.
"""
msg = "{msg}"
class OrderDuringInitialize(ZiplineError):
"""
Raised if order is called during initialize()
"""
msg = "{msg}"
class AccountControlViolation(ZiplineError):
"""
    Raised if the account violates a constraint set by an AccountControl.
"""
msg = """
Account violates account constraint {constraint}.
""".strip()
class TradingControlViolation(ZiplineError):
"""
Raised if an order would violate a constraint set by a TradingControl.
"""
msg = """
Order for {amount} shares of {asset} at {datetime} violates trading constraint
{constraint}.
""".strip()
class IncompatibleHistoryFrequency(ZiplineError):
"""
Raised when a frequency is given to history which is not supported.
At least, not yet.
"""
msg = """
Requested history at frequency '{frequency}' cannot be created with data
at frequency '{data_frequency}'.
""".strip()
class MultipleSymbolsFound(ZiplineError):
"""
Raised when a symbol() call contains a symbol that changed over
time and is thus not resolvable without additional information
provided via as_of_date.
"""
msg = """
Multiple symbols with the name '{symbol}' found. Use the
'as_of_date' argument to specify when the symbol lookup
should be valid.
Possible options:{options}
""".strip()
class SymbolNotFound(ZiplineError):
"""
    Raised when a symbol() call contains a non-existent symbol.
"""
msg = """
Symbol '{symbol}' was not found.
""".strip()
class RootSymbolNotFound(ZiplineError):
"""
    Raised when a lookup_future_chain() call contains a non-existent symbol.
"""
msg = """
Root symbol '{root_symbol}' was not found.
""".strip()
class SidNotFound(ZiplineError):
"""
Raised when a retrieve_asset() call contains a non-existent sid.
"""
msg = """
Asset with sid '{sid}' was not found.
""".strip()
class InvalidAssetType(ZiplineError):
"""
Raised when an AssetFinder tries to build an Asset with an invalid
AssetType.
"""
msg = """
AssetMetaData contained an invalid Asset type: '{asset_type}'.
""".strip()
class ConsumeAssetMetaDataError(ZiplineError):
"""
Raised when AssetFinder.consume() is called on an invalid object.
"""
msg = """
AssetFinder cannot consume metadata of type {obj}. Metadata must be a dict, a
DataFrame, or a tables.Table. If the provided metadata is a Table, the rows
must contain both or one of 'sid' or 'symbol'.
""".strip()
class MapAssetIdentifierIndexError(ZiplineError):
"""
Raised when AssetMetaData.map_identifier_index_to_sids() is called on an
index of invalid objects.
"""
msg = """
AssetFinder cannot map an index with values of type {obj}. Asset indices of
DataFrames or Panels must be integer sids, string symbols, or Asset objects.
""".strip()
class SidAssignmentError(ZiplineError):
"""
Raised when an AssetFinder tries to build an Asset that does not have a sid
and that AssetFinder is not permitted to assign sids.
"""
msg = """
AssetFinder metadata is missing a SID for identifier '{identifier}'.
""".strip()
class NoSourceError(ZiplineError):
"""
Raised when no source is given to the pipeline
"""
msg = """
No data source given.
""".strip()
class PipelineDateError(ZiplineError):
"""
Raised when only one date is passed to the pipeline
"""
msg = """
Only one simulation date given. Please specify both the 'start' and 'end' for
the simulation, or neither. If neither is given, the start and end of the
DataSource will be used. Given start = '{start}', end = '{end}'
""".strip()
class WindowLengthTooLong(ZiplineError):
"""
Raised when a trailing window is instantiated with a lookback greater than
the length of the underlying array.
"""
msg = (
"Can't construct a rolling window of length "
"{window_length} on an array of length {nrows}."
).strip()
class WindowLengthNotPositive(ZiplineError):
"""
Raised when a trailing window would be instantiated with a length less than
1.
"""
msg = (
"Expected a window_length greater than 0, got {window_length}."
).strip()
class InputTermNotAtomic(ZiplineError):
"""
Raised when a non-atomic term is specified as an input to an FFC term with
a lookback window.
"""
msg = (
"Can't compute {parent} with non-atomic input {child}."
)
class TermInputsNotSpecified(ZiplineError):
"""
Raised if a user attempts to construct a term without specifying inputs and
that term does not have class-level default inputs.
"""
msg = "{termname} requires inputs, but no inputs list was passed."
class WindowLengthNotSpecified(ZiplineError):
"""
    Raised if a user attempts to construct a term without specifying a
    window_length and that term does not have a class-level default
    window_length.
"""
msg = (
"{termname} requires a window_length, but no window_length was passed."
)
class BadPercentileBounds(ZiplineError):
"""
Raised by API functions accepting percentile bounds when the passed bounds
are invalid.
"""
msg = (
"Percentile bounds must fall between 0.0 and 100.0, and min must be "
"less than max."
"\nInputs were min={min_percentile}, max={max_percentile}."
)
class UnknownRankMethod(ZiplineError):
"""
Raised during construction of a Rank factor when supplied a bad Rank
method.
"""
msg = (
"Unknown ranking method: '{method}'. "
"`method` must be one of {choices}"
)
class AddTermPostInit(ZiplineError):
"""
Raised when a user tries to call add_{filter,factor,classifier}
outside of initialize.
"""
msg = (
"Attempted to add a new filter, factor, or classifier "
"outside of initialize.\n"
"New FFC terms may only be added during initialize."
)
class UnsupportedDataType(ZiplineError):
"""
Raised by FFC CustomFactors with unsupported dtypes.
"""
msg = "CustomFactors with dtype {dtype} are not supported."
class NoFurtherDataError(ZiplineError):
"""
Raised by calendar operations that would ask for dates beyond the extent of
our known data.
"""
# This accepts an arbitrary message string because it's used in more places
# that can be usefully templated.
msg = '{msg}'
class UnsupportedDatetimeFormat(ZiplineError):
"""
Raised when an unsupported datetime is passed to an API method.
"""
msg = ("The input '{input}' passed to '{method}' is not "
"coercible to a pandas.Timestamp object.")
class PositionTrackerMissingAssetFinder(ZiplineError):
"""
Raised by a PositionTracker if it is asked to update an Asset but does not
have an AssetFinder
"""
msg = (
"PositionTracker attempted to update its Asset information but does "
"not have an AssetFinder. This may be caused by a failure to properly "
"de-serialize a TradingAlgorithm."
)
|
{
"content_hash": "3e73ebeeaff88f3b67c869044c204622",
"timestamp": "",
"source": "github",
"line_count": 400,
"max_line_length": 79,
"avg_line_length": 27.015,
"alnum_prop": 0.6790671848972792,
"repo_name": "michaeljohnbennett/zipline",
"id": "8fd97ba52b01021dc66456a30a47f0f5c00f9d30",
"size": "11390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zipline/errors.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "564"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Python",
"bytes": "1354050"
},
{
"name": "Shell",
"bytes": "4065"
}
],
"symlink_target": ""
}
|
from datetime import datetime
import os
import sys
from os.path import join, exists
from subprocess import check_call
from citools.build import rename_template_files as _rename_template_files, replace_template_files, get_common_variables
from paver.easy import *
from paver.setuputils import _get_distribution
@task
@consume_args
@needs('unit', 'integrate')
def test():
""" Run whole testsuite """
def djangonize_test_environment(test_project_module):
sys.path.insert(0, options.rootdir)
sys.path.insert(0, join(options.rootdir, "tests"))
if exists(join(options.rootdir, "tests", test_project_module)):
sys.path.insert(0, join(options.rootdir, "tests", test_project_module))
os.environ['DJANGO_SETTINGS_MODULE'] = "%s.settings" % test_project_module
def run_tests(test_project_module, nose_args, nose_run_kwargs=None):
djangonize_test_environment(test_project_module)
import nose
os.chdir(join(options.rootdir, "tests", test_project_module))
argv = ["--with-django"] + nose_args
nose_run_kwargs = nose_run_kwargs or {}
nose.run_exit(
argv = ["nosetests"] + argv,
defaultTest = test_project_module,
**nose_run_kwargs
)
@task
@consume_args
def unit(args, nose_run_kwargs=None):
""" Run unittests """
run_tests(test_project_module="unit_project", nose_args=args, nose_run_kwargs=nose_run_kwargs)
@task
@consume_args
def integrate(args, nose_run_kwargs=None):
""" Run integration tests """
run_tests(test_project_module="example_project", nose_args=["--with-selenium", "--with-djangoliveserver"]+args, nose_run_kwargs=nose_run_kwargs)
@task
@consume_args
def integrate_project(args):
""" Run integration tests """
djangonize_test_environment(options.project_module)
os.chdir(join(options.rootdir, "tests"))
import nose
nose.run_exit(
argv = ["nosetests", "--with-django", "--with-selenium", "--with-djangoliveserver"]+args,
defaultTest = "tests"
)
@task
def install_dependencies():
sh('pip install -r requirements.txt')
@task
def bootstrap():
options.virtualenv = {'packages_to_install' : ['pip']}
call_task('paver.virtual.bootstrap')
sh("python bootstrap.py")
path('bootstrap.py').remove()
print '*'*80
if sys.platform in ('win32', 'winnt'):
print "* Before running other commands, You now *must* run %s" % os.path.join("bin", "activate.bat")
else:
print "* Before running other commands, You now *must* run source %s" % os.path.join("bin", "activate")
print '*'*80
@task
@needs('citools.paver.install_dependencies')
def prepare():
""" Prepare complete environment """
@task
def bump():
"""
    Bump the most-minor part of the tagged version. Assumes git.
    The bump is computed from the last release tag, which is assumed to
    have the $projectname-[digit]* format. If not, the pattern can be
    configured as options.release_tag_format.
"""
if getattr(options, "release_tag_format", False):
        format = options.release_tag_format
else:
format = "%s-[0-9]*" % options.name
from citools.version import get_git_describe, compute_version
version = compute_version(get_git_describe(accepted_tag_pattern=format))
new_version = list(version[:-1])
    new_version[-1] += 1
tag = options.name + "-" + ".".join(map(str, new_version))
sh('git tag -a %s -m "paver bump to version %s"' % (tag, tag))
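    # Hedged example (an assumption about compute_version's return shape, not
    # from the original file): if the last tag is citools-0.5.2 and git
    # describe reports citools-0.5.2-4-gdeadbee, compute_version is expected
    # to yield a tuple whose last element is the commit count; dropping it
    # and incrementing the remainder would produce the tag citools-0.5.3.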
@task
@cmdopts([
('accepted-tag-pattern=', 't', 'Tag pattern passed to git describe for version recognition'),
])
def compute_version_git(options):
from citools.version import get_git_describe, compute_version, get_branch_suffix, retrieve_current_branch
if not getattr(options, "accepted_tag_pattern", None):
options.accepted_tag_pattern = "%s-[0-9]*" % options.name
dist = _get_distribution()
current_git_version = get_git_describe(accepted_tag_pattern=options.accepted_tag_pattern)
branch_suffix = get_branch_suffix(dist.metadata, retrieve_current_branch())
options.version = compute_version(current_git_version)
dist.metadata.version = options.version_str = '.'.join(map(str, options.version))
dist.metadata.branch_suffix = options.branch_suffix = branch_suffix
print options.version_str
@task
def compute_version_git_datetime(options):
from citools.version import get_git_head_tstamp, get_branch_suffix, retrieve_current_branch
tstamp = int(get_git_head_tstamp())
if not tstamp:
raise Exception("Git log parsing error")
commit_dtime = datetime.fromtimestamp(tstamp)
commit_version = commit_dtime.strftime("%Y.%m.%d.%H%M")#.split('.')
dist = _get_distribution()
branch_suffix = get_branch_suffix(dist.metadata, retrieve_current_branch())
options.version = commit_version
dist.metadata.version = commit_version
dist.metadata.branch_suffix = options.branch_suffix = branch_suffix
print options.version
@task
@needs('compute_version_git')
def compute_version(options):
pass
@task
def update_debian_version(options):
from citools.debian.commands import update_debianization
update_debianization(options.version)
@task
def replace_version(options):
from citools.version import replace_inits, replace_scripts, replace_version_in_file
replace_inits(options.version, options.packages)
# replace_scripts(options.version, options.py_modules)
replace_version_in_file(options.version, 'setup.py')
if os.path.exists('pavement.py'):
replace_version_in_file(options.version, 'pavement.py')
@task
def build_debian_package(options):
check_call(['dpkg-buildpackage', '-rfakeroot-tcp', '-us', '-uc'])
@task
#@needs(['create_debian_package'])
@cmdopts([
('ftp-host=', 'o', 'FTP host (for debian package upload)'),
('ftp-port=', 'p', 'FTP port (for debian package upload)'),
('ftp-user=', 'u', 'FTP username (for debian package upload)'),
('ftp-password=', 'w', 'FTP password (for debian package upload)'),
('ftp-directory=', 'd', 'FTP directory (in which to packages directories are) (for debian package upload)'),
('forgive-no-packages', 'n', 'It is OK to upload even if there are no packages'),
])
def upload_debian_package(options):
import os
from ftplib import FTP
from citools.debian.commands import get_packages_names, get_package_path
from citools.ftp import upload_package
packages = get_packages_names()
if len(packages) == 0:
raise ValueError("Not uploading: no package recognized")
if not getattr(options, "version_str", None):
call_task("compute_version")
print u"Uploading packages %s" % packages
for package_name in packages:
package_path = get_package_path(package_name, options.name, current_version=options.version_str)
upload_package(options.ftp_host, options.ftp_user, options.ftp_password, \
options.ftp_directory.split("/"), package_path, package_name, port=getattr(options, "ftp_port", 21))
@task
def rename_template_files():
_rename_template_files(root_directory=os.curdir, variables=get_common_variables(_get_distribution()))
@task
def replace_templates():
replace_template_files(
root_directory=os.curdir,
variables=get_common_variables(_get_distribution()),
subdirs=getattr(options, "template_files_directories", None)
)
@task
@needs([
'compute_version',
'replace_version',
'replace_templates',
'rename_template_files',
'update_debian_version',
'build_debian_package'
])
def create_debian_package(options):
pass
@task
@cmdopts([
('host=', 'o', 'Buildmaster hostname'),
('port=', 'p', 'Buildmaster port'),
('branch=', 'b', 'Branch with change'),
])
def ping_buildmaster():
from citools.buildbots import buildbot_ping_git
from citools.version import retrieve_current_branch
if not getattr(options, "branch", None):
options.branch = retrieve_current_branch()
buildbot_ping_git(options.host, int(options.port), options.branch)
@task
@cmdopts([
('production-machine=', 'p', 'Production machine'),
('clean-machine=', 'c', 'Clean machine'),
('production-backend-machine=', 'b', 'Production backend machine'),
('enabled-architectures=', 'a', 'Enabled architectures')
])
def install_production_packages(options):
production_machine = getattr(options, "production_machine", None)
clean_machine = getattr(options, "clean_machine")
production_backend_machine = getattr(options, "production_backend_machine", None)
enabled_architectures = getattr(options, "enabled_architectures", None)
fabfile_name = getattr(options, "fabfile_name", '')
# import your fabfile
if fabfile_name != '':
fabfile = import_fabfile(fabfile_name)
else:
fabfile = import_fabfile()
# invoke fabric task
args = (clean_machine, production_machine, production_backend_machine, enabled_architectures)
options.packages_list = fab(clean_machine,
fabfile['install_production_packages'],
resolve,
args
)
@task
@cmdopts([
('preproduction-machine=', 'r', 'Preproduction machine'),
('unwanted-packages=', 'n', 'Unwanted packages'),
('section-packages=', 's', 'Enabled packages section'),
('disable-urls=', 'l', 'Disable urls for debian repo')
])
@needs('install_production_packages')
def execute_diff_packages(options):
preproduction_machine = getattr(options, "preproduction_machine")
unwanted_packages = getattr(options, "unwanted_packages", '')
section_packages = getattr(options, "section_packages", ".*")
disable_urls = getattr(options, "disable_urls", '')
fabfile_name = getattr(options, "fabfile_name", '')
# import your fabfile
if fabfile_name != '':
fabfile = import_fabfile(fabfile_name)
else:
fabfile = import_fabfile()
# invoke fabric task
args = (options.packages_list, unwanted_packages, section_packages, disable_urls)
options.diff_packages_list = fab(preproduction_machine,
fabfile['execute_diff_packages'],
resolve,
args
)
@task
@cmdopts([
('project=', 'j', 'Project'),
('project-version=', 'v', 'Project version'),
('project-config=', 'f', 'Project config'),
('project-only=', 'o', 'Project packages only'),
('prompt-type=', 'e', 'Type of prompt for selecting packages')
])
@needs('execute_diff_packages')
def download_diff_packages(options):
clean_machine = getattr(options, "clean_machine")
project = getattr(options, "project")
project_version = getattr(options, "project_version", '')
project_config = getattr(options, "project_config", True)
project_only = getattr(options, "project_only", 'no')
prompt_type = getattr(options, "prompt_type", 'b')
fabfile_name = getattr(options, "fabfile_name", '')
# import your fabfile
if fabfile_name != '':
fabfile = import_fabfile(fabfile_name)
else:
fabfile = import_fabfile()
# invoke fabric task
args = (options.diff_packages_list, project, project_version, project_config, project_only, prompt_type)
options.packages_for_upload = fab(clean_machine,
fabfile['download_diff_packages'],
resolve,
args
)
@task
@cmdopts([
('domain-username=', 'd', 'Domain username'),
('directory-structure=', 't', 'Directory structure for upload packages'),
('upload-url=', 'u', 'Url for upload')
])
@needs('download_diff_packages')
def upload_packages(options):
clean_machine = getattr(options, "clean_machine")
domain_username = getattr(options, "domain_username", '')
upload_url = getattr(options, "upload_url", '')
directory_structure = getattr(options, "directory_structure", '')
fabfile_name = getattr(options, "fabfile_name", '')
# import your fabfile
if fabfile_name != '':
fabfile = import_fabfile(fabfile_name)
else:
fabfile = import_fabfile()
# invoke fabric task
args = (options.packages_for_upload,)
kwargs = { "rdir" : directory_structure, "upload_url" : upload_url, "domain_username" : domain_username }
fab(clean_machine, fabfile['upload_packages'], resolve, args, kwargs)
# fabric wrapper snippets
def resolve(host):
"""write similar function for eg: resolving from aws or ssh_config"""
from fabric.main import find_fabfile, load_fabfile
from fabric.network import normalize
from fabric import state
return (host,) + normalize(host)
def fab(host, cmd, resolve=resolve, args=(), kwargs={}):
"""call one fabric task"""
from fabric.main import find_fabfile, load_fabfile
from fabric.network import normalize
from fabric import state
host_string, username, hostname, port = resolve(host)
state.env.host_string = host_string
state.env.host = hostname
state.env.user = username
state.env.port = port
return cmd(*args, **kwargs)
def import_fabfile(fabfile='fabfile.py'):
""" you have to call this first to enable fabric tasks"""
from fabric.main import find_fabfile, load_fabfile
from fabric.network import normalize
from fabric import state
state.env.fabfile = fabfile
_, fabfile = load_fabfile(find_fabfile())
return fabfile
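# Hedged usage sketch for the wrappers above (the host name is illustrative,
# not from the original file):
#
#   fabfile = import_fabfile()            # load ./fabfile.py once
#   result = fab('deploy.example.com',    # run a single task on one host
#                fabfile['install_production_packages'],
#                resolve,
#                args=('clean-host', 'prod-host', None, None))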
@task
@needs('paver.doctools.html')
def publish_docs(options):
"""Build documentation and move it into docroot"""
builtdocs = path("docs") / options.sphinx.builddir / "html"
if getattr(options, "docroot", None):
destdir = options.docroot
else:
destdir = path(getattr(options, "docroot", '/big/docs/')) / options.name
if getattr(options, "doc_use_branch_dir", False):
from citools.version import retrieve_current_branch
branch = retrieve_current_branch()
if branch != getattr(options, "doc_root_branch", "automation"):
destdir = destdir / "branches" / branch
destdir.rmtree()
builtdocs.move(destdir)
destdir.chmod(getattr(options, "doc_dir_chmod", 0777))
for dirpath, dirnames, filenames in os.walk(destdir):
for d in dirnames:
os.chmod(join(dirpath, d), getattr(options, "doc_dir_chmod", 0777))
for f in filenames:
os.chmod(join(dirpath, f), getattr(options, "doc_file_chmod", 0444))
|
{
"content_hash": "00d118065e61374106f569e32c003c9d",
"timestamp": "",
"source": "github",
"line_count": 428,
"max_line_length": 148,
"avg_line_length": 33.399532710280376,
"alnum_prop": 0.6728226652675761,
"repo_name": "ella/citools",
"id": "4c558e25d79a6c3421748941ba8341bda9d047be",
"size": "14295",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "citools/pavement.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "208730"
},
{
"name": "Shell",
"bytes": "3077"
}
],
"symlink_target": ""
}
|
"""Helper class for implementing a beam search decoder.
Individual models just need to provide a few callback functions.
"""
import collections
import re
import lingvo.compat as tf
from lingvo.core import base_layer
from lingvo.core import ops
from lingvo.core import py_utils
from tensorflow.python.ops import inplace_ops
# TODO(yonghui):
# 1) Change the tensor shape [max_decoder_time_steps, batch_size *
# num_hyps_per_beam] to [max_decoder_time_steps, num_hyps_per_beam,
# batch_size] to avoid confusion and misinterpretation of the results.
# Defines a namedtuple to store the results of BeamSearchDecode. It contains
# the following entries:
# done_hyps: A string Tensor of shape
# [max_decoder_time_steps, batch_size * num_hyps_per_beam] which can be
# either an empty string, or a serialized Hypothesis proto. The non-empty
# hyps in done_hyps are terminated hypotheses. The 'h'-th hyp for sample
# 'b' at time step 't' can be found at done_hyps[t, batch_size * h + b].
# topk_hyps: A string Tensor of shape [batch_size, num_hyps_per_beam].
# topk_hyps[b, h] is the h-th hypothesis for the sample 'b' in the
# batch, which can either be an empty string or a serialized Hypothesis
# proto.
# topk_ids: Int32 Tensor of shape [batch_size * num_hyps_per_beam,
# target_seq_len] which contains the IDs of the targets in each of the
# hypotheses in the beam for the samples in the batch. For sample
# 'b' in the batch, the h-th hypothesis for this sample can be found at
# position [b * num_hyps_per_beam + h, :].
# topk_lens: Int32 Tensor of shape [batch_size * num_hyps_per_beam] which
# indicates the length (>=0) of each of the hypotheses.
# topk_scores: Float32 Tensor of shape [batch_size, num_hyps_per_beam]
# containing the scores (negative log probabilities) of each of the
# hypotheses in the beam.
# topk_decoded: A string Tensor of shape [batch_size * num_hyps_per_beam] which
# contains the decoded target strings in each of the hypotheses in the
# beam for the samples in the batch. The 'h'-th hyp for sample 'b' can
# be found at topk_decoded[b * num_hyps_per_beam + h]
# alignment: (optionally) A dict with the following field:
# * 'alignment_interval': tensor in shape
# [batch_size * num_hyps_per_beam, max_src_len, 2], where its
# [b, k, 0] indicates the start frame of k-th token (inc.),
# and [b, k, 1] is the start frame of k+1-th token or the last frame + 1.
# * 'alignment_indicator': a bool tensor in shape
# [batch_size * num_hyps_per_beam, max_src_len]; if [b, t]=True,
# It means a token is emitted in that position.
BeamSearchDecodeOutput = collections.namedtuple(
'BeamSearchDecodeOutput',
[
'topk_hyps', 'topk_ids', 'topk_lens', 'topk_scores', 'topk_decoded',
'other_states', 'topk_alignment'
],
)
# Make the last 2 attributes default to None and an empty dict.
BeamSearchDecodeOutput.__new__.__defaults__ = (None, {})
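# Illustrative note (not part of the original comments): the two layouts above
# differ. With batch_size=2 and num_hyps_per_beam=3, the flat hyp axis of
# done_hyps is hyp-major, [h0b0, h0b1, h1b0, h1b1, h2b0, h2b1], so hyp 'h' of
# sample 'b' sits at index h * batch_size + b; topk_ids is batch-major, so the
# same hyp sits at row b * num_hyps_per_beam + h.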
# Keys in fusion state that can be two dimensional, with the batch element in
# the second dimension, requiring special treatment in hypothesis reordering.
POSSIBLY_TIME_MAJOR_STATE_KEYS = [
'misc_states.fusion_states.lm_states.prev_ids',
'misc_states.fusion_states.lm_states.prev_paddings',
'fusion_states.lm_states.prev_ids',
'fusion_states.lm_states.prev_paddings',
]
class BeamSearchSharedParams(base_layer.BaseLayer):
"""Class defining common beam search params."""
@classmethod
def Params(cls):
p = super().Params()
p.Define('num_hyps_per_beam', 8,
'Num of hyps to keep per beam during decoding.')
p.Define(
'target_seq_length_ratio', 1.0,
'Ratio of the average target sequence length over the average '
'source sequence length. Affects coverage penalty.')
p.Define(
'length_normalization', 0.0,
'Beam search length normalization factor, typically in [0, 1]. '
'This is the exponent on (len+5)/5 used to normalize '
'global score. The larger this value is, the more likely '
'longer sequences are produced. This value is alpha '
'in https://arxiv.org/abs/1609.08144, equation 14.')
p.Define(
'coverage_penalty', 0.0,
'Beam search coverage penalty. This value is beta in '
'https://arxiv.org/abs/1609.08144, equation 14. The higher this '
'value is, the more heavily low coverage is penalized.')
p.Define(
'valid_eos_max_logit_delta', 5.0,
'During beam search, allow </s> to terminate a hyp only if its '
        'logit is no more than this value away from the logit of the '
'best candidate. The larger this value is, the easier hyps can '
'terminate, and the more likely shorter sequences are produced.')
p.Define(
'local_eos_threshold', -100.0,
'During beam search, allow </s> to terminate a hyp if the local score '
'for </s> is greater than local_eos_threshold.')
p.Define(
'beam_size', 3.0,
'The maximum difference between best hyp and the worst in a beam.'
        ' This allows us to prune our search when none of the active hyps is'
' close enough to the current best.')
p.Define('target_sos_id', 1, 'Id of the start of sentence token.')
p.Define('target_eos_id', 2, 'Id of the end of sentence token.')
p.Define(
'target_eoc_id', -1,
'Id of the end of chunk token. Used by neural transducer only.'
' Set this id to a non-negative value only for NT.')
p.Define(
'target_seq_len', 0, 'Maximum allowed target seq length. Note '
'this parameter is often used to determine the maximum number of '
        'decode steps. For example, for the LAS model which does not use eoc, '
'decoding terminates if an end of sentence (eos) token is not emitted '
'after target_seq_len decode steps. For the RNN-T model which does use '
        'eoc, decoding terminates if a terminal token (eos or last frame eoc) '
'is not emitted after source_seq_len + target_seq_len.')
p.Define(
'merge_paths', False, 'If true, hyps which are identical when '
'epsilons are removed will be combined into a single hyp. The '
'probability for that combined hyp will be the sum of the '
'probabilities of the component hyps. This can only be applied '
'for epsilon-emitting models (RNN-T and NT).')
p.Define(
'force_eos_in_top_k', False,
'Whether to always consider the eos token to be among the top k tokens '
'for every step. When False, hyps can only terminate if the eos token '
'is part of the top k. Note that p.valid_eos_max_logit_delta and '
'p.local_eos_threshold always apply regardless of this.')
p.Define(
'force_last_chunk_eoc_in_top_k', False,
'Whether to always consider the last chunk eoc token to be among the '
'top k tokens. This is effective only when decoding has reached the '
'last frame of input. When True, hyps can terminate at the last frame '
'by eoc even if the eoc score is not high enough to enter the top k. '
'Note that p.valid_eos_max_logit_delta and p.local_eos_threshold '
'always apply regardless of this.')
p.Define(
'batch_major_state', True, 'If True, we use batch as the major '
'dimension of the hyp states. Otherwise, timing becomes the major '
'dimension, and the gathers are performed along the second-to-major '
'dimension.')
p.Define(
'batch_major_compute', False, 'If True, the target batch dimension '
'is organized as num_beams by num_hyps_per_beam during the '
'ExtendStep computation and the cache is stored following this order. '
'So the topk indices into the cache for ReOrderHyps needs to be '
'reordered before usage. Otherwise, the indices will be directly used '
'without extra transformation. '
'Setting batch_major_compute=True does not change the ordering of '
'ids and logits of beam search callbacks. '
'The target_batch dim for those tensors will remain num_hyps_per_beam '
'* num_beams.')
p.Define(
'short_seq_limit', 0,
'An integer, the sequence length limit for using early stop '
'method in attention layer (batch-major implementation). The sequence '
'is always treated as the default long sequence for decoding when the '
'limit is set to 0. For typical mt transformer config '
'(batch 16, sequence length 150), the break even point is around 40 '
'on TPU V3, and 50 on TPU V2. This may slightly change for '
'different batch size and sequence length, which requires more '
'experiments to set the value.')
p.Define(
'terminate_beams_independently', False,
'Whether each beam in the same batch can independently terminate. '
'This controls whether the search termination criteria set by params '
'like `p.beam_size` or `p.ensure_full_beam` are applied collectively '
'to all beams, or individually to each beam. When False, all beams '
'continue the search until each and every beam meets the termination '
'criteria. When True, each beam individually, independent of each '
'other, decides whether to terminate the search.')
return p
class BeamSearchHelper(BeamSearchSharedParams):
"""Helper class for performing beam search.
The user of this helper class needs to implement three callbacks.
This callback is called once only at the beginning of beam search:
.. code-block:: none
def InitBeamSearchState(theta, encoder_outputs, num_hyps_per_beam):
Args:
theta: A NestedMap object containing weights' values of this layer and
its children layers.
encoder_outputs: A NestedMap computed by encoder.
      num_hyps_per_beam: An int, the number of hyps to keep per source sentence.
Returns:
A tuple (initial_results, states):
- initial_results: a `.NestedMap` of initial results. It must contain
the 'atten_probs' and 'log_probs' tensors. Optionally it may
contain 'step_ids'.
- log_probs: The initial log probs for each of the tokens in the
target vocab of shape [num_hyps_per_beam * src_batch, vocab_size].
src_batch "b" and hyp_per_beam "h" is represented at index
``(h * src_batch + b)``.
- atten_probs: The initial attention probs, of shape
[num_hyps_per_beam * src_batch, src_len]. src_batch "b" and
hyp_per_beam "h" is represented at index ``(h * src_batch + b)``.
- step_ids: Optional. The initial ids of shape [num_hyps_per_beam *
src_batch, 1] for which to start the beam search. src_batch "b"
hyp_per_beam "h" is represented at index ``(h * src_batch + b)``.
If not specified, defaults to a tensor filled with target_sos_id.
- states: a `.NestedMap` of tensors representing states that the
client would like to keep track of for each hyp.
This callback is called once every decoding time step before beam_search_step
is called:
.. code-block:: none
def PreBeamSearchStepCallback(theta,
encoder_outputs,
step_ids,
in_states,
num_hyps_per_beam,
cur_step):
Args:
theta: A NestedMap object containing weights' values of this layer and
its children layers.
encoder_outputs: A NestedMap computed by encoder.
step_ids: A tensor of shape [num_hyps_per_beam * src_batch, 1].
in_states: A `.NestedMap` of tensors representing states that the
clients would like to keep track of for each of the active hyps.
cur_step: Current step id.
Returns:
A tuple (results, out_states):
- results: A `.NestedMap` of beam search results. It should contain
the 'atten_probs' and 'log_probs' tensors at the minimal.
Optionally it may contain 'is_last_chunk' if it is decoding a
neural transducer model.
- atten_probs: The updated attention probs, of shape
[num_hyps_per_beam * src_batch, src_len]. src_batch "b" and
hyp_per_beam "h" is represented at index ``(h * src_batch + b)``.
- log_probs: Log prob for each of the tokens in the target vocab.
This is of shape [num_hyps_per_beam * src_batch, vocab_size].
src_batch "b" and hyp_per_beam "h" is represented at index
``(h * src_batch + b)``.
- is_last_chunk: Whether each of the hyp is at the end of a chunk.
If non-empty, it has shape [num_hyps_per_beam * src_batch].
- out_states: A `.NestedMap`. The updated states. This 'out_states'
should be of the exact same structure as 'in_states'
This callback is called once every decoding time step after beam_search_step
is called:
.. code-block:: none
def PostBeamSearchStepCallback(theta,
encoder_outputs,
new_step_ids,
other_states):
Args:
theta: A NestedMap object containing weights' values of this layer and
its children layers.
encoder_outputs: A NestedMap computed by encoder.
new_step_ids: Token ids for the next beam search step.
other_states: A `.NestedMap`.
Returns:
final_states, A `.NestedMap`.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define(
'allow_empty_terminated_hyp', True, 'Whether it is okay to consider a '
'hyp that consists only of epsilons as terminated. By default this '
'is true, as an utterance may consist of silence. It should be set '
'to false when EMBR training epsilon-emitting models (e.g., RNN-T), '
'which are prone to emit all-epsilon hyps even in the presence of '
'speech. Note that a hyp that terminates in EOS is not considered '
'empty, so this flag has no effect for non-epsilon-emitting models.')
p.Define(
'ensure_full_beam', False, 'If True, we will not terminate the search '
'until both of these conditions are satisfied: we have found at least '
'num_hyps_per_beam terminated hyps AND no active hyps have a score '
'within beam_size of the best terminated hyp. If False, only the '
'second condition must be satisfied. Note that in either case, we can '
'also terminate if we have run for target_seq_len steps. Generally '
'this should be False unless beam search is being run as part of '
'minimum word error rate training.')
p.Define(
'force_eos_in_last_step', False,
'For all active hyps that are still on the beam after target_seq_len '
'steps, return partial hyps with EOS set as the last token.')
p.Define(
'atten_vecs_in_hypothesis_protos', True,
'Whether to write atten_vecs fields in the Hypothesis protos. Setting '
'this to False saves memory, and can be used when the protos become '
'too large for long sequences, but requires p.coverage_penalty == 0.0.')
p.Define(
'merged_topk_buffer_size_factor', 2,
'The buffer size factor when pruning the per hyp top-k extensions to '
'form the per beam top-k extensions. If this factor is set to be '
'greater than or equal to num_hyps_per_beam + 2 when eoc_id >= 0, '
'there will be no pruning before all possible path mergings are '
'performed (if merge_paths=True). To be memory efficient (i.e., to '
'maintain less hyps during pruning), a reasonable value is 2.')
p.Define(
'reorder_tarzan_states', False,
'A flag to turn on the special state reordering logic for Tarzan LM '
'model.')
p.name = 'beam_search'
return p
def __init__(self, params):
super().__init__(params)
p = self.params
self._model_uses_eoc_id = p.target_eoc_id >= 0
if not p.atten_vecs_in_hypothesis_protos and p.coverage_penalty != 0.0:
raise ValueError('p.atten_vecs_in_hypothesis_protos requires '
'p.coverage_penalty == 0.0.')
if p.force_eos_in_top_k and p.force_last_chunk_eoc_in_top_k:
raise ValueError('Currently do not support both force_eos_in_top_k '
'and force_last_chunk_eoc_in_top_k being True.')
def _BeamSearchStep(self, theta, encoder_outputs, cur_step, step_ids,
core_bs_states, other_states, num_hyps_per_beam,
pre_beam_search_step_callback,
post_beam_search_step_callback):
"""Extend beam search hyps for one step.
| num_beams = Number of source sequences to be decoded.
| num_hyps_per_beam = Number of hyps to keep per source sequence.
| num_hyps = num_beams * num_hyps_per_beam
| src_seq_len = Number of time steps in the source sequence.
| src_batch = Number of examples in the source sequence.
| tgt_seq_len = Maximum allowed time steps in the target sequence.
| tgt_batch = num_hyps_per_beam * src_batch
Args:
theta: A `.NestedMap` object containing weights' values of the decoder
layer and its children layers.
encoder_outputs: A `.NestedMap` containing encoder outputs to be passed to
the callbacks.
cur_step: A scalar int tensor, the current time step, 0-based.
step_ids: An int tensor of shape [num_hyps, 1]. The input ids to the
current search step.
core_bs_states: A tuple of core beam search states. This list is
maintained by this helper class.
other_states: A `.NestedMap` of other beam search states. This
`.NestedMap` is managed and updated by the client. It is expected that
each of its member tensors are of rank >= 1. t[i, ...] is the state of
the i-th hyp at the beginning of this search step.
num_hyps_per_beam: Num of hyps to keep per beam.
pre_beam_search_step_callback: The `PreBeamSearchStepCallback` callback.
See class header comments for more details.
post_beam_search_step_callback: The `PostBeamSearchStepCallback` callback.
See class header comments for more details.
Returns:
      A tuple of the following elements for the next beam search step,
(next step, all_done, step_ids, core_bs_states, other_states)
"""
p = self.params
bs_results, other_states = pre_beam_search_step_callback(
theta, encoder_outputs, step_ids, other_states, num_hyps_per_beam,
cur_step)
(best_scores, cumulative_scores, in_scores, in_hyps, in_prev_hyps,
in_done_hyps, in_atten_probs, in_beam_done) = core_bs_states
(out_best_scores, out_cumulative_scores, out_scores, out_hyps,
out_prev_hyps, out_done_hyps, out_atten_probs, out_beam_done,
all_done) = ops.beam_search_step(
tf.cast(bs_results.log_probs, dtype=p.dtype),
tf.cast(bs_results.atten_probs, dtype=p.dtype),
best_scores,
cumulative_scores,
in_scores,
in_hyps,
in_prev_hyps,
in_done_hyps,
in_atten_probs,
in_beam_done,
bs_results.is_last_chunk if self._model_uses_eoc_id else [],
cur_step,
eoc_id=p.target_eoc_id,
eos_id=p.target_eos_id,
beam_size=p.beam_size,
num_hyps_per_beam=num_hyps_per_beam,
valid_eos_max_logit_delta=p.valid_eos_max_logit_delta,
merge_paths=p.merge_paths,
allow_empty_terminated_hyp=p.allow_empty_terminated_hyp,
ensure_full_beam=p.ensure_full_beam,
force_eos_in_last_step=p.force_eos_in_last_step,
force_eos_in_top_k=p.force_eos_in_top_k,
force_last_chunk_eoc_in_top_k=p.force_last_chunk_eoc_in_top_k,
merged_topk_buffer_size_factor=p.merged_topk_buffer_size_factor,
local_eos_threshold=p.local_eos_threshold,
beam_independence=p.terminate_beams_independently,
atten_vecs_in_hypothesis_protos=p.atten_vecs_in_hypothesis_protos)
new_step_ids = tf.reshape(out_hyps[cur_step, :], tf.shape(step_ids))
new_step_ids.set_shape(step_ids.get_shape())
# [num_hyps_per_beam * num_beams].
old_hyp_ids = tf.reshape(
tf.slice(out_prev_hyps, begin=[cur_step, 0], size=[1, -1]), [-1])
if p.batch_major_compute:
      # Transform the indices into the key/value cache for fast decoding
      # (prefix_states in other_states), because the num_hyps dimension of
      # the cache is computed as num_beams by num_hyps_per_beam, which is
      # different from the old_hyp_ids assumption (num_hyps_per_beam by
      # num_beams). Both a transpose and a recomputation are required to
      # correct the indices.
num_beams = tf.shape(best_scores)[0]
# [num_beams * num_hyps_per_beam].
old_hyp_ids_in_cache_order = tf.reshape(
tf.transpose(tf.reshape(old_hyp_ids, [num_hyps_per_beam, -1])), [-1])
old_hyp_ids_in_cache_order = (
(old_hyp_ids_in_cache_order % num_beams) * num_hyps_per_beam +
old_hyp_ids_in_cache_order // num_beams)
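      # Worked example (illustration, not from the original code): with
      # num_beams=2 and num_hyps_per_beam=3, a flat id encodes
      # id = hyp * num_beams + beam, so id 3 is (hyp=1, beam=1). The line
      # above maps it to beam * num_hyps_per_beam + hyp =
      # (3 % 2) * 3 + 3 // 2 = 4, its position in the
      # (num_beams, num_hyps_per_beam)-ordered cache.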
new_bs_states = (out_best_scores, out_cumulative_scores, out_scores,
out_hyps, out_prev_hyps, out_done_hyps, out_atten_probs,
out_beam_done)
random_seed_regex = re.compile(r'rnn_states\[\d+\].r$')
tarzan_constant_regex = re.compile(
r'fusion_states.lm_states.*dec_self_attention.*')
# Special handling of Tarzan tgt_mask.
tarzan_mask_regex = re.compile(r'fusion_states.lm_states.tgt_mask$')
def ReOrderHyps(key, x_in):
"""Reorders x_in based on prev hyp ids."""
if random_seed_regex.match(key):
# For keys like rnn_states[0].r, it is a shape [2] random seeds tensor
# used for deterministic behavior and should not be reordered.
return py_utils.HasShape(x_in, [2])
if p.reorder_tarzan_states and tarzan_constant_regex.match(key):
# A group of tarzan LM states which need to be kept constant.
tf.logging.info('Not reorder: %s', key)
return x_in
correct_old_hyp_ids = (
old_hyp_ids_in_cache_order if p.batch_major_compute else old_hyp_ids)
if (isinstance(x_in, tf.Tensor) and x_in.shape.ndims):
if p.reorder_tarzan_states and tarzan_mask_regex.match(key):
# Special handling of flat_beam_search tgt_mask.
assert p.batch_major_state, 'Tarzan LM states should be batch major.'
tgt_mask = x_in
num_beams = tf.shape(best_scores)[0]
# prev_hyp: [beam, num_hyps_per_beam] --> [num_hyps_per_beam, beam]
prev_hyp = tf.transpose(
tf.reshape(old_hyp_ids, [num_hyps_per_beam, num_beams]))
# tgt_mask: [beam, num_hyps_per_beam, num_hyps_per_beam*time]
# j=k=num_hyps_per_beam
tgt_mask = tf.einsum('bkt,bjk->bjt', tgt_mask,
tf.one_hot(prev_hyp, num_hyps_per_beam))
# Extend to the next step
buf_size = py_utils.GetShape(tgt_mask)[-1]
t = cur_step + 1
tgt_mask += tf.one_hot(
tf.range(num_hyps_per_beam) + t * num_hyps_per_beam, buf_size)
x_out = tgt_mask
else:
if x_in.shape.ndims > 2 and not p.batch_major_state:
# Use corrected indices only here for batch major compute as
# key/value caches are the states being affected.
x_out = tf.gather(x_in, correct_old_hyp_ids, axis=1)
elif key in POSSIBLY_TIME_MAJOR_STATE_KEYS:
x_out = tf.gather(x_in, old_hyp_ids, axis=-1)
else:
x_out = tf.gather(x_in, correct_old_hyp_ids)
x_out.set_shape(x_in.get_shape())
return x_out
else:
return x_in
new_other_states = other_states.TransformWithKey(ReOrderHyps)
final_other_states = post_beam_search_step_callback(theta, encoder_outputs,
new_step_ids,
new_other_states)
return (cur_step + 1, all_done, new_step_ids, new_bs_states,
final_other_states)
def BeamSearchDecode(self,
theta,
encoder_outputs,
num_hyps_per_beam_override=0,
init_beam_search_state=None,
pre_beam_search_step_callback=None,
post_beam_search_step_callback=None,
max_steps=None):
"""Performs beam-search based decoding.
Args:
theta: A NestedMap object containing weights' values of the decoder layer
and its children layers.
encoder_outputs: A NestedMap containing encoder outputs to be passed to
the callbacks. Mostly opaque to BeamSearchHelper, except that it should
contain either a 'seq_lengths' field of shape [source_batch_size] or
a 'paddings' field of shape [source_max_lengths, source_batch_size].
num_hyps_per_beam_override: If set to a value <= 0, this parameter is
ignored. If set to a value > 0, then this value will be used to override
`p.num_hyps_per_beam`.
init_beam_search_state: The `InitBeamSearchState` callback. Please refer
to the class header comments for more details.
pre_beam_search_step_callback: The `PreBeamSearchStepCallback` callback.
Please refer to the class header comments for more details.
post_beam_search_step_callback: The `PostBeamSearchStepCallback` callback.
Please refer to the class header comments for more details.
max_steps: maximum beam search steps. If None, use
self.params.target_seq_len.
Returns:
A `BeamSearchDecodeOutput`.
"""
p = self.params
num_hyps_per_beam = p.num_hyps_per_beam
if num_hyps_per_beam_override > 0:
num_hyps_per_beam = num_hyps_per_beam_override
if max_steps is None:
max_steps = p.target_seq_len
initial_results, other_states = init_beam_search_state(
theta, encoder_outputs, num_hyps_per_beam)
num_hyps = tf.shape(initial_results.log_probs)[0]
num_beams = num_hyps // num_hyps_per_beam
if 'step_ids' in initial_results:
# [num_hyps, 1]
step_ids = tf.ensure_shape(initial_results.step_ids, [None, 1])
else:
step_ids = tf.fill([num_hyps, 1],
tf.constant(p.target_sos_id, dtype=tf.int32))
min_score = -1e36
best_scores = (tf.zeros(shape=[num_beams], dtype=p.dtype) + min_score)
cumulative_scores = tf.zeros(shape=[num_hyps], dtype=p.dtype)
in_scores = tf.zeros([max_steps, num_hyps], dtype=p.dtype)
in_hyps = tf.zeros([max_steps, num_hyps], dtype=tf.int32)
in_prev_hyps = tf.zeros([max_steps, num_hyps], dtype=tf.int32)
in_done_hyps = tf.zeros([max_steps, num_hyps], dtype=tf.string)
bs_atten_probs = tf.zeros(
[max_steps, num_hyps,
tf.shape(initial_results.atten_probs)[1]],
dtype=p.dtype)
beam_done = tf.zeros([num_beams], dtype=tf.bool)
cur_step = tf.constant(0, dtype=tf.int32)
all_done = tf.constant(False, dtype=tf.bool)
core_bs_states = (best_scores, cumulative_scores, in_scores, in_hyps,
in_prev_hyps, in_done_hyps, bs_atten_probs, beam_done)
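# Positional layout of core_bs_states, for readers of the tuple indexing
# below: 0=best_scores, 1=cumulative_scores, 2=in_scores, 3=in_hyps,
# 4=in_prev_hyps, 5=in_done_hyps (read back later as final_bs_states[5]),
# 6=bs_atten_probs, 7=beam_done.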
def LoopContinue(cur_step, all_done, unused_step_ids, unused_core_bs_states,
unused_other_states_list):
return tf.math.logical_and(cur_step < max_steps,
tf.math.logical_not(all_done))
def LoopBody(cur_step, unused_all_done, step_ids, core_bs_states,
other_states_list):
(cur_step, all_done, new_step_ids, new_bs_states,
new_other_states) = self._BeamSearchStep(
theta, encoder_outputs, cur_step, step_ids, core_bs_states,
other_states.Pack(other_states_list), num_hyps_per_beam,
pre_beam_search_step_callback, post_beam_search_step_callback)
return (cur_step, all_done, new_step_ids, new_bs_states,
new_other_states.Flatten())
flat_other_states = other_states.Flatten()
_, _, _, final_bs_states, flat_final_other_states = tf.while_loop(
LoopContinue,
LoopBody,
loop_vars=(cur_step, all_done, step_ids, core_bs_states,
flat_other_states),
parallel_iterations=10,
back_prop=False,
swap_memory=False,
shape_invariants=(tf.TensorShape(cur_step.get_shape()),
tf.TensorShape(all_done.get_shape()),
tf.TensorShape(step_ids.get_shape()),
_GetShapes(core_bs_states),
_GetShapes(flat_other_states, none_shapes=True)))
# [target_seq_len, num_beams * num_hyps_per_beam].
final_done_hyps = final_bs_states[5]
final_other_states = other_states.Pack(flat_final_other_states)
# Assume that `paddings` has shape [source_max_lengths, source_batch_size]
# by default, and compute `encoded_seq_lengths` accordingly. This can be
# overridden by directly passing `seq_lengths` in the `encoder_outputs`
# NestedMap.
encoded_seq_lengths = getattr(encoder_outputs, 'seq_lengths', None)
if encoded_seq_lengths is None:
source_paddings = encoder_outputs.padding
if isinstance(source_paddings, py_utils.NestedMap):
encoded_seq_lengths = tf.cast(
tf.round(
tf.reduce_sum(1.0 - tf.transpose(source_paddings.Flatten()[0]),
1)), tf.int32)
else:
encoded_seq_lengths = tf.cast(
tf.round(
tf.reduce_sum(
1.0 - tf.cast(tf.transpose(source_paddings), tf.float32),
1)), tf.int32)
# [num_beams, num_hyps_per_beam].
topk_hyps = ops.top_k_terminated_hyps(
final_done_hyps,
encoded_seq_lengths,
k=num_hyps_per_beam,
num_hyps_per_beam=num_hyps_per_beam,
length_normalization=p.length_normalization,
coverage_penalty=p.coverage_penalty,
target_seq_length_ratio=p.target_seq_length_ratio)
# [num_beams * num_hyps_per_beam, ...].
max_seq_length = 0 if isinstance(max_steps, tf.Tensor) else max_steps
topk_ids, topk_lens, topk_scores = ops.unpack_hyp(
tf.reshape(topk_hyps, [-1]), max_seq_length=max_seq_length)
# [num_beams, num_hyps_per_beam].
topk_scores = tf.reshape(topk_scores, tf.shape(topk_hyps))
return BeamSearchDecodeOutput(topk_hyps, topk_ids, topk_lens, topk_scores,
None, final_other_states, {})
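# A minimal wiring sketch for BeamSearchDecode above; the callback names,
# param values, and shapes are illustrative assumptions, not part of this
# module:
#
#   helper = BeamSearchHelper.Params().Set(
#       name='bs', num_hyps_per_beam=8).Instantiate()
#   out = helper.BeamSearchDecode(
#       theta, encoder_outputs,
#       init_beam_search_state=my_init_cb,
#       pre_beam_search_step_callback=my_pre_step_cb,
#       post_beam_search_step_callback=my_post_step_cb)
#   # out.topk_ids has shape roughly [num_beams * num_hyps_per_beam, max_steps].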
def _GetShapes(tensors, none_shapes=False):
"""Util for getting nested structure of shapes from structure of tensors.
Args:
tensors: Structure of Tensors to get shapes for.
none_shapes: Returns None shapes if true.
Returns:
The same structure as tensors but of corresponding `TensorShape` objects.
"""
shapes = []
for t in tf.nest.flatten(tensors):
shape = t.get_shape() if isinstance(t, tf.Tensor) else None
if none_shapes:
if shape:
shapes.append(tf.TensorShape([None] * len(shape)))
else:
shapes.append(tf.TensorShape(None))
else:
shapes.append(tf.TensorShape(shape))
return type(tensors)(tf.nest.pack_sequence_as(tensors, shapes))
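# For example, a [4, 8] Tensor yields TensorShape([4, 8]) by default and
# TensorShape([None, None]) with none_shapes=True, which is what the
# while_loop shape invariants above rely on for variable-shape states.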
def MergeBeamSearchOutputs(max_hyps_per_beam, beam_search_outputs):
"""Merges beam search hyps from multiple decoders.
Args:
max_hyps_per_beam: the number of top hyps in the merged results. Must be
less than or equal to the total number of input hyps.
beam_search_outputs: a list of BeamSearchDecodeOutput objects. Must share
the same source_batch and max sequence length.
Returns:
A BeamSearchDecodeOutput object containing max_hyps_per_beam hypotheses per
beam.
"""
source_batch = tf.shape(beam_search_outputs[0].topk_hyps)[0]
value_dict = {}
for output in beam_search_outputs:
hyps_per_beam = py_utils.with_dependencies([
py_utils.assert_equal(source_batch,
tf.shape(output.topk_hyps)[0]),
],
tf.shape(output.topk_hyps)[1])
for k, v in output._asdict().items():
if v is None or isinstance(v, dict):
# Nested structures are not supported yet.
continue
if k == 'done_hyps':
v = tf.transpose(v)
if k not in value_dict:
value_dict[k] = []
value_dict[k].append(tf.reshape(v, [source_batch, hyps_per_beam, -1]))
# Concatenate the tensors along the 'num_hyps_per_beam' dimension.
concatenated = {}
for k, values in value_dict.items():
if len(values) != len(beam_search_outputs):
raise ValueError('Incomplete values for %s: %s' %
(k, beam_search_outputs))
concatenated[k] = tf.concat(values, axis=1)
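# Give empty hyps (topk_lens == 0) a score of -1e6 so that tf.nn.top_k
# below never selects them ahead of real hypotheses.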
scores = concatenated['topk_scores']
scores = tf.where(
tf.equal(concatenated['topk_lens'], 0), tf.fill(tf.shape(scores), -1e6),
scores)
scores = tf.squeeze(scores, -1)
# Select top max_hyps_per_beam indices per beam.
_, top_indices = tf.nn.top_k(scores, max_hyps_per_beam)
batch_ids = tf.tile(
tf.expand_dims(tf.range(source_batch), -1), [1, max_hyps_per_beam])
# [source_batch, max_hyps_per_beam, 2]
gather_indices = tf.stack([batch_ids, top_indices], axis=-1)
# Gather the merged top hyps according to 'gather_indices'.
top = beam_search_outputs[0]._asdict()
total_hyps = source_batch * max_hyps_per_beam
for k, v in concatenated.items():
v = tf.gather_nd(v, gather_indices)
if k == 'done_hyps':
v = tf.transpose(tf.reshape(v, [total_hyps, -1]))
elif k == 'topk_hyps':
v = tf.reshape(v, [source_batch, max_hyps_per_beam])
elif k == 'topk_ids':
v = tf.reshape(v, [total_hyps, -1])
elif k in ('topk_lens', 'topk_scores', 'topk_decoded'):
v = tf.reshape(v, [total_hyps])
else:
raise ValueError('Unexpected field: %s' % k)
top[k] = v
return BeamSearchDecodeOutput(**top)
class GreedySearchHelper(base_layer.BaseLayer):
"""Helper class for performing greedy decoding.
The user of this helper class needs to implement three callbacks just as in a
beam search decoder.
"""
@classmethod
def Params(cls):
p = super().Params()
p.Define('target_sos_id', 1, 'Id of the start of sentence token.')
p.Define('target_eos_id', 2, 'Id of the end of sentence token.')
p.Define(
'target_seq_len', 0, 'Maximum allowed target seq length. Note '
'that decoding terminates if an end of sentence token '
'is not emitted after target_seq_len decode steps.')
p.name = 'greedy_search'
return p
def _GreedySearchStep(self, theta, encoder_outputs, cur_step, step_ids,
hyp_ids, hyp_lens, done_hyps, other_states,
pre_beam_search_step_callback,
post_beam_search_step_callback):
"""Extend greedy search hyps for one step.
Args:
theta: A `.NestedMap` object containing weights' values of the decoder
layer and its children layers.
encoder_outputs: A `.NestedMap` containing encoder outputs to be passed to
the callbacks.
cur_step: A scalar int tensor, the current time step, 0-based.
step_ids: An int tensor of shape [num_hyps, 1]. The input ids to the
current search step.
hyp_ids: An int tensor of shape [num_hyps, tgt_seq_len].
hyp_lens: Valid length of all the hyps. Tokens after eos ids are not
counted.
done_hyps: Whether or not a hyp has finished.
other_states: A `.NestedMap` of other beam search states. This
`.NestedMap` is managed and updated by the client. It is expected that
each of its member tensors are of rank >= 1. t[i, ...] is the state of
the i-th hyp at the beginning of this search step.
pre_beam_search_step_callback: The `PreBeamSearchStepCallback` callback.
See class header comments for more details.
post_beam_search_step_callback: The `PostBeamSearchStepCallback` callback.
See class header comments for more details.
Returns:
A tuple of the following elements for the next greedy search step:
(next step, new_step_ids, hyp_ids, hyp_lens, done_hyps, other_states)
"""
p = self.params
# Increment hyp_lens by 1 if the hyp is not finished yet.
hyp_lens = hyp_lens + (1 - tf.cast(done_hyps, tf.int32))
bs_results, new_other_states = pre_beam_search_step_callback(
theta, encoder_outputs, step_ids, other_states, 1, cur_step)
new_step_ids = tf.math.argmax(bs_results.log_probs, 1)
new_step_ids = tf.cast(new_step_ids, tf.int32)
new_step_ids = tf.reshape(new_step_ids, tf.shape(step_ids))
final_other_states = post_beam_search_step_callback(theta, encoder_outputs,
new_step_ids,
new_other_states)
# Stash new_step_ids into the right slot.
new_step_ids_1d = tf.reshape(new_step_ids, [-1])
hyp_ids = inplace_ops.alias_inplace_update(hyp_ids, cur_step,
new_step_ids_1d)
# Update done_hyps if the current step_ids is the end of sequence token.
done_hyps = tf.math.logical_or(done_hyps,
tf.equal(new_step_ids_1d, p.target_eos_id))
return (cur_step + 1, new_step_ids, hyp_ids, hyp_lens, done_hyps,
final_other_states)
def GreedySearchDecode(self,
theta,
encoder_outputs,
init_beam_search_state=None,
pre_beam_search_step_callback=None,
post_beam_search_step_callback=None,
max_steps=None):
"""Performs greedy-search based decoding.
Args:
theta: A NestedMap object containing weights' values of the decoder layer
and its children layers.
encoder_outputs: A NestedMap containing encoder outputs to be passed to
the callbacks.
init_beam_search_state: The `InitBeamSearchState` callback. Please refer
to the class header comments for more details.
pre_beam_search_step_callback: The `PreBeamSearchStepCallback` callback.
Please refer to the class header comments for more details.
post_beam_search_step_callback: The `PostBeamSearchStepCallback` callback.
Please refer to the class header comments for more details.
max_steps: maximum beam search steps. If None, use
self.params.target_seq_len.
Returns:
A tuple (hyp_ids, hyp_lens, done_hyps). Note that num_hyps is same as
src_batch_size.
- hyp_ids: [num_hyps, max_step]. Hyps end with <eos> token if the <eos>
token is encountered during search.
- hyp_lens: [num_hyps].
- done_hyps: [num_hyps], whether or not an eos is encountered.
"""
p = self.params
if max_steps is None:
max_steps = p.target_seq_len
initial_results, other_states = init_beam_search_state(
theta,
encoder_outputs,
1 # num_hyps_per_beam
)
num_hyps = tf.shape(initial_results.log_probs)[0]
if 'step_ids' in initial_results:
# [num_hyps, 1]
step_ids = tf.ensure_shape(initial_results.step_ids, [None, 1])
else:
step_ids = tf.fill([num_hyps, 1],
tf.constant(p.target_sos_id, dtype=tf.int32))
cur_step = tf.constant(0, dtype=tf.int32)
done_hyps = inplace_ops.empty(shape=[num_hyps], dtype=tf.bool, init=True,
name='done_hyps')
hyp_lens = inplace_ops.empty(shape=[num_hyps], dtype=tf.int32, init=True,
name='hyp_lens')
hyp_ids = inplace_ops.empty(
shape=[max_steps, num_hyps], dtype=tf.int32, init=True,
name='hyp_ids')
def LoopContinue(cur_step, unused_step_ids, unused_hyp_ids, unused_hyp_lens,
done_hyps, unused_other_states_list):
return tf.math.logical_and(cur_step < max_steps,
tf.math.logical_not(tf.reduce_all(done_hyps)))
def LoopBody(cur_step, step_ids, hyp_ids, hyp_lens, done_hyps,
other_states_list):
(cur_step, new_step_ids, hyp_ids, hyp_lens, done_hyps,
new_other_states) = self._GreedySearchStep(
theta, encoder_outputs, cur_step,
step_ids, hyp_ids, hyp_lens, done_hyps,
other_states.Pack(other_states_list), pre_beam_search_step_callback,
post_beam_search_step_callback)
return (cur_step, new_step_ids, hyp_ids, hyp_lens, done_hyps,
new_other_states.Flatten())
flat_other_states = other_states.Flatten()
_, _, final_hyp_ids, final_hyp_lens, final_done_hyps, _ = tf.while_loop(
LoopContinue,
LoopBody,
loop_vars=(cur_step, step_ids, hyp_ids, hyp_lens, done_hyps,
flat_other_states),
parallel_iterations=10,
back_prop=False,
swap_memory=False,
shape_invariants=(tf.TensorShape(cur_step.get_shape()),
tf.TensorShape(step_ids.get_shape()),
tf.TensorShape(hyp_ids.get_shape()),
tf.TensorShape(hyp_lens.get_shape()),
tf.TensorShape(done_hyps.get_shape()),
_GetShapes(flat_other_states, none_shapes=True)))
# transpose hyp_ids so it matches BeamSearchDecode's output
final_hyp_ids = tf.transpose(final_hyp_ids)
return final_hyp_ids, final_hyp_lens, final_done_hyps
|
{
"content_hash": "c1a7fe8aff291e7dfa4eb6123c74a9c2",
"timestamp": "",
"source": "github",
"line_count": 906,
"max_line_length": 80,
"avg_line_length": 46.567328918322296,
"alnum_prop": 0.6268784072054989,
"repo_name": "tensorflow/lingvo",
"id": "4f226aa3f9b7c2b838810c7092779d2a130c2953",
"size": "42879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lingvo/core/beam_search_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5163"
},
{
"name": "C++",
"bytes": "556344"
},
{
"name": "Dockerfile",
"bytes": "8484"
},
{
"name": "Jupyter Notebook",
"bytes": "36721"
},
{
"name": "Python",
"bytes": "9574124"
},
{
"name": "Shell",
"bytes": "50408"
},
{
"name": "Starlark",
"bytes": "182688"
},
{
"name": "TeX",
"bytes": "37275"
}
],
"symlink_target": ""
}
|
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.utils.feedgenerator import Atom1Feed
from django.utils.html import strip_tags
from mezzanine.blog.models import BlogPost, BlogCategory
from mezzanine.generic.models import Keyword
from mezzanine.pages.models import Page
from mezzanine.conf import settings
from mezzanine.utils.models import get_user_model
User = get_user_model()
class PostsRSS(Feed):
"""
RSS feed for all blog posts.
"""
def __init__(self, *args, **kwargs):
"""
Use the title and description of the Blog page for the feed's
title and description. If the blog page has somehow been
removed, fall back to the ``SITE_TITLE`` and ``SITE_TAGLINE``
settings.
"""
self.tag = kwargs.pop("tag", None)
self.category = kwargs.pop("category", None)
self.username = kwargs.pop("username", None)
super(PostsRSS, self).__init__(*args, **kwargs)
self._public = True
try:
page = Page.objects.published().get(slug=settings.BLOG_SLUG)
except Page.DoesNotExist:
page = None
else:
self._public = not page.login_required
if self._public:
settings.use_editable()
if page is not None:
self._title = "%s | %s" % (page.title, settings.SITE_TITLE)
self._description = strip_tags(page.description)
else:
self._title = settings.SITE_TITLE
self._description = settings.SITE_TAGLINE
def title(self):
return self._title
def description(self):
return self._description
def link(self):
return reverse("blog_post_feed", kwargs={"format": "rss"})
def items(self):
if not self._public:
return []
blog_posts = BlogPost.objects.published().select_related("user")
if self.tag:
tag = get_object_or_404(Keyword, slug=self.tag)
blog_posts = blog_posts.filter(keywords__in=tag.assignments.all())
if self.category:
category = get_object_or_404(BlogCategory, slug=self.category)
blog_posts = blog_posts.filter(categories=category)
if self.username:
author = get_object_or_404(User, username=self.username)
blog_posts = blog_posts.filter(user=author)
limit = settings.BLOG_RSS_LIMIT
if limit is not None:
blog_posts = blog_posts[:limit]
return blog_posts
def item_description(self, item):
return item.content
def categories(self):
if not self._public:
return []
return BlogCategory.objects.all()
def item_author_name(self, item):
return item.user.get_full_name() or item.user.username
def item_author_link(self, item):
username = item.user.username
return reverse("blog_post_list_author", kwargs={"username": username})
def item_pubdate(self, item):
return item.publish_date
def item_categories(self, item):
return item.categories.all()
class PostsAtom(PostsRSS):
"""
Atom feed for all blog posts.
"""
feed_type = Atom1Feed
def subtitle(self):
return self.description()
def link(self):
return reverse("blog_post_feed", kwargs={"format": "atom"})
|
{
"content_hash": "0c365cc761e83cb26c6737e4d7c2752b",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 78,
"avg_line_length": 32.138888888888886,
"alnum_prop": 0.6211466436185538,
"repo_name": "wrwrwr/mezzanine",
"id": "efc561b433293e153b010ef59eba6a517a6c5a2b",
"size": "3472",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "mezzanine/blog/feeds.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "75436"
},
{
"name": "JavaScript",
"bytes": "212127"
},
{
"name": "Python",
"bytes": "978985"
}
],
"symlink_target": ""
}
|
import os
import os.path
import sys
import string
import getopt
import re
import socket
import time
import threading
import traceback
import types
import io
import linecache
from code import InteractiveInterpreter
from platform import python_version
try:
from Tkinter import *
except ImportError:
print>>sys.__stderr__, "** IDLE can't import Tkinter. " \
"Your Python may not be configured for Tk. **"
sys.exit(1)
import tkMessageBox
from idlelib.EditorWindow import EditorWindow, fixwordbreaks
from idlelib.FileList import FileList
from idlelib.ColorDelegator import ColorDelegator
from idlelib.UndoDelegator import UndoDelegator
from idlelib.OutputWindow import OutputWindow
from idlelib.configHandler import idleConf
from idlelib import idlever
from idlelib import rpc
from idlelib import Debugger
from idlelib import RemoteDebugger
from idlelib import macosxSupport
IDENTCHARS = string.ascii_letters + string.digits + "_"
HOST = '127.0.0.1' # python execution server on localhost loopback
PORT = 0 # someday pass in host, port for remote debug capability
try:
from signal import SIGTERM
except ImportError:
SIGTERM = 15
# Override warnings module to write to warning_stream. Initialize to send IDLE
# internal warnings to the console. ScriptBinding.check_syntax() will
# temporarily redirect the stream to the shell window to display warnings when
# checking the user's code.
global warning_stream
warning_stream = sys.__stderr__
try:
import warnings
except ImportError:
pass
else:
def idle_showwarning(message, category, filename, lineno,
file=None, line=None):
if file is None:
file = warning_stream
try:
file.write(warnings.formatwarning(message, category, filename,
lineno, line=line))
except IOError:
pass ## file (probably __stderr__) is invalid, warning dropped.
warnings.showwarning = idle_showwarning
def idle_formatwarning(message, category, filename, lineno, line=None):
"""Format warnings the IDLE way"""
s = "\nWarning (from warnings module):\n"
s += ' File \"%s\", line %s\n' % (filename, lineno)
if line is None:
line = linecache.getline(filename, lineno)
line = line.strip()
if line:
s += " %s\n" % line
s += "%s: %s\n>>> " % (category.__name__, message)
return s
warnings.formatwarning = idle_formatwarning
def extended_linecache_checkcache(filename=None,
orig_checkcache=linecache.checkcache):
"""Extend linecache.checkcache to preserve the <pyshell#...> entries
Rather than repeating the linecache code, patch it to save the
<pyshell#...> entries, call the original linecache.checkcache()
(skipping them), and then restore the saved entries.
orig_checkcache is bound at definition time to the original
method, allowing it to be patched.
"""
cache = linecache.cache
save = {}
for key in list(cache):
if key[:1] + key[-1:] == '<>':
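# i.e. angle-bracketed pseudo-file keys such as the '<pyshell#NN>'
# entries created by stuffsource() later in this file.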
save[key] = cache.pop(key)
orig_checkcache(filename)
cache.update(save)
# Patch linecache.checkcache():
linecache.checkcache = extended_linecache_checkcache
class PyShellEditorWindow(EditorWindow):
"Regular text edit window in IDLE, supports breakpoints"
def __init__(self, *args):
self.breakpoints = []
EditorWindow.__init__(self, *args)
self.text.bind("<<set-breakpoint-here>>", self.set_breakpoint_here)
self.text.bind("<<clear-breakpoint-here>>", self.clear_breakpoint_here)
self.text.bind("<<open-python-shell>>", self.flist.open_shell)
self.breakpointPath = os.path.join(idleConf.GetUserCfgDir(),
'breakpoints.lst')
# whenever a file is changed, restore breakpoints
def filename_changed_hook(old_hook=self.io.filename_change_hook,
self=self):
self.restore_file_breaks()
old_hook()
self.io.set_filename_change_hook(filename_changed_hook)
if self.io.filename:
self.restore_file_breaks()
rmenu_specs = [
("Cut", "<<cut>>", "rmenu_check_cut"),
("Copy", "<<copy>>", "rmenu_check_copy"),
("Paste", "<<paste>>", "rmenu_check_paste"),
("Set Breakpoint", "<<set-breakpoint-here>>", None),
("Clear Breakpoint", "<<clear-breakpoint-here>>", None)
]
def set_breakpoint(self, lineno):
text = self.text
filename = self.io.filename
text.tag_add("BREAK", "%d.0" % lineno, "%d.0" % (lineno+1))
try:
i = self.breakpoints.index(lineno)
except ValueError: # only add if missing, i.e. do once
self.breakpoints.append(lineno)
try: # update the subprocess debugger
debug = self.flist.pyshell.interp.debugger
debug.set_breakpoint_here(filename, lineno)
except: # but debugger may not be active right now...
pass
def set_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
self.set_breakpoint(lineno)
def clear_breakpoint_here(self, event=None):
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
lineno = int(float(text.index("insert")))
try:
self.breakpoints.remove(lineno)
except:
pass
text.tag_remove("BREAK", "insert linestart",\
"insert lineend +1char")
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_breakpoint_here(filename, lineno)
except:
pass
def clear_file_breaks(self):
if self.breakpoints:
text = self.text
filename = self.io.filename
if not filename:
text.bell()
return
self.breakpoints = []
text.tag_remove("BREAK", "1.0", END)
try:
debug = self.flist.pyshell.interp.debugger
debug.clear_file_breaks(filename)
except:
pass
def store_file_breaks(self):
"Save breakpoints when file is saved"
# XXX 13 Dec 2002 KBK Currently the file must be saved before it can
# be run. The breaks are saved at that time. If we introduce
# a temporary file save feature the save breaks functionality
# needs to be re-verified, since the breaks at the time the
# temp file is created may differ from the breaks at the last
# permanent save of the file. Currently, a break introduced
# after a save will be effective, but not persistent.
# This is necessary to keep the saved breaks synched with the
# saved file.
#
# Breakpoints are set as tagged ranges in the text. Certain
# kinds of edits cause these ranges to be deleted: Inserting
# or deleting a line just before a breakpoint, and certain
# deletions prior to a breakpoint. These issues need to be
# investigated and understood. It's not clear if they are
# Tk issues or IDLE issues, or whether they can actually
# be fixed. Since a modified file has to be saved before it is
# run, and since self.breakpoints (from which the subprocess
# debugger is loaded) is updated during the save, the visible
# breaks stay synched with the subprocess even if one of these
# unexpected breakpoint deletions occurs.
breaks = self.breakpoints
filename = self.io.filename
try:
with open(self.breakpointPath,"r") as old_file:
lines = old_file.readlines()
except IOError:
lines = []
try:
with open(self.breakpointPath,"w") as new_file:
for line in lines:
if not line.startswith(filename + '='):
new_file.write(line)
self.update_breakpoints()
breaks = self.breakpoints
if breaks:
new_file.write(filename + '=' + str(breaks) + '\n')
except IOError as err:
if not getattr(self.root, "breakpoint_error_displayed", False):
self.root.breakpoint_error_displayed = True
tkMessageBox.showerror(title='IDLE Error',
message='Unable to update breakpoint list:\n%s'
% str(err),
parent=self.text)
def restore_file_breaks(self):
self.text.update() # needed so the "BREAK" tags set below become visible
if self.io is None:
# can happen if IDLE closes due to the .update() call
return
filename = self.io.filename
if filename is None:
return
if os.path.isfile(self.breakpointPath):
lines = open(self.breakpointPath,"r").readlines()
for line in lines:
if line.startswith(filename + '='):
breakpoint_linenumbers = eval(line[len(filename)+1:])
for breakpoint_linenumber in breakpoint_linenumbers:
self.set_breakpoint(breakpoint_linenumber)
def update_breakpoints(self):
"Retrieves all the breakpoints in the current window"
text = self.text
ranges = text.tag_ranges("BREAK")
linenumber_list = self.ranges_to_linenumbers(ranges)
self.breakpoints = linenumber_list
def ranges_to_linenumbers(self, ranges):
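# 'ranges' is the flat sequence returned by text.tag_ranges("BREAK"):
# alternating start/end text indices such as ("3.0", "4.0", "7.0", "9.0"),
# hence the stride-2 walk below.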
lines = []
for index in range(0, len(ranges), 2):
lineno = int(float(ranges[index].string))
end = int(float(ranges[index+1].string))
while lineno < end:
lines.append(lineno)
lineno += 1
return lines
# XXX 13 Dec 2002 KBK Not used currently
# def saved_change_hook(self):
# "Extend base method - clear breaks if module is modified"
# if not self.get_saved():
# self.clear_file_breaks()
# EditorWindow.saved_change_hook(self)
def _close(self):
"Extend base method - clear breaks when module is closed"
self.clear_file_breaks()
EditorWindow._close(self)
class PyShellFileList(FileList):
"Extend base class: IDLE supports a shell and breakpoints"
# override FileList's class variable, instances return PyShellEditorWindow
# instead of EditorWindow when new edit windows are created.
EditorWindow = PyShellEditorWindow
pyshell = None
def open_shell(self, event=None):
if self.pyshell:
self.pyshell.top.wakeup()
else:
self.pyshell = PyShell(self)
if self.pyshell:
if not self.pyshell.begin():
return None
return self.pyshell
class ModifiedColorDelegator(ColorDelegator):
"Extend base class: colorizer for the shell window itself"
def __init__(self):
ColorDelegator.__init__(self)
self.LoadTagDefs()
def recolorize_main(self):
self.tag_remove("TODO", "1.0", "iomark")
self.tag_add("SYNC", "1.0", "iomark")
ColorDelegator.recolorize_main(self)
def LoadTagDefs(self):
ColorDelegator.LoadTagDefs(self)
theme = idleConf.GetOption('main','Theme','name')
self.tagdefs.update({
"stdin": {'background':None,'foreground':None},
"stdout": idleConf.GetHighlight(theme, "stdout"),
"stderr": idleConf.GetHighlight(theme, "stderr"),
"console": idleConf.GetHighlight(theme, "console"),
})
def removecolors(self):
# Don't remove shell color tags before "iomark"
for tag in self.tagdefs:
self.tag_remove(tag, "iomark", "end")
class ModifiedUndoDelegator(UndoDelegator):
"Extend base class: forbid insert/delete before the I/O mark"
def insert(self, index, chars, tags=None):
try:
if self.delegate.compare(index, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.insert(self, index, chars, tags)
def delete(self, index1, index2=None):
try:
if self.delegate.compare(index1, "<", "iomark"):
self.delegate.bell()
return
except TclError:
pass
UndoDelegator.delete(self, index1, index2)
class MyRPCClient(rpc.RPCClient):
def handle_EOF(self):
"Override the base class - just re-raise EOFError"
raise EOFError
class ModifiedInterpreter(InteractiveInterpreter):
def __init__(self, tkconsole):
self.tkconsole = tkconsole
locals = sys.modules['__main__'].__dict__
InteractiveInterpreter.__init__(self, locals=locals)
self.save_warnings_filters = None
self.restarting = False
self.subprocess_arglist = None
self.port = PORT
self.original_compiler_flags = self.compile.compiler.flags
rpcclt = None
rpcpid = None
def spawn_subprocess(self):
if self.subprocess_arglist is None:
self.subprocess_arglist = self.build_subprocess_arglist()
args = self.subprocess_arglist
self.rpcpid = os.spawnv(os.P_NOWAIT, sys.executable, args)
def build_subprocess_arglist(self):
assert (self.port!=0), (
"Socket should have been assigned a port number.")
w = ['-W' + s for s in sys.warnoptions]
if 1/2 > 0: # True only when this process uses new-style division; pass -Qnew along
w.append('-Qnew')
# Maybe IDLE is installed and is being accessed via sys.path,
# or maybe it's not installed and the idle.py script is being
# run from the IDLE source directory.
del_exitf = idleConf.GetOption('main', 'General', 'delete-exitfunc',
default=False, type='bool')
if __name__ == 'idlelib.PyShell':
command = "__import__('idlelib.run').run.main(%r)" % (del_exitf,)
else:
command = "__import__('run').main(%r)" % (del_exitf,)
if sys.platform[:3] == 'win' and ' ' in sys.executable:
# handle embedded space in path by quoting the argument
decorated_exec = '"%s"' % sys.executable
else:
decorated_exec = sys.executable
return [decorated_exec] + w + ["-c", command, str(self.port)]
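# The list returned above looks roughly like (illustrative values):
#   ['/usr/bin/python2.7', '-c',
#    "__import__('idlelib.run').run.main(False)", '54321']
# where the trailing argument is the port the GUI is listening on.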
def start_subprocess(self):
addr = (HOST, self.port)
# GUI makes several attempts to acquire socket, listens for connection
for i in range(3):
time.sleep(i)
try:
self.rpcclt = MyRPCClient(addr)
break
except socket.error, err:
pass
else:
self.display_port_binding_error()
return None
# if PORT was 0, system will assign an 'ephemeral' port. Find it out:
self.port = self.rpcclt.listening_sock.getsockname()[1]
# if PORT was not 0, probably working with a remote execution server
if PORT != 0:
# To allow reconnection within the 2MSL wait (cf. Stevens TCP
# V1, 18.6), set SO_REUSEADDR. Note that this can be problematic
# on Windows since the implementation allows two active sockets on
# the same address!
self.rpcclt.listening_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.spawn_subprocess()
#time.sleep(20) # test to simulate GUI not accepting connection
# Accept the connection from the Python execution server
self.rpcclt.listening_sock.settimeout(10)
try:
self.rpcclt.accept()
except socket.timeout, err:
self.display_no_subprocess_error()
return None
self.rpcclt.register("console", self.tkconsole)
self.rpcclt.register("stdin", self.tkconsole.stdin)
self.rpcclt.register("stdout", self.tkconsole.stdout)
self.rpcclt.register("stderr", self.tkconsole.stderr)
self.rpcclt.register("flist", self.tkconsole.flist)
self.rpcclt.register("linecache", linecache)
self.rpcclt.register("interp", self)
self.transfer_path(with_cwd=True)
self.poll_subprocess()
return self.rpcclt
def restart_subprocess(self, with_cwd=False):
if self.restarting:
return self.rpcclt
self.restarting = True
# close only the subprocess debugger
debug = self.getdebugger()
if debug:
try:
# Only close subprocess debugger, don't unregister gui_adap!
RemoteDebugger.close_subprocess_debugger(self.rpcclt)
except:
pass
# Kill subprocess, spawn a new one, accept connection.
self.rpcclt.close()
self.unix_terminate()
console = self.tkconsole
was_executing = console.executing
console.executing = False
self.spawn_subprocess()
try:
self.rpcclt.accept()
except socket.timeout, err:
self.display_no_subprocess_error()
return None
self.transfer_path(with_cwd=with_cwd)
console.stop_readline()
# annotate restart in shell window and mark it
console.text.delete("iomark", "end-1c")
if was_executing:
console.write('\n')
console.showprompt()
halfbar = ((int(console.width) - 16) // 2) * '='
console.write(halfbar + ' RESTART ' + halfbar)
console.text.mark_set("restart", "end-1c")
console.text.mark_gravity("restart", "left")
console.showprompt()
# restart subprocess debugger
if debug:
# Restarted debugger connects to current instance of debug GUI
gui = RemoteDebugger.restart_subprocess_debugger(self.rpcclt)
# reload remote debugger breakpoints for all PyShellEditWindows
debug.load_breakpoints()
self.compile.compiler.flags = self.original_compiler_flags
self.restarting = False
return self.rpcclt
def __request_interrupt(self):
self.rpcclt.remotecall("exec", "interrupt_the_server", (), {})
def interrupt_subprocess(self):
threading.Thread(target=self.__request_interrupt).start()
def kill_subprocess(self):
try:
self.rpcclt.close()
except AttributeError: # no socket
pass
self.unix_terminate()
self.tkconsole.executing = False
self.rpcclt = None
def unix_terminate(self):
"UNIX: make sure subprocess is terminated and collect status"
if hasattr(os, 'kill'):
try:
os.kill(self.rpcpid, SIGTERM)
except OSError:
# process already terminated:
return
else:
try:
os.waitpid(self.rpcpid, 0)
except OSError:
return
def transfer_path(self, with_cwd=False):
if with_cwd: # Issue 13506
path = [''] # include Current Working Directory
path.extend(sys.path)
else:
path = sys.path
self.runcommand("""if 1:
import sys as _sys
_sys.path = %r
del _sys
\n""" % (path,))
active_seq = None
def poll_subprocess(self):
clt = self.rpcclt
if clt is None:
return
try:
response = clt.pollresponse(self.active_seq, wait=0.05)
except (EOFError, IOError, KeyboardInterrupt):
# lost connection or subprocess terminated itself, restart
# [the KBI is from rpc.SocketIO.handle_EOF()]
if self.tkconsole.closing:
return
response = None
self.restart_subprocess()
if response:
self.tkconsole.resetoutput()
self.active_seq = None
how, what = response
console = self.tkconsole.console
if how == "OK":
if what is not None:
print >>console, repr(what)
elif how == "EXCEPTION":
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.remote_stack_viewer()
elif how == "ERROR":
errmsg = "PyShell.ModifiedInterpreter: Subprocess ERROR:\n"
print >>sys.__stderr__, errmsg, what
print >>console, errmsg, what
# we received a response to the currently active seq number:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
# Reschedule myself
if not self.tkconsole.closing:
self.tkconsole.text.after(self.tkconsole.pollinterval,
self.poll_subprocess)
debugger = None
def setdebugger(self, debugger):
self.debugger = debugger
def getdebugger(self):
return self.debugger
def open_remote_stack_viewer(self):
"""Initiate the remote stack viewer from a separate thread.
This method is called from the subprocess, and by returning from this
method we allow the subprocess to unblock. After a bit the shell
requests the subprocess to open the remote stack viewer which returns a
static object looking at the last exception. It is queried through
the RPC mechanism.
"""
self.tkconsole.text.after(300, self.remote_stack_viewer)
return
def remote_stack_viewer(self):
from idlelib import RemoteObjectBrowser
oid = self.rpcclt.remotequeue("exec", "stackviewer", ("flist",), {})
if oid is None:
self.tkconsole.root.bell()
return
item = RemoteObjectBrowser.StubObjectTreeItem(self.rpcclt, oid)
from idlelib.TreeWidget import ScrolledCanvas, TreeNode
top = Toplevel(self.tkconsole.root)
theme = idleConf.GetOption('main','Theme','name')
background = idleConf.GetHighlight(theme, 'normal')['background']
sc = ScrolledCanvas(top, bg=background, highlightthickness=0)
sc.frame.pack(expand=1, fill="both")
node = TreeNode(sc.canvas, None, item)
node.expand()
# XXX Should GC the remote tree when closing the window
gid = 0
def execsource(self, source):
"Like runsource() but assumes complete exec source"
filename = self.stuffsource(source)
self.execfile(filename, source)
def execfile(self, filename, source=None):
"Execute an existing file"
if source is None:
source = open(filename, "r").read()
try:
code = compile(source, filename, "exec")
except (OverflowError, SyntaxError):
self.tkconsole.resetoutput()
tkerr = self.tkconsole.stderr
print>>tkerr, '*** Error in script or command!\n'
print>>tkerr, 'Traceback (most recent call last):'
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
else:
self.runcode(code)
def runsource(self, source):
"Extend base class method: Stuff the source in the line cache first"
filename = self.stuffsource(source)
self.more = 0
self.save_warnings_filters = warnings.filters[:]
warnings.filterwarnings(action="error", category=SyntaxWarning)
if isinstance(source, types.UnicodeType):
from idlelib import IOBinding
try:
source = source.encode(IOBinding.encoding)
except UnicodeError:
self.tkconsole.resetoutput()
self.write("Unsupported characters in input\n")
return
try:
# InteractiveInterpreter.runsource() calls its runcode() method,
# which is overridden (see below)
return InteractiveInterpreter.runsource(self, source, filename)
finally:
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
def stuffsource(self, source):
"Stuff source in the filename cache"
filename = "<pyshell#%d>" % self.gid
self.gid = self.gid + 1
lines = source.split("\n")
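# linecache entries are (size, mtime, lines, fullname); mtime is 0 here
# because there is no real file, and the patched checkcache() above
# preserves these '<pyshell#...>' keys across cache flushes.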
linecache.cache[filename] = len(source)+1, 0, lines, filename
return filename
def prepend_syspath(self, filename):
"Prepend sys.path with file's directory if not already included"
self.runcommand("""if 1:
_filename = %r
import sys as _sys
from os.path import dirname as _dirname
_dir = _dirname(_filename)
if not _dir in _sys.path:
_sys.path.insert(0, _dir)
del _filename, _sys, _dirname, _dir
\n""" % (filename,))
def showsyntaxerror(self, filename=None):
"""Extend base class method: Add Colorizing
Color the offending position instead of printing it and pointing at it
with a caret.
"""
text = self.tkconsole.text
stuff = self.unpackerror()
if stuff:
msg, lineno, offset, line = stuff
if lineno == 1:
pos = "iomark + %d chars" % (offset-1)
else:
pos = "iomark linestart + %d lines + %d chars" % \
(lineno-1, offset-1)
text.tag_add("ERROR", pos)
text.see(pos)
char = text.get(pos)
if char and char in IDENTCHARS:
text.tag_add("ERROR", pos + " wordstart", pos)
self.tkconsole.resetoutput()
self.write("SyntaxError: %s\n" % str(msg))
else:
self.tkconsole.resetoutput()
InteractiveInterpreter.showsyntaxerror(self, filename)
self.tkconsole.showprompt()
def unpackerror(self):
type, value, tb = sys.exc_info()
ok = type is SyntaxError
if ok:
try:
msg, (dummy_filename, lineno, offset, line) = value
if not offset:
offset = 0
except:
ok = 0
if ok:
return msg, lineno, offset, line
else:
return None
def showtraceback(self):
"Extend base class method to reset output properly"
self.tkconsole.resetoutput()
self.checklinecache()
InteractiveInterpreter.showtraceback(self)
if self.tkconsole.getvar("<<toggle-jit-stack-viewer>>"):
self.tkconsole.open_stack_viewer()
def checklinecache(self):
c = linecache.cache
for key in c.keys():
if key[:1] + key[-1:] != "<>":
del c[key]
def runcommand(self, code):
"Run the code without invoking the debugger"
# The code better not raise an exception!
if self.tkconsole.executing:
self.display_executing_dialog()
return 0
if self.rpcclt:
self.rpcclt.remotequeue("exec", "runcode", (code,), {})
else:
exec code in self.locals
return 1
def runcode(self, code):
"Override base class method"
if self.tkconsole.executing:
self.interp.restart_subprocess()
self.checklinecache()
if self.save_warnings_filters is not None:
warnings.filters[:] = self.save_warnings_filters
self.save_warnings_filters = None
debugger = self.debugger
try:
self.tkconsole.beginexecuting()
if not debugger and self.rpcclt is not None:
self.active_seq = self.rpcclt.asyncqueue("exec", "runcode",
(code,), {})
elif debugger:
debugger.run(code, self.locals)
else:
exec code in self.locals
except SystemExit:
if not self.tkconsole.closing:
if tkMessageBox.askyesno(
"Exit?",
"Do you want to exit altogether?",
default="yes",
master=self.tkconsole.text):
raise
else:
self.showtraceback()
else:
raise
except:
if use_subprocess:
print >>self.tkconsole.stderr, \
"IDLE internal error in runcode()"
self.showtraceback()
self.tkconsole.endexecuting()
else:
if self.tkconsole.canceled:
self.tkconsole.canceled = False
print >>self.tkconsole.stderr, "KeyboardInterrupt"
else:
self.showtraceback()
finally:
if not use_subprocess:
try:
self.tkconsole.endexecuting()
except AttributeError: # shell may have closed
pass
def write(self, s):
"Override base class method"
self.tkconsole.stderr.write(s)
def display_port_binding_error(self):
tkMessageBox.showerror(
"Port Binding Error",
"IDLE can't bind to a TCP/IP port, which is necessary to "
"communicate with its Python execution server. This might be "
"because no networking is installed on this computer. "
"Run IDLE with the -n command line switch to start without a "
"subprocess and refer to Help/IDLE Help 'Running without a "
"subprocess' for further details.",
master=self.tkconsole.text)
def display_no_subprocess_error(self):
tkMessageBox.showerror(
"Subprocess Startup Error",
"IDLE's subprocess didn't make connection. Either IDLE can't "
"start a subprocess or personal firewall software is blocking "
"the connection.",
master=self.tkconsole.text)
def display_executing_dialog(self):
tkMessageBox.showerror(
"Already executing",
"The Python Shell window is already executing a command; "
"please wait until it is finished.",
master=self.tkconsole.text)
class PyShell(OutputWindow):
shell_title = "Python " + python_version() + " Shell"
# Override classes
ColorDelegator = ModifiedColorDelegator
UndoDelegator = ModifiedUndoDelegator
# Override menus
menu_specs = [
("file", "_File"),
("edit", "_Edit"),
("debug", "_Debug"),
("options", "_Options"),
("windows", "_Windows"),
("help", "_Help"),
]
if macosxSupport.runningAsOSXApp():
del menu_specs[-3]
menu_specs[-2] = ("windows", "_Window")
# New classes
from idlelib.IdleHistory import History
def __init__(self, flist=None):
if use_subprocess:
ms = self.menu_specs
if ms[2][0] != "shell":
ms.insert(2, ("shell", "She_ll"))
self.interp = ModifiedInterpreter(self)
if flist is None:
root = Tk()
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
#
OutputWindow.__init__(self, flist, None, None)
#
## self.config(usetabs=1, indentwidth=8, context_use_ps1=1)
self.usetabs = True
# indentwidth must be 8 when using tabs. See note in EditorWindow:
self.indentwidth = 8
self.context_use_ps1 = True
#
text = self.text
text.configure(wrap="char")
text.bind("<<newline-and-indent>>", self.enter_callback)
text.bind("<<plain-newline-and-indent>>", self.linefeed_callback)
text.bind("<<interrupt-execution>>", self.cancel_callback)
text.bind("<<end-of-file>>", self.eof_callback)
text.bind("<<open-stack-viewer>>", self.open_stack_viewer)
text.bind("<<toggle-debugger>>", self.toggle_debugger)
text.bind("<<toggle-jit-stack-viewer>>", self.toggle_jit_stack_viewer)
if use_subprocess:
text.bind("<<view-restart>>", self.view_restart_mark)
text.bind("<<restart-shell>>", self.restart_shell)
#
self.save_stdout = sys.stdout
self.save_stderr = sys.stderr
self.save_stdin = sys.stdin
from idlelib import IOBinding
self.stdin = PseudoInputFile(self, "stdin", IOBinding.encoding)
self.stdout = PseudoOutputFile(self, "stdout", IOBinding.encoding)
self.stderr = PseudoOutputFile(self, "stderr", IOBinding.encoding)
self.console = PseudoOutputFile(self, "console", IOBinding.encoding)
if not use_subprocess:
sys.stdout = self.stdout
sys.stderr = self.stderr
sys.stdin = self.stdin
#
self.history = self.History(self.text)
#
self.pollinterval = 50 # millisec
def get_standard_extension_names(self):
return idleConf.GetExtensions(shell_only=True)
reading = False
executing = False
canceled = False
endoffile = False
closing = False
_stop_readline_flag = False
def set_warning_stream(self, stream):
global warning_stream
warning_stream = stream
def get_warning_stream(self):
return warning_stream
def toggle_debugger(self, event=None):
if self.executing:
tkMessageBox.showerror("Don't debug now",
"You can only toggle the debugger when idle",
master=self.text)
self.set_debugger_indicator()
return "break"
else:
db = self.interp.getdebugger()
if db:
self.close_debugger()
else:
self.open_debugger()
def set_debugger_indicator(self):
db = self.interp.getdebugger()
self.setvar("<<toggle-debugger>>", not not db)
def toggle_jit_stack_viewer(self, event=None):
pass # All we need is the variable
def close_debugger(self):
db = self.interp.getdebugger()
if db:
self.interp.setdebugger(None)
db.close()
if self.interp.rpcclt:
RemoteDebugger.close_remote_debugger(self.interp.rpcclt)
self.resetoutput()
self.console.write("[DEBUG OFF]\n")
sys.ps1 = ">>> "
self.showprompt()
self.set_debugger_indicator()
def open_debugger(self):
if self.interp.rpcclt:
dbg_gui = RemoteDebugger.start_remote_debugger(self.interp.rpcclt,
self)
else:
dbg_gui = Debugger.Debugger(self)
self.interp.setdebugger(dbg_gui)
dbg_gui.load_breakpoints()
sys.ps1 = "[DEBUG ON]\n>>> "
self.showprompt()
self.set_debugger_indicator()
def beginexecuting(self):
"Helper for ModifiedInterpreter"
self.resetoutput()
self.executing = 1
def endexecuting(self):
"Helper for ModifiedInterpreter"
self.executing = 0
self.canceled = 0
self.showprompt()
def close(self):
"Extend EditorWindow.close()"
if self.executing:
response = tkMessageBox.askokcancel(
"Kill?",
"The program is still running!\n Do you want to kill it?",
default="ok",
parent=self.text)
if response is False:
return "cancel"
self.stop_readline()
self.canceled = True
self.closing = True
# Wait for poll_subprocess() rescheduling to stop
self.text.after(2 * self.pollinterval, self.close2)
def close2(self):
return EditorWindow.close(self)
def _close(self):
"Extend EditorWindow._close(), shut down debugger and execution server"
self.close_debugger()
if use_subprocess:
self.interp.kill_subprocess()
# Restore std streams
sys.stdout = self.save_stdout
sys.stderr = self.save_stderr
sys.stdin = self.save_stdin
# Break cycles
self.interp = None
self.console = None
self.flist.pyshell = None
self.history = None
EditorWindow._close(self)
def ispythonsource(self, filename):
"Override EditorWindow method: never remove the colorizer"
return True
def short_title(self):
return self.shell_title
COPYRIGHT = \
'Type "copyright", "credits" or "license()" for more information.'
def begin(self):
self.resetoutput()
if use_subprocess:
nosub = ''
client = self.interp.start_subprocess()
if not client:
self.close()
return False
else:
nosub = "==== No Subprocess ===="
self.write("Python %s on %s\n%s\n%s" %
(sys.version, sys.platform, self.COPYRIGHT, nosub))
self.showprompt()
import Tkinter
Tkinter._default_root = None # 03Jan04 KBK What's this?
return True
def stop_readline(self):
if not self.reading: # no nested mainloop to exit.
return
self._stop_readline_flag = True
self.top.quit()
def readline(self):
save = self.reading
try:
self.reading = 1
self.top.mainloop() # nested mainloop()
finally:
self.reading = save
if self._stop_readline_flag:
self._stop_readline_flag = False
return ""
line = self.text.get("iomark", "end-1c")
if len(line) == 0: # may be EOF if we quit our mainloop with Ctrl-C
line = "\n"
if isinstance(line, unicode):
from idlelib import IOBinding
try:
line = line.encode(IOBinding.encoding)
except UnicodeError:
pass
self.resetoutput()
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
if self.endoffile:
self.endoffile = 0
line = ""
return line
def isatty(self):
return True
def cancel_callback(self, event=None):
try:
if self.text.compare("sel.first", "!=", "sel.last"):
return # Active selection -- always use default binding
except:
pass
if not (self.executing or self.reading):
self.resetoutput()
self.interp.write("KeyboardInterrupt\n")
self.showprompt()
return "break"
self.endoffile = 0
self.canceled = 1
if (self.executing and self.interp.rpcclt):
if self.interp.getdebugger():
self.interp.restart_subprocess()
else:
self.interp.interrupt_subprocess()
if self.reading:
self.top.quit() # exit the nested mainloop() in readline()
return "break"
def eof_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (delete next char) take over
if not (self.text.compare("iomark", "==", "insert") and
self.text.compare("insert", "==", "end-1c")):
return # Let the default binding (delete next char) take over
if not self.executing:
self.resetoutput()
self.close()
else:
self.canceled = 0
self.endoffile = 1
self.top.quit()
return "break"
def linefeed_callback(self, event):
# Insert a linefeed without entering anything (still autoindented)
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
return "break"
def enter_callback(self, event):
if self.executing and not self.reading:
return # Let the default binding (insert '\n') take over
# If some text is selected, recall the selection
# (but only if it is before the I/O mark)
try:
sel = self.text.get("sel.first", "sel.last")
if sel:
if self.text.compare("sel.last", "<=", "iomark"):
self.recall(sel, event)
return "break"
except:
pass
# If we're strictly before the line containing iomark, recall
# the current line, less a leading prompt, less leading or
# trailing whitespace
if self.text.compare("insert", "<", "iomark linestart"):
# Check if there's a relevant stdin range -- if so, use it
prev = self.text.tag_prevrange("stdin", "insert")
if prev and self.text.compare("insert", "<", prev[1]):
self.recall(self.text.get(prev[0], prev[1]), event)
return "break"
next = self.text.tag_nextrange("stdin", "insert")
if next and self.text.compare("insert lineend", ">=", next[0]):
self.recall(self.text.get(next[0], next[1]), event)
return "break"
# No stdin mark -- just get the current line, less any prompt
indices = self.text.tag_nextrange("console", "insert linestart")
if indices and \
self.text.compare(indices[0], "<=", "insert linestart"):
self.recall(self.text.get(indices[1], "insert lineend"), event)
else:
self.recall(self.text.get("insert linestart", "insert lineend"), event)
return "break"
# If we're between the beginning of the line and the iomark, i.e.
# in the prompt area, move to the end of the prompt
if self.text.compare("insert", "<", "iomark"):
self.text.mark_set("insert", "iomark")
# If we're in the current input and there's only whitespace
# beyond the cursor, erase that whitespace first
s = self.text.get("insert", "end-1c")
if s and not s.strip():
self.text.delete("insert", "end-1c")
# If we're in the current input before its last line,
# insert a newline right at the insert point
if self.text.compare("insert", "<", "end-1c linestart"):
self.newline_and_indent_event(event)
return "break"
# We're in the last line; append a newline and submit it
self.text.mark_set("insert", "end-1c")
if self.reading:
self.text.insert("insert", "\n")
self.text.see("insert")
else:
self.newline_and_indent_event(event)
self.text.tag_add("stdin", "iomark", "end-1c")
self.text.update_idletasks()
if self.reading:
self.top.quit() # Break out of recursive mainloop() in raw_input()
else:
self.runit()
return "break"
def recall(self, s, event):
# remove leading and trailing empty or whitespace lines
s = re.sub(r'^\s*\n', '', s)
s = re.sub(r'\n\s*$', '', s)
lines = s.split('\n')
self.text.undo_block_start()
try:
self.text.tag_remove("sel", "1.0", "end")
self.text.mark_set("insert", "end-1c")
prefix = self.text.get("insert linestart", "insert")
if prefix.rstrip().endswith(':'):
self.newline_and_indent_event(event)
prefix = self.text.get("insert linestart", "insert")
self.text.insert("insert", lines[0].strip())
if len(lines) > 1:
orig_base_indent = re.search(r'^([ \t]*)', lines[0]).group(0)
new_base_indent = re.search(r'^([ \t]*)', prefix).group(0)
for line in lines[1:]:
if line.startswith(orig_base_indent):
# replace orig base indentation with new indentation
line = new_base_indent + line[len(orig_base_indent):]
self.text.insert('insert', '\n'+line.rstrip())
finally:
self.text.see("insert")
self.text.undo_block_stop()
def runit(self):
line = self.text.get("iomark", "end-1c")
# Strip off last newline and surrounding whitespace.
# (To allow you to hit return twice to end a statement.)
i = len(line)
while i > 0 and line[i-1] in " \t":
i = i-1
if i > 0 and line[i-1] == "\n":
i = i-1
while i > 0 and line[i-1] in " \t":
i = i-1
line = line[:i]
more = self.interp.runsource(line)
def open_stack_viewer(self, event=None):
if self.interp.rpcclt:
return self.interp.remote_stack_viewer()
try:
sys.last_traceback
except:
tkMessageBox.showerror("No stack trace",
"There is no stack trace yet.\n"
"(sys.last_traceback is not defined)",
master=self.text)
return
from idlelib.StackViewer import StackBrowser
sv = StackBrowser(self.root, self.flist)
def view_restart_mark(self, event=None):
self.text.see("iomark")
self.text.see("restart")
def restart_shell(self, event=None):
"Callback for Run/Restart Shell Cntl-F6"
self.interp.restart_subprocess(with_cwd=True)
def showprompt(self):
self.resetoutput()
try:
s = str(sys.ps1)
except:
s = ""
self.console.write(s)
self.text.mark_set("insert", "end-1c")
self.set_line_and_column()
self.io.reset_undo()
def resetoutput(self):
source = self.text.get("iomark", "end-1c")
if self.history:
self.history.history_store(source)
if self.text.get("end-2c") != "\n":
self.text.insert("end-1c", "\n")
self.text.mark_set("iomark", "end-1c")
self.set_line_and_column()
sys.stdout.softspace = 0
def write(self, s, tags=()):
try:
self.text.mark_gravity("iomark", "right")
OutputWindow.write(self, s, tags, "iomark")
self.text.mark_gravity("iomark", "left")
except:
pass
if self.canceled:
self.canceled = 0
if not use_subprocess:
raise KeyboardInterrupt
def rmenu_check_cut(self):
try:
if self.text.compare('sel.first', '<', 'iomark'):
return 'disabled'
except TclError: # no selection, so the index 'sel.first' doesn't exist
return 'disabled'
return super(PyShell, self).rmenu_check_cut()
def rmenu_check_paste(self):
if self.text.compare('insert', '<', 'iomark'):
return 'disabled'
return super(PyShell, self).rmenu_check_paste()
class PseudoFile(io.TextIOBase):
def __init__(self, shell, tags, encoding=None):
self.shell = shell
self.tags = tags
self.softspace = 0
self._encoding = encoding
@property
def encoding(self):
return self._encoding
@property
def name(self):
return '<%s>' % self.tags
def isatty(self):
return True
class PseudoOutputFile(PseudoFile):
def writable(self):
return True
def write(self, s):
if self.closed:
raise ValueError("write to closed file")
if not isinstance(s, (basestring, bytearray)):
raise TypeError('must be string, not ' + type(s).__name__)
return self.shell.write(s, self.tags)
class PseudoInputFile(PseudoFile):
def __init__(self, shell, tags, encoding=None):
PseudoFile.__init__(self, shell, tags, encoding)
self._line_buffer = ''
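# _line_buffer holds input that a size-limited read() or readline()
# consumed from the shell but did not return; the next call drains it
# before asking the shell for more.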
def readable(self):
return True
def read(self, size=-1):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
elif not isinstance(size, int):
raise TypeError('must be int, not ' + type(size).__name__)
result = self._line_buffer
self._line_buffer = ''
if size < 0:
while True:
line = self.shell.readline()
if not line: break
result += line
else:
while len(result) < size:
line = self.shell.readline()
if not line: break
result += line
self._line_buffer = result[size:]
result = result[:size]
return result
def readline(self, size=-1):
if self.closed:
raise ValueError("read from closed file")
if size is None:
size = -1
elif not isinstance(size, int):
raise TypeError('must be int, not ' + type(size).__name__)
line = self._line_buffer or self.shell.readline()
if size < 0:
size = len(line)
self._line_buffer = line[size:]
return line[:size]
def close(self):
self.shell.close()
usage_msg = """\
USAGE: idle [-deins] [-t title] [file]*
idle [-dns] [-t title] (-c cmd | -r file) [arg]*
idle [-dns] [-t title] - [arg]*
-h print this help message and exit
-n run IDLE without a subprocess (see Help/IDLE Help for details)
The following options will override the IDLE 'settings' configuration:
-e open an edit window
-i open a shell window
The following options imply -i and will open a shell:
-c cmd run the command in a shell, or
-r file run script from file
-d enable the debugger
-s run $IDLESTARTUP or $PYTHONSTARTUP before anything else
-t title set title of shell window
A default edit window will be bypassed when -c, -r, or - are used.
[arg]* are passed to the command (-c) or script (-r) in sys.argv[1:].
Examples:
idle
Open an edit window or shell depending on IDLE's configuration.
idle foo.py foobar.py
Edit the files, also open a shell if configured to start with shell.
idle -est "Baz" foo.py
Run $IDLESTARTUP or $PYTHONSTARTUP, edit foo.py, and open a shell
window with the title "Baz".
idle -c "import sys; print sys.argv" "foo"
Open a shell window and run the command, passing "-c" in sys.argv[0]
and "foo" in sys.argv[1].
idle -d -s -r foo.py "Hello World"
Open a shell window, run a startup script, enable the debugger, and
run foo.py, passing "foo.py" in sys.argv[0] and "Hello World" in
sys.argv[1].
echo "import sys; print sys.argv" | idle - "foobar"
Open a shell window, run the script piped in, passing '' in sys.argv[0]
and "foobar" in sys.argv[1].
"""
def main():
global flist, root, use_subprocess
use_subprocess = True
enable_shell = False
enable_edit = False
debug = False
cmd = None
script = None
startup = False
try:
opts, args = getopt.getopt(sys.argv[1:], "c:deihnr:st:")
except getopt.error, msg:
sys.stderr.write("Error: %s\n" % str(msg))
sys.stderr.write(usage_msg)
sys.exit(2)
for o, a in opts:
if o == '-c':
cmd = a
enable_shell = True
if o == '-d':
debug = True
enable_shell = True
if o == '-e':
enable_edit = True
if o == '-h':
sys.stdout.write(usage_msg)
sys.exit()
if o == '-i':
enable_shell = True
if o == '-n':
use_subprocess = False
if o == '-r':
script = a
if os.path.isfile(script):
pass
else:
print "No script file: ", script
sys.exit()
enable_shell = True
if o == '-s':
startup = True
enable_shell = True
if o == '-t':
PyShell.shell_title = a
enable_shell = True
if args and args[0] == '-':
cmd = sys.stdin.read()
enable_shell = True
# process sys.argv and sys.path:
for i in range(len(sys.path)):
sys.path[i] = os.path.abspath(sys.path[i])
if args and args[0] == '-':
sys.argv = [''] + args[1:]
elif cmd:
sys.argv = ['-c'] + args
elif script:
sys.argv = [script] + args
elif args:
enable_edit = True
pathx = []
for filename in args:
pathx.append(os.path.dirname(filename))
for dir in pathx:
dir = os.path.abspath(dir)
if dir not in sys.path:
sys.path.insert(0, dir)
else:
dir = os.getcwd()
if not dir in sys.path:
sys.path.insert(0, dir)
# check the IDLE settings configuration (but command line overrides)
edit_start = idleConf.GetOption('main', 'General',
'editor-on-startup', type='bool')
enable_edit = enable_edit or edit_start
enable_shell = enable_shell or not enable_edit
# start editor and/or shell windows:
root = Tk(className="Idle")
fixwordbreaks(root)
root.withdraw()
flist = PyShellFileList(root)
macosxSupport.setupApp(root, flist)
if enable_edit:
if not (cmd or script):
for filename in args[:]:
if flist.open(filename) is None:
                    # filename is actually a directory, so skip it
args.remove(filename)
if not args:
flist.new()
if enable_shell:
shell = flist.open_shell()
if not shell:
return # couldn't open shell
if macosxSupport.runningAsOSXApp() and flist.dict:
# On OSX: when the user has double-clicked on a file that causes
# IDLE to be launched the shell window will open just in front of
# the file she wants to see. Lower the interpreter window when
# there are open files.
shell.top.lower()
shell = flist.pyshell
# handle remaining options:
if debug:
shell.open_debugger()
if startup:
filename = os.environ.get("IDLESTARTUP") or \
os.environ.get("PYTHONSTARTUP")
if filename and os.path.isfile(filename):
shell.interp.execfile(filename)
if shell and cmd or script:
shell.interp.runcommand("""if 1:
import sys as _sys
_sys.argv = %r
del _sys
\n""" % (sys.argv,))
if cmd:
shell.interp.execsource(cmd)
elif script:
shell.interp.prepend_syspath(script)
shell.interp.execfile(script)
# Check for problematic OS X Tk versions and print a warning message
# in the IDLE shell window; this is less intrusive than always opening
# a separate window.
tkversionwarning = macosxSupport.tkVersionWarning(root)
if tkversionwarning:
shell.interp.runcommand(''.join(("print('", tkversionwarning, "')")))
while flist.inversedict: # keep IDLE running while files are open.
root.mainloop()
root.destroy()
if __name__ == "__main__":
sys.modules['PyShell'] = sys.modules['__main__']
main()
|
{
"content_hash": "25682be2414c9cab84edc9f54a6cece8",
"timestamp": "",
"source": "github",
"line_count": 1566,
"max_line_length": 87,
"avg_line_length": 35.82375478927203,
"alnum_prop": 0.5674866310160428,
"repo_name": "efortuna/AndroidSDKClone",
"id": "81af85a84044e4f53ed3df3e69c1a2f3c54a9015",
"size": "56124",
"binary": false,
"copies": "35",
"ref": "refs/heads/master",
"path": "ndk_experimental/prebuilt/linux-x86_64/lib/python2.7/idlelib/PyShell.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AppleScript",
"bytes": "0"
},
{
"name": "Assembly",
"bytes": "79928"
},
{
"name": "Awk",
"bytes": "101642"
},
{
"name": "C",
"bytes": "110780727"
},
{
"name": "C++",
"bytes": "62609188"
},
{
"name": "CSS",
"bytes": "318944"
},
{
"name": "Component Pascal",
"bytes": "220"
},
{
"name": "Emacs Lisp",
"bytes": "4737"
},
{
"name": "Groovy",
"bytes": "82931"
},
{
"name": "IDL",
"bytes": "31867"
},
{
"name": "Java",
"bytes": "102919416"
},
{
"name": "JavaScript",
"bytes": "44616"
},
{
"name": "Objective-C",
"bytes": "196166"
},
{
"name": "Perl",
"bytes": "45617403"
},
{
"name": "Prolog",
"bytes": "1828886"
},
{
"name": "Python",
"bytes": "34997242"
},
{
"name": "Rust",
"bytes": "17781"
},
{
"name": "Shell",
"bytes": "1585527"
},
{
"name": "Visual Basic",
"bytes": "962"
},
{
"name": "XC",
"bytes": "802542"
}
],
"symlink_target": ""
}
|
import os
from github_acadwf import getenvOrDie
from github_acadwf import getCSVFromURL
GHA_GITHUB_ORG = getenvOrDie("GHA_GITHUB_ORG",
"Error: please set GHA_GITHUB_ORG to name of github organization for the course, e.g. UCSB-CS56-W14")
GHA_STUDENT_LIST_URL = getenvOrDie('GHA_STUDENT_LIST_URL',
"Error: please set GHA_STUDENT_LIST_URL to url of Google Spreadsheet with the github ids")
GHA_STAFF_LIST_URL = getenvOrDie('GHA_STAFF_LIST_URL',
"Error: please set GHA_STAFF_LIST_URL to url of Google Spreadsheet with the github ids")
GHA_WORKDIR = getenvOrDie('GHA_WORKDIR',
"Error: please set GHA_WORKDIR to a writeable scratch directory")
GHA_STARTPOINT_DIR = getenvOrDie('GHA_STARTPOINT_DIR',
"Error: please set GHA_STARTPOINT_DIR to a readable directory")
labSubmissionsDir = GHA_WORKDIR + "/labSubmissions"
if not os.access(labSubmissionsDir, os.W_OK):
os.mkdir(labSubmissionsDir, 0700)
import getpass
import sys
import argparse
from github_acadwf import pullRepoForGrading
# In the main directory of the repo where you are developing with PyGithub,
# type:
# git submodule add git://github.com/jacquev6/PyGithub.git PyGithub
# git submodule init
# git submodule update
#
# That will populate a PyGithub subdirectory with a clone of PyGithub
# Then, to add it to your Python path, you can do:
sys.path.append("./PyGithub")
from github import Github
from github import GithubException
parser = argparse.ArgumentParser(description='Pull repos for grading that start with a certain prefix')
parser.add_argument('prefix',help='prefix e.g. lab00')
parser.add_argument('-u','--githubUsername',
help="github username, default is current OS user",
default=getpass.getuser())
args = parser.parse_args()
username = args.githubUsername
pw = getpass.getpass()
g = Github(username, pw, user_agent='PyGithub')
org = g.get_organization(GHA_GITHUB_ORG)
## TODO: Add some error checking code here to see whether
## the lookup was successful. Do we try/except or check the return value?
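## A minimal sketch of such a check (assumption: newer PyGithub versions
## raise GithubException when the organization lookup fails):
## try:
##     org = g.get_organization(GHA_GITHUB_ORG)
## except GithubException as e:
##     sys.exit("Could not find organization %s: %s" % (GHA_GITHUB_ORG, e))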
repos = org.get_repos()
for repo in repos:
if repo.name.startswith(args.prefix):
print(repo.name)
pullRepoForGrading(repo,labSubmissionsDir+'/'+args.prefix)
|
{
"content_hash": "842bf396d2abccb2b09dccdb6cc1f3bb",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 125,
"avg_line_length": 33.25352112676056,
"alnum_prop": 0.7005506141465481,
"repo_name": "UCSB-CS-Using-GitHub-In-Courses/github-acad-scripts",
"id": "9f12974ec1f8adc2480fb16c61a8e0697624186e",
"size": "2380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "getLabSubmissions.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40902"
},
{
"name": "Shell",
"bytes": "3411"
}
],
"symlink_target": ""
}
|
from participantCollection import ParticipantCollection
import string
import re
import datetime
import pyperclip
# Edit Me!
# Remember, this is during signup, so current month is not March, it's February.
currentMonthTotalDays = 31
currentMonthURL = "https://www.reddit.com/r/pornfree/comments/6qvbft/stay_clean_august_this_thread_updated_daily_check/"
currentMonthIndex = datetime.date.today().month
currentMonthPenultimateDayIndex = currentMonthTotalDays - 1
currentMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[currentMonthIndex]
nextMonthIndex = currentMonthIndex % 12 + 1
nextMonthName = {1: 'January', 2: 'February', 3: 'March', 4: 'April', 5: 'May', 6: 'June', 7: 'July', 8: 'August', 9: 'September', 10: 'October', 11: 'November', 12: 'December'}[nextMonthIndex]
uppercaseMonth = string.upper(nextMonthName)
currentDayOfMonthIndex = datetime.date.today().day
currentDayOfMonthName = {1: 'first', 2: 'second', 3: 'third', 4: 'fourth', 5: 'fifth', 6: 'sixth', 7: 'seventh', 8: 'eighth', 9: 'ninth', 10: 'tenth', 11: 'eleventh', 12: 'twelfth', 13: 'thirteenth', 14: 'fourteenth', 15: 'fifteenth', 16: 'sixteenth', 17: 'seventeenth', 18: 'eighteenth', 19: 'nineteenth', 20: 'twentieth', 21: 'twenty-first', 22: 'twenty-second', 23: 'twenty-third', 24: 'twenty-fourth', 25: 'twenty-fifth', 26: 'twenty-sixth', 27: 'twenty-seventh', 28: 'twenty-eighth', 29: 'twenty-ninth', 30: 'thirtieth', 31: 'thirty-first'}[currentDayOfMonthIndex]
currentDayOfWeekName = {0: 'Monday', 1: 'Tuesday', 2: 'Wednesday', 3: 'Thursday', 4: 'Friday', 5: 'Saturday', 6: 'Sunday'}[datetime.date.today().weekday()]
# TODO: testing
# currentDayOfMonthIndex = 28
participants = ParticipantCollection()
initialNumber = participants.size()
def templateForParticipants():
answer = ""
answer += "Here are the **INITIAL_NUMBER participants** who have already signed up:\n\n"
for participant in participants.participants:
answer += "/u/" + participant.name
answer += "\n\n"
return answer
def templateForTooEarly():
answer = ""
answer += "(Too early. Come back on CURRENT_MONTH_NAME " + str(currentMonthTotalDays - 6) + ")\n"
return answer
def templateForFirstSignupDay():
answer = ""
answer += "STAY CLEAN UPPERCASE_MONTH! Sign up here! (CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX)\n"
answer += "Hey everybody, we had a great turnout for [Stay Clean CURRENT_MONTH_NAME](CURRENT_MONTH_URL) - let's see if we can knock it out of the park for NEXT_MONTH_NAME. Have you been clean for the month of CURRENT_MONTH_NAME? Great! Join us here, and let's keep our streak going. Did you slip in CURRENT_MONTH_NAME? Then NEXT_MONTH_NAME is your month to shine, and we will gladly fight the good fight along with you. Did you miss out on the CURRENT_MONTH_NAME challenge? Well then here is your opportunity to join us.\n"
answer += "\n"
answer += "If you would like to be included in this challenge, please post a brief comment to this thread, and I will include you. After midnight, NEXT_MONTH_NAME 1, the sign up window will close, and the challenge will begin."
return answer
def templateForMiddleSignupDays():
answer = ""
answer += "STAY CLEAN UPPERCASE_MONTH! Sign up here! (CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX)\n"
answer += "Hey everybody, so far **INITIAL_NUMBER participants** have signed up. Have you been clean for **[the month of CURRENT_MONTH_NAME](CURRENT_MONTH_URL)**? Great! Join us here, and let's keep our streak going. Did you slip in CURRENT_MONTH_NAME? Then NEXT_MONTH_NAME is your month to shine, and we will gladly fight the good fight along with you. Did you miss out on the CURRENT_MONTH_NAME challenge? Well then here is your opportunity to join us.\n"
answer += "\n"
answer += "If you would like to be included in this challenge, please post a brief comment to this thread (if you haven't already done so on an earlier signup thread), and I will include you. After midnight, NEXT_MONTH_NAME 1, the sign up window will close, and the challenge will begin.\n"
answer += "\n"
answer += templateForParticipants()
return answer
def templateForLastSignupDay():
answer = ""
answer += "LAST CHANCE TO SIGN UP FOR STAY CLEAN UPPERCASE_MONTH! Sign up here!\n"
answer += "The Stay Clean NEXT_MONTH_NAME challenge **begins tomorrow**! So far, we have **INITIAL_NUMBER participants** signed up. If you would like to be included in the challenge, please post a brief comment to this thread (if you haven't already done so on an earlier signup thread), and we will include you. After midnight tonight, we will not be accepting any more participants. I will create the official update post tomorrow.\n"
answer += "\n"
answer += templateForParticipants()
return answer
def templateToUse():
if currentDayOfMonthIndex <= (currentMonthTotalDays - 7):
return templateForTooEarly()
elif currentDayOfMonthIndex == (currentMonthTotalDays - 6):
return templateForFirstSignupDay()
elif (currentMonthTotalDays - 5) <= currentDayOfMonthIndex <= (currentMonthTotalDays - 1):
return templateForMiddleSignupDays()
elif currentMonthTotalDays == currentDayOfMonthIndex:
return templateForLastSignupDay()
def stringToPrint():
answer = templateToUse()
answer = re.sub('INITIAL_NUMBER', str(initialNumber), answer)
answer = re.sub('CURRENT_MONTH_INDEX', str(currentMonthIndex), answer)
answer = re.sub('CURRENT_MONTH_TOTAL_DAYS', str(currentMonthTotalDays), answer)
answer = re.sub('CURRENT_MONTH_PENULTIMATE_DAY_INDEX', str(currentMonthPenultimateDayIndex), answer)
answer = re.sub('CURRENT_MONTH_NAME', currentMonthName, answer)
answer = re.sub('CURRENT_MONTH_URL', currentMonthURL, answer)
answer = re.sub('NEXT_MONTH_INDEX', str(nextMonthIndex), answer)
answer = re.sub('NEXT_MONTH_NAME', nextMonthName, answer)
answer = re.sub('CURRENT_DAY_OF_MONTH_INDEX', str(currentDayOfMonthIndex), answer)
answer = re.sub('CURRENT_DAY_OF_MONTH_NAME', currentDayOfMonthName, answer)
answer = re.sub('CURRENT_DAY_OF_WEEK_NAME', currentDayOfWeekName, answer)
answer = re.sub('UPPERCASE_MONTH', uppercaseMonth, answer)
return answer
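# Example of the substitution chain above (hypothetical values): the template
# line
#   "STAY CLEAN UPPERCASE_MONTH! Sign up here! (CURRENT_MONTH_NAME CURRENT_DAY_OF_MONTH_INDEX)"
# becomes, when the current month is August,
#   "STAY CLEAN SEPTEMBER! Sign up here! (August 26)"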
outputString = stringToPrint()
print "============================================================="
print outputString
print "============================================================="
pyperclip.copy(outputString)
|
{
"content_hash": "af080c9188bea1c7802c5de6f63cd1ec",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 569,
"avg_line_length": 62.885714285714286,
"alnum_prop": 0.7022565500530062,
"repo_name": "foobarbazblarg/stayclean",
"id": "8613d89502630e93f92fb1b19c782e12c8e9fa9f",
"size": "6729",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stayclean-2017-september/display-during-signup.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4232161"
},
{
"name": "Shell",
"bytes": "52056"
}
],
"symlink_target": ""
}
|
"""
Author: Evan Hubinger
License: Apache 2.0
Description: Convenience functions for using Coconut as a module.
"""
# -----------------------------------------------------------------------------------------------------------------------
# IMPORTS:
# -----------------------------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
from coconut.root import * # NOQA
import sys
import os.path
import codecs
try:
from encodings import utf_8
except ImportError:
utf_8 = None
from coconut.integrations import embed
from coconut.exceptions import CoconutException
from coconut.command import Command
from coconut.command.cli import cli_version
from coconut.compiler import Compiler
from coconut.constants import (
version_tag,
code_exts,
coconut_import_hook_args,
coconut_kernel_kwargs,
)
# -----------------------------------------------------------------------------------------------------------------------
# COMMAND:
# -----------------------------------------------------------------------------------------------------------------------
GLOBAL_STATE = None
def get_state(state=None):
"""Get a Coconut state object; None gets a new state, False gets the global state."""
global GLOBAL_STATE
if state is None:
return Command()
elif state is False:
if GLOBAL_STATE is None:
GLOBAL_STATE = Command()
return GLOBAL_STATE
else:
return state
def cmd(cmd_args, interact=False, state=False, **kwargs):
"""Process command-line arguments."""
if isinstance(cmd_args, (str, bytes)):
cmd_args = cmd_args.split()
return get_state(state).cmd(cmd_args, interact=interact, **kwargs)
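# Hedged usage sketch (file names are hypothetical):
#   from coconut.convenience import cmd
#   cmd("prog.coco out_dir")         # same as running `coconut prog.coco out_dir`
#   cmd(["prog.coco", "out_dir"])    # argument-list form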
VERSIONS = {
"num": VERSION,
"name": VERSION_NAME,
"spec": VERSION_STR,
"tag": version_tag,
"-v": cli_version,
}
def version(which="num"):
"""Get the Coconut version."""
if which in VERSIONS:
return VERSIONS[which]
else:
raise CoconutException(
"invalid version type " + repr(which),
extra="valid versions are " + ", ".join(VERSIONS),
)
# -----------------------------------------------------------------------------------------------------------------------
# COMPILER:
# -----------------------------------------------------------------------------------------------------------------------
def setup(*args, **kwargs):
"""Set up the given state object."""
state = kwargs.pop("state", False)
return get_state(state).setup(*args, **kwargs)
PARSERS = {
"sys": lambda comp: comp.parse_sys,
"exec": lambda comp: comp.parse_exec,
"file": lambda comp: comp.parse_file,
"package": lambda comp: comp.parse_package,
"block": lambda comp: comp.parse_block,
"single": lambda comp: comp.parse_single,
"eval": lambda comp: comp.parse_eval,
"lenient": lambda comp: comp.parse_lenient,
"xonsh": lambda comp: comp.parse_xonsh,
}
# deprecated aliases
PARSERS["any"] = PARSERS["debug"] = PARSERS["lenient"]
def parse(code="", mode="sys", state=False, keep_internal_state=None):
"""Compile Coconut code."""
if keep_internal_state is None:
keep_internal_state = bool(state)
command = get_state(state)
if command.comp is None:
command.setup()
if mode not in PARSERS:
raise CoconutException(
"invalid parse mode " + repr(mode),
extra="valid modes are " + ", ".join(PARSERS),
)
return PARSERS[mode](command.comp)(code, keep_state=keep_internal_state)
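# Hedged example: compile a snippet with the "block" parser. The exact output
# shape depends on the Coconut version; shown only as a sketch.
#   from coconut.convenience import setup, parse
#   setup(target="3")
#   py_code = parse("x -> x + 1", "block")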
def coconut_eval(expression, globals=None, locals=None, state=False, **kwargs):
"""Compile and evaluate Coconut code."""
command = get_state(state)
if command.comp is None:
setup()
command.check_runner(set_sys_vars=False)
if globals is None:
globals = {}
command.runner.update_vars(globals)
compiled_python = parse(expression, "eval", state, **kwargs)
return eval(compiled_python, globals, locals)
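# Hedged example:
#   coconut_eval("[1, 2, 3] |> map$(x -> x * 2) |> list")  # -> [2, 4, 6]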
# -----------------------------------------------------------------------------------------------------------------------
# BREAKPOINT:
# -----------------------------------------------------------------------------------------------------------------------
def _coconut_breakpoint():
"""Determine coconut.embed depth based on whether we're being
called by Coconut's breakpoint() or Python's breakpoint()."""
if sys.version_info >= (3, 7):
return embed(depth=1)
else:
return embed(depth=2)
def use_coconut_breakpoint(on=True):
"""Switches the breakpoint() built-in (universally accessible via
coconut.__coconut__.breakpoint) to use coconut.embed."""
if on:
sys.breakpointhook = _coconut_breakpoint
else:
sys.breakpointhook = sys.__breakpointhook__
use_coconut_breakpoint()
# -----------------------------------------------------------------------------------------------------------------------
# AUTOMATIC COMPILATION:
# -----------------------------------------------------------------------------------------------------------------------
class CoconutImporter(object):
"""Finder and loader for compiling Coconut files at import time."""
ext = code_exts[0]
@staticmethod
def run_compiler(path):
"""Run the Coconut compiler on the given path."""
cmd([path] + list(coconut_import_hook_args))
def find_module(self, fullname, path=None):
"""Searches for a Coconut file of the given name and compiles it."""
basepaths = [""] + list(sys.path)
if fullname.startswith("."):
if path is None:
# we can't do a relative import if there's no package path
return
fullname = fullname[1:]
basepaths.insert(0, path)
fullpath = os.path.join(*fullname.split("."))
for head in basepaths:
path = os.path.join(head, fullpath)
filepath = path + self.ext
dirpath = os.path.join(path, "__init__" + self.ext)
if os.path.exists(filepath):
self.run_compiler(filepath)
# Coconut file was found and compiled, now let Python import it
return
if os.path.exists(dirpath):
self.run_compiler(path)
# Coconut package was found and compiled, now let Python import it
return
coconut_importer = CoconutImporter()
def auto_compilation(on=True):
"""Turn automatic compilation of Coconut files on or off."""
if on:
if coconut_importer not in sys.meta_path:
sys.meta_path.insert(0, coconut_importer)
else:
try:
sys.meta_path.remove(coconut_importer)
except ValueError:
pass
auto_compilation()
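# Hedged sketch of what the hook enables once this module is imported
# (`my_program` is a hypothetical name):
#   import coconut.convenience   # installs the import hook
#   import my_program            # compiles my_program.coco first, if present,
#                                # then falls through to the normal import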
# -----------------------------------------------------------------------------------------------------------------------
# ENCODING:
# -----------------------------------------------------------------------------------------------------------------------
if utf_8 is not None:
class CoconutStreamReader(utf_8.StreamReader, object):
"""Compile Coconut code from a stream of UTF-8."""
coconut_compiler = None
@classmethod
def compile_coconut(cls, source):
"""Compile the given Coconut source text."""
if cls.coconut_compiler is None:
cls.coconut_compiler = Compiler(**coconut_kernel_kwargs)
return cls.coconut_compiler.parse_sys(source)
@classmethod
def decode(cls, input_bytes, errors="strict"):
"""Decode and compile the given Coconut source bytes."""
input_str, len_consumed = super(CoconutStreamReader, cls).decode(input_bytes, errors)
return cls.compile_coconut(input_str), len_consumed
class CoconutIncrementalDecoder(utf_8.IncrementalDecoder, object):
"""Compile Coconut at the end of incrementally decoding UTF-8."""
invertible = False
_buffer_decode = CoconutStreamReader.decode
def get_coconut_encoding(encoding="coconut"):
"""Get a CodecInfo for the given Coconut encoding."""
if not encoding.startswith("coconut"):
return None
if encoding != "coconut":
raise CoconutException("unknown Coconut encoding: " + repr(encoding))
if utf_8 is None:
raise CoconutException("coconut encoding requires encodings.utf_8")
return codecs.CodecInfo(
name=encoding,
encode=utf_8.encode,
decode=CoconutStreamReader.decode,
incrementalencoder=utf_8.IncrementalEncoder,
incrementaldecoder=CoconutIncrementalDecoder,
streamreader=CoconutStreamReader,
streamwriter=utf_8.StreamWriter,
)
codecs.register(get_coconut_encoding)
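# Hedged usage sketch of the registered codec: a file whose first line is
#   # -*- coding: coconut -*-
# is decoded as UTF-8 and then compiled from Coconut to Python when read.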
|
{
"content_hash": "6ee294e9c0bb2075539ed3957790477f",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 121,
"avg_line_length": 33.50375939849624,
"alnum_prop": 0.5353456014362658,
"repo_name": "evhub/coconut",
"id": "917734d60d28ec20195b4f5d73674026e0f6c6bf",
"size": "9212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coconut/convenience.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "8029"
},
{
"name": "Python",
"bytes": "685257"
}
],
"symlink_target": ""
}
|
import string
import nltk
import json
from nltk.stem.porter import PorterStemmer
import numpy as np
import sys, argparse
reload(sys)
sys.setdefaultencoding('utf8')
#######
# based on http://www.cs.duke.edu/courses/spring14/compsci290/assignments/lab02.html
stemmer = PorterStemmer()
def stem_tokens(tokens, stemmer):
stemmed = []
for item in tokens:
stemmed.append(stemmer.stem(item))
return stemmed
def tokenize(text):
tokens = nltk.word_tokenize(text.lower())
tokens = [i for i in tokens if i not in string.punctuation]
stems = stem_tokens(tokens, stemmer)
str_lem = ""
for i in stems :
str_lem += i + " "
return str_lem
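# Hedged example (the exact stems depend on the Porter stemmer):
#   tokenize("Mental Rotation tasks") -> "mental rotat task "
# Note the trailing space added by the join loop above.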
def tokenizeList(list_text) :
token_list = []
for i in list_text :
token_tmp = tokenize(i)
token_list.append(token_tmp)
return token_list
# stem the list of key phrases as well, so they match against the stemmed
# text; multi-word phrases are rebuilt as stemmed strings
def createVocabularyDictTokenized(cluster = 0, mr_tools=[], mr_demographic=[] ) : # 0 if all, 1 if cluster tools, 2 if cluster demographic
dico = dict()
if cluster == 0 or cluster == 1 :
for m in mr_tools :
dico[tokenize(m)] = 0
if cluster == 0 or cluster == 2 :
for m in mr_demographic :
dico[tokenize(m)] = 0
return dico
def createVocabularyDict(cluster = 0, mr_tools=[], mr_demographic=[]) : # 0 if all, 1 if cluster tools, 2 if cluster demographic
# list of key word for the 2 clusters
dico = dict()
if cluster == 0 or cluster == 1 :
for m in mr_tools :
dico[m] = 0
if cluster == 0 or cluster == 2 :
for m in mr_demographic :
dico[m] = 0
return dico
def computeTiming(file_name, cluster = 0) :
    time = dict()
    # for each year we keep a dict() of key-phrase counts; the documents are
    # read from the json export, since the resulting pickle files only
    # contain the ids
    f_input = open(file_name, "rb")
    json_docs = json.load(f_input)
    f_input.close()
    for doc in json_docs :
        year = doc["year"]
        text = doc["title"]+" " + doc["abstract"]
        if year not in time :
            time[year] = createVocabularyDict(cluster)
        l_text = tokenize(text)
        tmp = createVocabularyDict(cluster)
        for m in tmp :
            if m in l_text :
                time[year][m] += 1
    return time
class Document(object) :
def __init__(self, istex_id, text, publicationDate, source):
self.istex_id = istex_id
self.text = text
self.publicationDate = publicationDate
self.source = source
# read the raw data files (mainly ucbl and mental rotation istex documents)
def read_json_inputs(path, source):
f_input = open(path, "rb")
json_docs = json.load(f_input)
f_input.close()
doc_list = []
for doc in json_docs :
doc_list.append(Document(doc["istex_id"], doc["title"]+" __ "+doc["abstract"], doc["publicationDate"], source))
return doc_list
# read the resulting file out of istex database, supposely containing documents related to mental rotation
def read_json_results(path) :
f_input = open(path, "rb")
json_docs = json.load(f_input)
f_input.close()
doc_list = []
for doc in json_docs :
doc_list.append(Document(doc["istex_id"], doc["text"], doc["publicationDate"], "ISTEX"))
return doc_list
def create_phrases_dict(nb_phrases) :
dico = dict()
for p in range(nb_phrases) :
dico[p] = 0
return dico
# nb_years : lines, len_voc: columns
def createNumpyArray(nb_years, len_voc) :
return np.zeros((nb_years, len_voc))
#list_voc contains the key phrases on which we do the statistics, the labels of the columns
def save_csv(max_year, min_year, count_years, all_voc, cluster_one, cluster_two, list_voc, output_file) :
f_out = open(output_file, "w")
str_first_line = "year\tnb_documents\tnb_doc_selected_domain\tcluster selected domain methods\tcluster demograhic"
for v in list_voc :
str_first_line += "\t"+v
f_out.write(str_first_line)
for year in range(min_year,max_year) :
str_line = str(year)+"\t"+str(count_years[year-min_year])+"\t"+str(all_voc[year-min_year][0])+"\t"+str(cluster_one[year-min_year][0])+"\t"+str(cluster_two[year-min_year][0])
for w in range(len(list_voc)) :
str_line += "\t"+str(numpy_array[year-min_year][w])
f_out.write("\n"+str_line)
f_out.close()
def document_in_cluster(list_voc, text) :
for v in list_voc :
if v in text :
return True
return False
if __name__ == "__main__" :
parser = argparse.ArgumentParser()
parser.add_argument("--input_file", default='results/chart_input_complete.json', type=str)
parser.add_argument("--key_words_cluster1", default=["mental rotation", "motor" , "task" , "stimuli" , "orientation", "event related potentials", "mismatch negativity", "attention deficit hyperactivity disorder", "lingual gyrus", "perirhinal cortex", "transcranial magnetic stimulation"], type=str, nargs='+')
parser.add_argument("--key_words_cluster2", default = ["mental rotation", "sex differences", "spatial ability", "visual", "age", "perform"], type=str, nargs='+')
#parser.add_argument("--output_csv", default='results/chart_csv_ucbl_clusters.csv', type=str) # is a .json file
parser.add_argument("--output_csv", default='results/chart_csv_istex_clusters.csv', type=str) # is a .json file
parser.add_argument("--min_year", default=1944, type=str) # is a .json file
parser.add_argument("--max_year", default=2017, type=str) # is a .json file
parser.add_argument("--voc", default=0, type=str) # all the selected voc= 0, only cluster 1 (tools) voc=1, cluster 2 : demographics voc=2
args = parser.parse_args()
input_file = args.input_file
output_file = args.output_csv
max_year = args.max_year
min_year = args.min_year
voc_selection = args.voc
cluster1= args.key_words_cluster1
cluster2= args.key_words_cluster2
docs = read_json_results(input_file)
#docs = read_json_inputs(input_file, "UCBL")
voc = createVocabularyDict(voc_selection, mr_tools=cluster1, mr_demographic=cluster2) # without tokenization
list_voc = voc.keys()
tokenstrings = tokenizeList(list_voc) # can give an integer to specify a selection of vocabulary
nb_phrases = len(tokenstrings)
voc_clusterone = tokenizeList(createVocabularyDict(1, mr_tools=cluster1, mr_demographic=cluster2).keys())
voc_clustertwo = tokenizeList(createVocabularyDict(2, mr_tools=cluster1, mr_demographic=cluster2).keys())
nb_years = max_year-min_year # from 1944 to 2016
numpy_array = createNumpyArray(nb_years, nb_phrases)
cluster_one = createNumpyArray(nb_years, 1)
cluster_two = createNumpyArray(nb_years, 1)
all_voc = createNumpyArray(nb_years, 1)
# need to retrieve the documents with an object: list of documents, each doc is an object with : time, paragraph, istex_id
count_years = np.zeros(nb_years)
for doc in docs :
token_text = tokenize(doc.text)
index_year = int(doc.publicationDate)-min_year
for p in range(nb_phrases) :
if tokenstrings[p] in token_text:
numpy_array[index_year][p] += 1
if document_in_cluster(voc_clusterone, token_text): # any text containing one of the key_phrase of the cluster is considered to belong to this cluster
cluster_one[index_year] += 1
if document_in_cluster(voc_clustertwo, token_text):
cluster_two[index_year] += 1
if document_in_cluster(tokenstrings, token_text):
all_voc[index_year] += 1
count_years[int(doc.publicationDate)-min_year] += 1
save_csv(max_year, min_year, count_years, all_voc, cluster_one, cluster_two, list_voc, output_file)
|
{
"content_hash": "e4ce9b81cba7dcf4626e2aec7f8b9efe",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 310,
"avg_line_length": 37.97872340425532,
"alnum_prop": 0.7053221288515407,
"repo_name": "lmartinet/ISTEX_MentalRotation",
"id": "2b4f3615dc05d2d4a4aed43756637cfff86fe6ac",
"size": "7745",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "TrendingDataForChart.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "150562"
},
{
"name": "Python",
"bytes": "74027"
},
{
"name": "Shell",
"bytes": "4390"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class ZValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="z", parent_name="mesh3d.lightposition", **kwargs):
super(ZValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
max=kwargs.pop("max", 100000),
min=kwargs.pop("min", -100000),
**kwargs,
)
|
{
"content_hash": "a4acfcef634349e06363a2902fcf1573",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 86,
"avg_line_length": 36.84615384615385,
"alnum_prop": 0.5908141962421712,
"repo_name": "plotly/plotly.py",
"id": "886e1e997ac1b839af7dfdd1d663972a2f9c7330",
"size": "479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/mesh3d/lightposition/_z.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
import datetime
from paypal.pro.helpers import PayPalWPP
from paypal.pro.exceptions import PayPalFailure
from django.conf import settings
from billing import Gateway, GatewayNotConfigured
from billing.utils.credit_card import (Visa, MasterCard, AmericanExpress,
Discover, InvalidCard)
from billing.signals import transaction_was_successful, transaction_was_unsuccessful
class PayPalGateway(Gateway):
default_currency = "USD"
supported_countries = ["US"]
supported_cardtypes = [Visa, MasterCard, AmericanExpress, Discover]
homepage_url = "https://merchant.paypal.com/us/cgi-bin/?&cmd=_render-content&content_ID=merchant/wp_pro"
display_name = "PayPal Website Payments Pro"
def __init__(self):
merchant_settings = getattr(settings, "MERCHANT_SETTINGS")
if not merchant_settings or not merchant_settings.get("pay_pal"):
raise GatewayNotConfigured("The '%s' gateway is not correctly "
"configured." % self.display_name)
pay_pal_settings = merchant_settings["pay_pal"]
@property
def service_url(self):
# Implemented in django-paypal
raise NotImplementedError
def purchase(self, money, credit_card, options=None):
"""Using PAYPAL DoDirectPayment, charge the given
credit card for specified money"""
if not options:
options = {}
if not self.validate_card(credit_card):
raise InvalidCard("Invalid Card")
params = {}
params['creditcardtype'] = credit_card.card_type.card_name
params['acct'] = credit_card.number
params['expdate'] = '%02d%04d' % (credit_card.month, credit_card.year)
params['cvv2'] = credit_card.verification_value
params['ipaddress'] = options['request'].META.get("REMOTE_ADDR", "")
params['amt'] = money
if options.get("email"):
params['email'] = options["email"]
address = options.get("billing_address", {})
first_name = None
last_name = None
try:
first_name, last_name = address.get("name", "").split(" ")
except ValueError:
pass
params['firstname'] = first_name or credit_card.first_name
params['lastname'] = last_name or credit_card.last_name
params['street'] = address.get("address1", '')
params['street2'] = address.get("address2", "")
params['city'] = address.get("city", '')
params['state'] = address.get("state", '')
params['countrycode'] = address.get("country", '')
params['zip'] = address.get("zip", '')
params['phone'] = address.get("phone", "")
shipping_address = options.get("shipping_address", None)
if shipping_address:
params['shiptoname'] = shipping_address["name"]
params['shiptostreet'] = shipping_address["address1"]
params['shiptostreet2'] = shipping_address.get("address2", "")
params['shiptocity'] = shipping_address["city"]
params['shiptostate'] = shipping_address["state"]
params['shiptocountry'] = shipping_address["country"]
params['shiptozip'] = shipping_address["zip"]
params['shiptophonenum'] = shipping_address.get("phone", "")
wpp = PayPalWPP(options['request'])
try:
response = wpp.doDirectPayment(params)
transaction_was_successful.send(sender=self,
type="purchase",
response=response)
except PayPalFailure as e:
transaction_was_unsuccessful.send(sender=self,
type="purchase",
response=e)
# Slight skewness because of the way django-paypal
# is implemented.
return {"status": "FAILURE", "response": e}
return {"status": response.ack.upper(), "response": response}
def authorize(self, money, credit_card, options=None):
if not options:
options = {}
if not self.validate_card(credit_card):
raise InvalidCard("Invalid Card")
raise NotImplementedError
def capture(self, money, authorization, options=None):
raise NotImplementedError
def void(self, identification, options=None):
raise NotImplementedError
def credit(self, money, identification, options=None):
raise NotImplementedError
def recurring(self, money, creditcard, options=None):
if not options:
options = {}
params = {}
params['profilestartdate'] = options.get('startdate') or datetime.datetime.now().strftime("%Y-%m-%dT00:00:00Z")
params['startdate'] = options.get('startdate') or datetime.datetime.now().strftime("%m%Y")
params['billingperiod'] = options.get('billingperiod') or 'Month'
params['billingfrequency'] = options.get('billingfrequency') or '1'
params['amt'] = money
params['desc'] = 'description of the billing'
params['creditcardtype'] = creditcard.card_type.card_name
params['acct'] = creditcard.number
params['expdate'] = '%02d%04d' % (creditcard.month, creditcard.year)
params['firstname'] = creditcard.first_name
params['lastname'] = creditcard.last_name
wpp = PayPalWPP(options.get('request', {}))
try:
response = wpp.createRecurringPaymentsProfile(params, direct=True)
transaction_was_successful.send(sender=self,
type="purchase",
response=response)
except PayPalFailure as e:
transaction_was_unsuccessful.send(sender=self,
type="purchase",
response=e)
# Slight skewness because of the way django-paypal
# is implemented.
return {"status": "FAILURE", "response": e}
return {"status": response.ack.upper(), "response": response}
def store(self, creditcard, options=None):
raise NotImplementedError
def unstore(self, identification, options=None):
raise NotImplementedError
|
{
"content_hash": "d3dda042898bdf3b4a557e5181e82513",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 119,
"avg_line_length": 43.27891156462585,
"alnum_prop": 0.5910091166299906,
"repo_name": "agiliq/merchant",
"id": "1e1125d2964e795f9d434d453a7f819beb711f84",
"size": "6362",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "billing/gateways/pay_pal_gateway.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "22046"
},
{
"name": "Makefile",
"bytes": "409"
},
{
"name": "Python",
"bytes": "419711"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from docker_registry_client.Image import Image
from docker_registry_client._BaseClient import BaseClientV1
from tests.mock_registry import mock_v1_registry
class TestImage(object):
def test_init(self):
url = mock_v1_registry()
image_id = 'test_image_id'
image = Image(image_id, BaseClientV1(url))
assert image.image_id == image_id
|
{
"content_hash": "812cf5f39258a7e96ce575fdb9ce17b9",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 59,
"avg_line_length": 31.46153846153846,
"alnum_prop": 0.7090464547677262,
"repo_name": "twaugh/docker-registry-client",
"id": "ee44adc3e3e19797eb04a7f3bdbeebb714804af9",
"size": "409",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_image.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "30169"
}
],
"symlink_target": ""
}
|
"""
f2py2e - Fortran to Python C/API generator. 2nd Edition.
See __usage__ below.
Copyright 1999--2005 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 08:31:19 $
Pearu Peterson
"""
__version__ = "$Revision: 1.90 $"[10:-1]
import __version__
f2py_version = __version__.version
import sys
import os
import pprint
import shutil
import types
import re
errmess=sys.stderr.write
#outmess=sys.stdout.write
show=pprint.pprint
import crackfortran
import rules
import cb_rules
import common_rules
import auxfuncs
import cfuncs
import capi_maps
import func2subr
import f90mod_rules
outmess = auxfuncs.outmess
try:
from numpy import __version__ as numpy_version
except ImportError:
numpy_version = 'N/A'
__usage__ = """\
Usage:
1) To construct extension module sources:
f2py [<options>] <fortran files> [[[only:]||[skip:]] \\
<fortran functions> ] \\
[: <fortran files> ...]
2) To compile fortran files and build extension modules:
f2py -c [<options>, <build_flib options>, <extra options>] <fortran files>
3) To generate signature files:
f2py -h <filename.pyf> ...< same options as in (1) >
Description: This program generates a Python C/API file (<modulename>module.c)
that contains wrappers for given fortran functions so that they
can be called from Python. With the -c option the corresponding
extension modules are built.
Options:
--g3-numpy Use numpy.f2py.lib tool, the 3rd generation of F2PY,
with NumPy support.
--2d-numpy Use numpy.f2py tool with NumPy support. [DEFAULT]
--2d-numeric Use f2py2e tool with Numeric support.
--2d-numarray Use f2py2e tool with Numarray support.
-h <filename> Write signatures of the fortran routines to file <filename>
and exit. You can then edit <filename> and use it instead
of <fortran files>. If <filename>==stdout then the
signatures are printed to stdout.
<fortran functions> Names of fortran routines for which Python C/API
functions will be generated. Default is all that are found
in <fortran files>.
<fortran files> Paths to fortran/signature files that will be scanned for
<fortran functions> in order to determine their signatures.
skip: Ignore fortran functions that follow until `:'.
only: Use only fortran functions that follow until `:'.
: Get back to <fortran files> mode.
-m <modulename> Name of the module; f2py generates a Python/C API
file <modulename>module.c or extension module <modulename>.
Default is 'untitled'.
--[no-]lower Do [not] lower the cases in <fortran files>. By default,
--lower is assumed with -h key, and --no-lower without -h key.
--build-dir <dirname> All f2py generated files are created in <dirname>.
Default is tempfile.mktemp().
--overwrite-signature Overwrite existing signature file.
--[no-]latex-doc Create (or not) <modulename>module.tex.
Default is --no-latex-doc.
--short-latex Create 'incomplete' LaTeX document (without commands
\\documentclass, \\tableofcontents, and \\begin{document},
\\end{document}).
--[no-]rest-doc Create (or not) <modulename>module.rst.
Default is --no-rest-doc.
--debug-capi Create C/API code that reports the state of the wrappers
during runtime. Useful for debugging.
--[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77
functions. --wrap-functions is default because it ensures
maximum portability/compiler independence.
--include_paths <path1>:<path2>:... Search include files from the given
directories.
--help-link [..] List system resources found by system_info.py. See also
--link-<resource> switch below. [..] is optional list
of resources names. E.g. try 'f2py --help-link lapack_opt'.
--quiet Run quietly.
--verbose Run with extra verbosity.
-v Print f2py version ID and exit.
numpy.distutils options (only effective with -c):
--fcompiler= Specify Fortran compiler type by vendor
--compiler= Specify C compiler type (as defined by distutils)
--help-fcompiler List available Fortran compilers and exit
--f77exec= Specify the path to F77 compiler
--f90exec= Specify the path to F90 compiler
--f77flags= Specify F77 compiler flags
--f90flags= Specify F90 compiler flags
--opt= Specify optimization flags
--arch= Specify architecture specific optimization flags
--noopt Compile without optimization
--noarch Compile without arch-dependent optimization
--debug Compile with debugging information
Extra options (only effective with -c):
--link-<resource> Link extension module with <resource> as defined
by numpy.distutils/system_info.py. E.g. to link
with optimized LAPACK libraries (vecLib on MacOSX,
ATLAS elsewhere), use --link-lapack_opt.
See also --help-link switch.
-L/path/to/lib/ -l<libname>
-D<define> -U<name>
-I/path/to/include/
<filename>.o <filename>.so <filename>.a
Using the following macros may be required with non-gcc Fortran
compilers:
-DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN
-DUNDERSCORE_G77
When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY
interface is printed out at exit (platforms: Linux).
When using -DF2PY_REPORT_ON_ARRAY_COPY=<int>, a message is
sent to stderr whenever F2PY interface makes a copy of an
array. Integer <int> sets the threshold for array sizes when
a message should be shown.
Version: %s
numpy Version: %s
Requires: Python 2.3 or higher.
License: NumPy license (see LICENSE.txt in the NumPy source code)
Copyright 1999 - 2005 Pearu Peterson all rights reserved.
http://cens.ioc.ee/projects/f2py2e/"""%(f2py_version, numpy_version)
def scaninputline(inputline):
files,funcs,skipfuncs,onlyfuncs,debug=[],[],[],[],[]
f,f2,f3,f4,f5,f6,f7=1,0,0,0,0,0,0
verbose = 1
dolc=-1
dolatexdoc = 0
dorestdoc = 0
wrapfuncs = 1
buildpath = '.'
include_paths = []
signsfile,modulename=None,None
options = {'buildpath':buildpath}
for l in inputline:
if l=='': pass
elif l=='only:': f=0
elif l=='skip:': f=-1
elif l==':': f=1;f4=0
elif l[:8]=='--debug-': debug.append(l[8:])
elif l=='--lower': dolc=1
elif l=='--build-dir': f6=1
elif l=='--no-lower': dolc=0
elif l=='--quiet': verbose = 0
elif l=='--verbose': verbose += 1
elif l=='--latex-doc': dolatexdoc=1
elif l=='--no-latex-doc': dolatexdoc=0
elif l=='--rest-doc': dorestdoc=1
elif l=='--no-rest-doc': dorestdoc=0
elif l=='--wrap-functions': wrapfuncs=1
elif l=='--no-wrap-functions': wrapfuncs=0
elif l=='--short-latex': options['shortlatex']=1
elif l=='--overwrite-signature': options['h-overwrite']=1
elif l=='-h': f2=1
elif l=='-m': f3=1
elif l[:2]=='-v':
print f2py_version
sys.exit()
elif l=='--show-compilers':
f5=1
elif l[:8]=='-include':
cfuncs.outneeds['userincludes'].append(l[9:-1])
cfuncs.userincludes[l[9:-1]]='#include '+l[8:]
elif l[:15]=='--include_paths':
f7=1
elif l[0]=='-':
errmess('Unknown option %s\n'%`l`)
sys.exit()
elif f2: f2=0;signsfile=l
elif f3: f3=0;modulename=l
elif f6: f6=0;buildpath=l
elif f7: f7=0;include_paths.extend(l.split(os.pathsep))
elif f==1:
try:
open(l).close()
files.append(l)
except IOError,detail:
errmess('IOError: %s. Skipping file "%s".\n'%(str(detail),l))
elif f==-1: skipfuncs.append(l)
elif f==0: onlyfuncs.append(l)
if not f5 and not files and not modulename:
print __usage__
sys.exit()
if not os.path.isdir(buildpath):
if not verbose:
outmess('Creating build directory %s'%(buildpath))
os.mkdir(buildpath)
if signsfile:
signsfile = os.path.join(buildpath,signsfile)
if signsfile and os.path.isfile(signsfile) and not options.has_key('h-overwrite'):
errmess('Signature file "%s" exists!!! Use --overwrite-signature to overwrite.\n'%(signsfile))
sys.exit()
options['debug']=debug
options['verbose']=verbose
if dolc==-1 and not signsfile: options['do-lower']=0
else: options['do-lower']=dolc
if modulename: options['module']=modulename
if signsfile: options['signsfile']=signsfile
if onlyfuncs: options['onlyfuncs']=onlyfuncs
if skipfuncs: options['skipfuncs']=skipfuncs
options['dolatexdoc'] = dolatexdoc
options['dorestdoc'] = dorestdoc
options['wrapfuncs'] = wrapfuncs
options['buildpath']=buildpath
options['include_paths']=include_paths
return files,options
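# Example of the scanner above (hedged; 'fib.f' is a hypothetical file that
# must exist, since scaninputline open()s each file to validate it):
#   files, options = scaninputline(['-m', 'mymod', 'fib.f'])
#   # files -> ['fib.f'], options['module'] -> 'mymod'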
def callcrackfortran(files,options):
rules.options=options
funcs=[]
crackfortran.debug=options['debug']
crackfortran.verbose=options['verbose']
if options.has_key('module'):
crackfortran.f77modulename=options['module']
if options.has_key('skipfuncs'):
crackfortran.skipfuncs=options['skipfuncs']
if options.has_key('onlyfuncs'):
crackfortran.onlyfuncs=options['onlyfuncs']
crackfortran.include_paths[:]=options['include_paths']
crackfortran.dolowercase=options['do-lower']
postlist=crackfortran.crackfortran(files)
if options.has_key('signsfile'):
outmess('Saving signatures to file "%s"\n'%(options['signsfile']))
pyf=crackfortran.crack2fortran(postlist)
if options['signsfile'][-6:]=='stdout':
sys.stdout.write(pyf)
else:
f=open(options['signsfile'],'w')
f.write(pyf)
f.close()
return postlist
def buildmodules(list):
cfuncs.buildcfuncs()
outmess('Building modules...\n')
modules,mnames,isusedby=[],[],{}
for i in range(len(list)):
if '__user__' in list[i]['name']:
cb_rules.buildcallbacks(list[i])
else:
if list[i].has_key('use'):
for u in list[i]['use'].keys():
if not isusedby.has_key(u): isusedby[u]=[]
isusedby[u].append(list[i]['name'])
modules.append(list[i])
mnames.append(list[i]['name'])
ret = {}
for i in range(len(mnames)):
if isusedby.has_key(mnames[i]):
outmess('\tSkipping module "%s" which is used by %s.\n'%(mnames[i],','.join(map(lambda s:'"%s"'%s,isusedby[mnames[i]]))))
else:
um=[]
if modules[i].has_key('use'):
for u in modules[i]['use'].keys():
if isusedby.has_key(u) and u in mnames:
um.append(modules[mnames.index(u)])
else:
outmess('\tModule "%s" uses nonexisting "%s" which will be ignored.\n'%(mnames[i],u))
ret[mnames[i]] = {}
dict_append(ret[mnames[i]],rules.buildmodule(modules[i],um))
return ret
def dict_append(d_out,d_in):
for (k,v) in d_in.items():
if not d_out.has_key(k):
d_out[k] = []
if type(v) is types.ListType:
d_out[k] = d_out[k] + v
else:
d_out[k].append(v)
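# Example: d = {'a': [1]}; dict_append(d, {'a': [2], 'b': 3}) leaves
# d == {'a': [1, 2], 'b': [3]} (list values extend, scalar values are appended).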
def run_main(comline_list):
"""Run f2py as if string.join(comline_list,' ') is used as a command line.
In case of using -h flag, return None.
"""
reload(crackfortran)
f2pydir=os.path.dirname(os.path.abspath(cfuncs.__file__))
fobjhsrc = os.path.join(f2pydir,'src','fortranobject.h')
fobjcsrc = os.path.join(f2pydir,'src','fortranobject.c')
files,options=scaninputline(comline_list)
auxfuncs.options=options
postlist=callcrackfortran(files,options)
isusedby={}
for i in range(len(postlist)):
if postlist[i].has_key('use'):
for u in postlist[i]['use'].keys():
if not isusedby.has_key(u): isusedby[u]=[]
isusedby[u].append(postlist[i]['name'])
for i in range(len(postlist)):
if postlist[i]['block']=='python module' and '__user__' in postlist[i]['name']:
if isusedby.has_key(postlist[i]['name']):
#if not quiet:
outmess('Skipping Makefile build for module "%s" which is used by %s\n'%(postlist[i]['name'],','.join(map(lambda s:'"%s"'%s,isusedby[postlist[i]['name']]))))
if options.has_key('signsfile'):
if options['verbose']>1:
outmess('Stopping. Edit the signature file and then run f2py on the signature file: ')
outmess('%s %s\n'%(os.path.basename(sys.argv[0]),options['signsfile']))
return
for i in range(len(postlist)):
if postlist[i]['block']!='python module':
if not options.has_key('python module'):
errmess('Tip: If your original code is Fortran source then you must use -m option.\n')
raise TypeError,'All blocks must be python module blocks but got %s'%(`postlist[i]['block']`)
auxfuncs.debugoptions=options['debug']
f90mod_rules.options=options
auxfuncs.wrapfuncs=options['wrapfuncs']
ret=buildmodules(postlist)
for mn in ret.keys():
dict_append(ret[mn],{'csrc':fobjcsrc,'h':fobjhsrc})
return ret
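# Hedged example (module name and source file are hypothetical):
#   import f2py2e
#   r = f2py2e.run_main(['-m', 'scalar', 'scalar.f'])
#   # r maps module names to the generated sources; None when -h was used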
def filter_files(prefix,suffix,files,remove_prefix=None):
"""
Filter files by prefix and suffix.
"""
filtered,rest = [],[]
match = re.compile(prefix+r'.*'+suffix+r'\Z').match
if remove_prefix:
ind = len(prefix)
else:
ind = 0
for file in [x.strip() for x in files]:
if match(file): filtered.append(file[ind:])
else: rest.append(file)
return filtered,rest
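# Example:
#   filter_files('-I', '', ['-I/usr/include', 'foo.f'], remove_prefix=1)
#   -> (['/usr/include'], ['foo.f'])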
def get_prefix(module):
p = os.path.dirname(os.path.dirname(module.__file__))
return p
def run_compile():
"""
Do it all in one call!
"""
import tempfile,os,shutil
i = sys.argv.index('-c')
del sys.argv[i]
remove_build_dir = 0
try: i = sys.argv.index('--build-dir')
except ValueError: i=None
if i is not None:
build_dir = sys.argv[i+1]
del sys.argv[i+1]
del sys.argv[i]
else:
remove_build_dir = 1
build_dir = os.path.join(tempfile.mktemp())
sysinfo_flags = filter(re.compile(r'[-][-]link[-]').match,sys.argv[1:])
sys.argv = filter(lambda a,flags=sysinfo_flags:a not in flags,sys.argv)
if sysinfo_flags:
sysinfo_flags = [f[7:] for f in sysinfo_flags]
f2py_flags = filter(re.compile(r'[-][-]((no[-]|)(wrap[-]functions|lower)|debug[-]capi|quiet)|[-]include').match,sys.argv[1:])
sys.argv = filter(lambda a,flags=f2py_flags:a not in flags,sys.argv)
f2py_flags2 = []
fl = 0
for a in sys.argv[1:]:
if a in ['only:','skip:']:
fl = 1
elif a==':':
fl = 0
if fl or a==':':
f2py_flags2.append(a)
if f2py_flags2 and f2py_flags2[-1]!=':':
f2py_flags2.append(':')
f2py_flags.extend(f2py_flags2)
sys.argv = filter(lambda a,flags=f2py_flags2:a not in flags,sys.argv)
flib_flags = filter(re.compile(r'[-][-]((f(90)?compiler([-]exec|)|compiler)=|help[-]compiler)').match,sys.argv[1:])
sys.argv = filter(lambda a,flags=flib_flags:a not in flags,sys.argv)
fc_flags = filter(re.compile(r'[-][-]((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help[-]fcompiler))').match,sys.argv[1:])
sys.argv = filter(lambda a,flags=fc_flags:a not in flags,sys.argv)
if 1:
del_list = []
for s in flib_flags:
v = '--fcompiler='
if s[:len(v)]==v:
from numpy.distutils import fcompiler
fcompiler.load_all_fcompiler_classes()
allowed_keys = fcompiler.fcompiler_class.keys()
nv = ov = s[len(v):].lower()
if ov not in allowed_keys:
vmap = {} # XXX
try:
nv = vmap[ov]
except KeyError:
if ov not in vmap.values():
print 'Unknown vendor: "%s"' % (s[len(v):])
nv = ov
i = flib_flags.index(s)
flib_flags[i] = '--fcompiler=' + nv
continue
for s in del_list:
i = flib_flags.index(s)
del flib_flags[i]
assert len(flib_flags)<=2,`flib_flags`
setup_flags = filter(re.compile(r'[-][-](verbose)').match,sys.argv[1:])
sys.argv = filter(lambda a,flags=setup_flags:a not in flags,sys.argv)
if '--quiet' in f2py_flags:
setup_flags.append('--quiet')
modulename = 'untitled'
sources = sys.argv[1:]
if '-m' in sys.argv:
i = sys.argv.index('-m')
modulename = sys.argv[i+1]
del sys.argv[i+1],sys.argv[i]
sources = sys.argv[1:]
else:
from numpy.distutils.command.build_src import get_f2py_modulename
pyf_files,sources = filter_files('','[.]pyf([.]src|)',sources)
sources = pyf_files + sources
for f in pyf_files:
modulename = get_f2py_modulename(f)
if modulename:
break
extra_objects, sources = filter_files('','[.](o|a|so)',sources)
include_dirs, sources = filter_files('-I','',sources,remove_prefix=1)
library_dirs, sources = filter_files('-L','',sources,remove_prefix=1)
libraries, sources = filter_files('-l','',sources,remove_prefix=1)
undef_macros, sources = filter_files('-U','',sources,remove_prefix=1)
define_macros, sources = filter_files('-D','',sources,remove_prefix=1)
using_numarray = 0
using_numeric = 0
for i in range(len(define_macros)):
name_value = define_macros[i].split('=',1)
if len(name_value)==1:
name_value.append(None)
if len(name_value)==2:
define_macros[i] = tuple(name_value)
else:
print 'Invalid use of -D:',name_value
from numpy.distutils.system_info import get_info
num_include_dir = None
num_info = {}
#import numpy
#n = 'numpy'
#p = get_prefix(numpy)
#from numpy.distutils.misc_util import get_numpy_include_dirs
#num_info = {'include_dirs': get_numpy_include_dirs()}
if num_info:
include_dirs.extend(num_info.get('include_dirs',[]))
from numpy.distutils.core import setup,Extension
ext_args = {'name':modulename,'sources':sources,
'include_dirs': include_dirs,
'library_dirs': library_dirs,
'libraries': libraries,
'define_macros': define_macros,
'undef_macros': undef_macros,
'extra_objects': extra_objects,
'f2py_options': f2py_flags,
}
if sysinfo_flags:
from numpy.distutils.misc_util import dict_append
for n in sysinfo_flags:
i = get_info(n)
if not i:
outmess('No %s resources found in system'\
' (try `f2py --help-link`)\n' % (`n`))
dict_append(ext_args,**i)
ext = Extension(**ext_args)
sys.argv = [sys.argv[0]] + setup_flags
sys.argv.extend(['build',
'--build-temp',build_dir,
'--build-base',build_dir,
'--build-platlib','.'])
if fc_flags:
sys.argv.extend(['config_fc']+fc_flags)
if flib_flags:
sys.argv.extend(['build_ext']+flib_flags)
setup(ext_modules = [ext])
if remove_build_dir and os.path.exists(build_dir):
outmess('Removing build directory %s\n'%(build_dir))
shutil.rmtree(build_dir)
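# Hedged example (run_compile reads sys.argv directly; file name hypothetical):
#   sys.argv = ['f2py', '-c', '-m', 'mymod', 'fib.f']
#   run_compile()   # '-c' must be present; run_compile() removes it itself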
def main():
if '--help-link' in sys.argv[1:]:
sys.argv.remove('--help-link')
from numpy.distutils.system_info import show_all
show_all()
return
if '-c' in sys.argv[1:]:
run_compile()
else:
run_main(sys.argv[1:])
#if __name__ == "__main__":
# main()
# EOF
|
{
"content_hash": "4d6d8825f5d25a52f03e83005b3017f4",
"timestamp": "",
"source": "github",
"line_count": 565,
"max_line_length": 173,
"avg_line_length": 36.876106194690266,
"alnum_prop": 0.5859371250299976,
"repo_name": "santisiri/popego",
"id": "fd207b4eb9fc80fcf5d80ec9276108db8d4a2479",
"size": "20857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "envs/ALPHA-POPEGO/lib/python2.5/site-packages/numpy-1.0.4-py2.5-linux-x86_64.egg/numpy/f2py/f2py2e.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1246"
},
{
"name": "C",
"bytes": "504141"
},
{
"name": "C++",
"bytes": "26125"
},
{
"name": "CSS",
"bytes": "342653"
},
{
"name": "FORTRAN",
"bytes": "4872"
},
{
"name": "GAP",
"bytes": "13267"
},
{
"name": "Genshi",
"bytes": "407"
},
{
"name": "Groff",
"bytes": "17116"
},
{
"name": "HTML",
"bytes": "383181"
},
{
"name": "JavaScript",
"bytes": "1090769"
},
{
"name": "Makefile",
"bytes": "2441"
},
{
"name": "Mako",
"bytes": "376944"
},
{
"name": "Python",
"bytes": "20895618"
},
{
"name": "Ruby",
"bytes": "3380"
},
{
"name": "Shell",
"bytes": "23581"
},
{
"name": "Smarty",
"bytes": "522"
},
{
"name": "TeX",
"bytes": "35712"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Article',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True)),
('title', models.CharField(max_length=50)),
('ingress', models.TextField(blank=True, max_length=100)),
('content', models.TextField()),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('created',),
},
),
]
|
{
"content_hash": "3038a99adf320668f2556d4ec06fdb75",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 120,
"avg_line_length": 32.45161290322581,
"alnum_prop": 0.584493041749503,
"repo_name": "sardred/andlarweb",
"id": "18861a2aadb5ed46bb144bcc50bff644ad3d2057",
"size": "1079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/articles/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1336"
},
{
"name": "HTML",
"bytes": "1432"
},
{
"name": "JavaScript",
"bytes": "12629"
},
{
"name": "Python",
"bytes": "14442"
}
],
"symlink_target": ""
}
|