from __future__ import unicode_literals
import os
from setuptools import setup, find_packages
version = __import__('catalog').__version__
readme = open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()
setup(
name='django-shop-catalog',
version=version,
description='Catalog app for django SHOP',
long_description=readme,
author='Dino Perovic',
author_email='dino.perovic@gmail.com',
url='http://pypi.python.org/pypi/django-shop-catalog/',
packages=find_packages(exclude=('tests', 'tests.*')),
license='BSD',
install_requires=(
'django>=1.6,<1.8',
'django-cms>=3.0.1',
'django-shop>=0.2.0',
'django-filer>=0.9.5',
'django-mptt>=0.6.0',
'django-parler>=1.4',
'measurement>=1.7.2',
'django-currencies>=0.3.2',
),
include_package_data=True,
zip_safe=False,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
],
test_suite='runtests.main',
tests_require=(
'django-nose>=1.2',
),
)
|
{
"content_hash": "e64130775cb4af65248daa1d227fad64",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 75,
"avg_line_length": 28.622222222222224,
"alnum_prop": 0.5892857142857143,
"repo_name": "dinoperovic/django-shop-catalog",
"id": "adddad8655df8a3b07f9baa276c6a0956a63685b",
"size": "1334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4264"
},
{
"name": "Makefile",
"bytes": "178"
},
{
"name": "Python",
"bytes": "182299"
}
],
"symlink_target": ""
}
|
"""Collection of helper functions."""
import urlparse
from google.appengine.ext import db
from codereview.exceptions import FetchError
def make_url(base, filename, rev):
"""Helper to construct the URL to fetch.
Args:
base: The base property of the Issue to which the Patch belongs.
filename: The filename property of the Patch instance.
rev: Revision number, or None for head revision.
Returns:
A URL referring to the given revision of the file.
"""
scheme, netloc, path, _, _, _ = urlparse.urlparse(base)
if netloc.endswith(".googlecode.com"):
# Handle Google code repositories
if rev is None:
raise FetchError("Can't access googlecode.com without a revision")
if not path.startswith("/svn/"):
raise FetchError( "Malformed googlecode.com URL (%s)" % base)
path = path[5:] # Strip "/svn/"
url = "%s://%s/svn-history/r%d/%s/%s" % (scheme, netloc, rev,
path, filename)
return url
elif netloc.endswith("sourceforge.net") and rev is not None:
if path.strip().endswith("/"):
path = path.strip()[:-1]
else:
path = path.strip()
splitted_path = path.split("/")
url = "%s://%s/%s/!svn/bc/%d/%s/%s" % (scheme, netloc,
"/".join(splitted_path[1:3]), rev,
"/".join(splitted_path[3:]),
filename)
return url
# Default for viewvc-based URLs (svn.python.org)
url = base
if not url.endswith('/'):
url += '/'
url += filename
if rev is not None:
url += '?rev=%s' % rev
return url
def to_dbtext(text):
"""Helper to turn a string into a db.Text instance.
Args:
text: a string.
Returns:
A db.Text instance.
"""
if isinstance(text, unicode):
# A TypeError is raised if text is unicode and an encoding is given.
return db.Text(text)
else:
try:
return db.Text(text, encoding='utf-8')
except UnicodeDecodeError:
return db.Text(text, encoding='latin-1')
def unify_linebreaks(text):
"""Helper to return a string with all line breaks converted to LF.
Args:
text: a string.
Returns:
A string with all line breaks converted to LF.
"""
return text.replace('\r\n', '\n').replace('\r', '\n')
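# A minimal usage sketch of make_url (repository URLs and filenames below
# are hypothetical, not taken from real issues):
#
#   make_url("http://example.googlecode.com/svn/trunk", "utils.py", 42)
#   # -> "http://example.googlecode.com/svn-history/r42/trunk/utils.py"
#
#   make_url("http://svn.python.org/view/python/trunk", "setup.py", None)
#   # -> "http://svn.python.org/view/python/trunk/setup.py"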
|
{
"content_hash": "bd34a70a29eac291f73caaa02889a33a",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 77,
"avg_line_length": 28.4390243902439,
"alnum_prop": 0.5943396226415094,
"repo_name": "nbodepallictr/touchites-test",
"id": "3cbb5950fcf1a6f953b6994c6cbcf692e58ede76",
"size": "2908",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "codereview/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18844"
},
{
"name": "Diff",
"bytes": "1199"
},
{
"name": "HTML",
"bytes": "77212"
},
{
"name": "JavaScript",
"bytes": "121613"
},
{
"name": "Makefile",
"bytes": "1320"
},
{
"name": "Python",
"bytes": "357390"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from warnings import warn
from django.shortcuts import render
from django.views.decorators.http import require_GET
from paypal.standard.pdt.forms import PayPalPDTForm
from paypal.standard.pdt.models import PayPalPDT
@require_GET
def pdt(request, template="pdt/pdt.html", context=None):
"""Standard implementation of a view that processes PDT and then renders a template
For more advanced uses, create your own view and call process_pdt.
"""
warn("Use of pdt view is deprecated. Instead you should create your\n"
"own view, and use the process_pdt helper function",
DeprecationWarning)
pdt_obj, failed = process_pdt(request)
context = context or {}
context.update({"failed": failed, "pdt_obj": pdt_obj})
return render(request, template, context)
def process_pdt(request):
"""
Payment data transfer implementation:
https://developer.paypal.com/webapps/developer/docs/classic/products/payment-data-transfer/
This function returns a tuple of (pdt_obj, failed)
pdt_obj is an object of type PayPalPDT
failed is a flag that is True if the input data didn't pass basic validation.
    Note: even when failed=False you must still check that the pdt_obj is not
    flagged, i.e. pdt_obj.flag == False.
"""
pdt_obj = None
txn_id = request.GET.get('tx')
failed = False
if txn_id is not None:
# If an existing transaction with the id tx exists: use it
try:
pdt_obj = PayPalPDT.objects.get(txn_id=txn_id)
except PayPalPDT.DoesNotExist:
# This is a new transaction so we continue processing PDT request
pass
if pdt_obj is None:
form = PayPalPDTForm(request.GET)
if form.is_valid():
try:
pdt_obj = form.save(commit=False)
except Exception as e:
error = repr(e)
failed = True
else:
error = form.errors
failed = True
if failed:
pdt_obj = PayPalPDT()
pdt_obj.set_flag("Invalid form. %s" % error)
pdt_obj.initialize(request)
if not failed:
# The PDT object gets saved during verify
pdt_obj.verify()
else:
pass # we ignore any PDT requests that don't have a transaction id
return (pdt_obj, failed)
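# A minimal sketch of the replacement the deprecation warning above
# recommends: a custom view built on process_pdt (the template path and
# fulfilment step are illustrative):
#
# @require_GET
# def my_pdt_view(request):
#     pdt_obj, failed = process_pdt(request)
#     if not failed and not pdt_obj.flag:
#         ...  # fulfil the order here
#     return render(request, "my_app/pdt.html",
#                   {"failed": failed, "pdt_obj": pdt_obj})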
|
{
"content_hash": "38db7e039be808f915ad6f28147de384",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 95,
"avg_line_length": 32.85333333333333,
"alnum_prop": 0.6229707792207793,
"repo_name": "aldenjenkins/foobargamingwebsite",
"id": "cbd80f79fb2bc9a76fcfdab1579bb86a6d4cb9d3",
"size": "2510",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paypal/standard/pdt/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "654042"
},
{
"name": "HTML",
"bytes": "460378"
},
{
"name": "JavaScript",
"bytes": "664289"
},
{
"name": "Python",
"bytes": "1450807"
},
{
"name": "SourcePawn",
"bytes": "6208"
}
],
"symlink_target": ""
}
|
import m5
import runfs
import base64
import tempfile
import argparse
import sys
import os
from amd import AmdGPUOptions
from common import Options
from common import GPUTLBOptions
from ruby import Ruby
rodinia_runscript = '''\
export LD_LIBRARY_PATH=/opt/rocm/lib:$LD_LIBRARY_PATH
export HSA_ENABLE_SDMA=0
dmesg -n3
dd if=/root/roms/vega10.rom of=/dev/mem bs=1k seek=768 count=128
if [ ! -f /lib/modules/`uname -r`/updates/dkms/amdgpu.ko ]; then
echo "ERROR: Missing DKMS package for kernel `uname -r`. Exiting gem5."
/sbin/m5 exit
fi
modprobe -v amdgpu ip_block_mask=0xff ppfeaturemask=0 dpm=0 audio=0
echo "Running {}"
cd /home/gem5/HIP-Examples/rodinia_3.0/hip/{}/
make clean
make
make test
/sbin/m5 exit
'''
def addRodiniaOptions(parser):
parser.add_argument("-a", "--app", default=None,
choices=['b+tree',
'backprop',
'bfs',
'cfd',
'dwt2d',
'gaussian',
'heartwall',
'hotspot',
'hybridsort',
'kmeans',
'lavaMD',
'leukocyte',
'lud',
'myocyte',
'nn',
'nw',
'particlefilter',
'pathfinder',
'srad',
'streamcluster'],
help="GPU application to run")
parser.add_argument("-o", "--opts", default="",
help="GPU application arguments")
if __name__ == "__m5_main__":
parser = argparse.ArgumentParser()
runfs.addRunFSOptions(parser)
Options.addCommonOptions(parser)
AmdGPUOptions.addAmdGPUOptions(parser)
Ruby.define_options(parser)
GPUTLBOptions.tlb_options(parser)
addRodiniaOptions(parser)
# Parse now so we can override options
args = parser.parse_args()
# Create temp script to run application
if args.app is None:
print("No application given. Use %s -a <app>" % sys.argv[0])
sys.exit(1)
elif args.kernel is None:
print("No kernel path given. Use %s --kernel <vmlinux>" % sys.argv[0])
sys.exit(1)
elif args.disk_image is None:
print("No disk path given. Use %s --disk-image <linux>" % sys.argv[0])
sys.exit(1)
elif args.gpu_mmio_trace is None:
print("No MMIO trace path. Use %s --gpu-mmio-trace <path>"
% sys.argv[0])
sys.exit(1)
_, tempRunscript = tempfile.mkstemp()
with open(tempRunscript, 'w') as b64file:
runscriptStr = rodinia_runscript.format(args.app, args.app)
b64file.write(runscriptStr)
    if args.second_disk is None:
args.second_disk = args.disk_image
# Defaults for Vega10
args.ruby = True
args.cpu_type = 'X86KvmCPU'
args.num_cpus = 1
args.mem_size = '3GB'
args.dgpu = True
args.dgpu_mem_size = '16GB'
args.dgpu_start = '0GB'
args.checkpoint_restore = 0
args.disjoint = True
args.timing_gpu = True
args.script = tempRunscript
args.dgpu_xor_low_bit = 0
print(args.disk_image)
# Run gem5
runfs.runGpuFSSystem(args)
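# A typical invocation of this config script (the gem5 binary and file
# paths are illustrative; --kernel, --disk-image and --gpu-mmio-trace are
# the runfs options checked above):
#
#   build/VEGA_X86/gem5.opt configs/example/gpufs/hip_rodinia.py \
#       -a bfs --kernel vmlinux --disk-image ubuntu.img \
#       --gpu-mmio-trace vega_mmio.log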
|
{
"content_hash": "86e14289d7369a04f087af6876d3e677",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 78,
"avg_line_length": 31.672727272727272,
"alnum_prop": 0.5295637198622273,
"repo_name": "gem5/gem5",
"id": "3d7cef477f19110fdbe9c7722970a1dc3a55c7ba",
"size": "5034",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable",
"path": "configs/example/gpufs/hip_rodinia.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "145626"
},
{
"name": "Awk",
"bytes": "3386"
},
{
"name": "BASIC",
"bytes": "2884"
},
{
"name": "C",
"bytes": "3927153"
},
{
"name": "C++",
"bytes": "42960484"
},
{
"name": "CMake",
"bytes": "133888"
},
{
"name": "Dockerfile",
"bytes": "34102"
},
{
"name": "Emacs Lisp",
"bytes": "1914"
},
{
"name": "Forth",
"bytes": "354"
},
{
"name": "Fortran",
"bytes": "15436"
},
{
"name": "HTML",
"bytes": "146414"
},
{
"name": "Hack",
"bytes": "139769"
},
{
"name": "Java",
"bytes": "6966"
},
{
"name": "M4",
"bytes": "42624"
},
{
"name": "Makefile",
"bytes": "39573"
},
{
"name": "Perl",
"bytes": "23784"
},
{
"name": "Python",
"bytes": "8079781"
},
{
"name": "Roff",
"bytes": "8754"
},
{
"name": "SCSS",
"bytes": "2971"
},
{
"name": "SWIG",
"bytes": "173"
},
{
"name": "Scala",
"bytes": "5328"
},
{
"name": "Shell",
"bytes": "95638"
},
{
"name": "Starlark",
"bytes": "25668"
},
{
"name": "SuperCollider",
"bytes": "8869"
},
{
"name": "Vim Script",
"bytes": "4343"
},
{
"name": "sed",
"bytes": "3897"
}
],
"symlink_target": ""
}
|
import unittest, os, sys, new
from jpypetest import *
import jpype
import os.path
def suite() :
return unittest.TestSuite( (
numeric.suite(),
attr.suite(),
array.suite(),
objectwrapper.suite(),
proxy.suite(),
exc.suite(),
serial.suite(),
mro.suite(),
))
def runTest() :
root = os.path.abspath(os.path.dirname(__file__))
print "Running testsuite using JVM", jpype.getDefaultJVMPath()
jpype.startJVM(jpype.getDefaultJVMPath(), "-ea", "-Xmx256M", "-Xms64M",
"-Djava.class.path=./classes%s%s%sclasses" % (os.pathsep, root, os.sep))
runner = unittest.TextTestRunner()
runner.run(suite())
s = slice(2, 4)
print s, dir(s)
jpype.shutdownJVM()
if __name__ == '__main__' :
runTest()
|
{
"content_hash": "7b3b3f5e06e1b1711af20ca56c9f456f",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 20.05128205128205,
"alnum_prop": 0.612531969309463,
"repo_name": "vanschelven/jpype_05",
"id": "dd36fdc0ab22d2f7c74c61b21f9a5f505912d8de",
"size": "1559",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/testsuite.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "36086"
},
{
"name": "C++",
"bytes": "425703"
},
{
"name": "CSS",
"bytes": "1902"
},
{
"name": "Java",
"bytes": "63376"
},
{
"name": "Objective-C",
"bytes": "2974"
},
{
"name": "Objective-J",
"bytes": "5889"
},
{
"name": "Python",
"bytes": "150347"
},
{
"name": "Shell",
"bytes": "2123"
}
],
"symlink_target": ""
}
|
"""
WSGI config for expensify project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "expensify.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
{
"content_hash": "ea9f0bca16b869d27a0034f6c4919abe",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 79,
"avg_line_length": 40.714285714285715,
"alnum_prop": 0.8008771929824562,
"repo_name": "agiliq/expensify",
"id": "0f529cd341f349308683a3ca2c03a2ec54ea6e6a",
"size": "1140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "expensify/wsgi.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "42622"
},
{
"name": "CoffeeScript",
"bytes": "1970"
},
{
"name": "HTML",
"bytes": "13547"
},
{
"name": "JavaScript",
"bytes": "135965"
},
{
"name": "Python",
"bytes": "27751"
},
{
"name": "Shell",
"bytes": "586"
}
],
"symlink_target": ""
}
|
import re
class FieldType():
def __init__(self, name, regex, heuristic_regex=None):
self._name = name
self._regex = regex
self._heuristic_regex = heuristic_regex
@property
def heuristic_regex(self):
return self._heuristic_regex if self._heuristic_regex else self.regex
@property
def name(self):
return self._name
@property
def regex(self):
return self._regex
def heuristic_match(self, field):
pattern = re.compile(self.heuristic_regex)
return pattern.match(field)
class Field(object):
def __init__(self, name="new_field", field_type_name="string", operations=None, multi_valued=False):
self.name = name
self.field_type_name = field_type_name
self.keep = True
self.operations = operations if operations else []
self.required = False
self.unique = False
self.multi_valued = multi_valued
self.show_properties = False
def to_dict(self):
return {
'name': self.name,
'type': self.field_type_name,
'unique': self.unique,
'keep': self.keep,
'operations': self.operations,
'required': self.required,
'multiValued': self.multi_valued,
'showProperties': self.show_properties,
'nested': [],
'level': 0,
'length': 100,
'keyType': 'string',
'isPartition': False,
'partitionValue': '',
'comment': '',
'scale': 0,
'precision': 10
}
FIELD_TYPES = [
FieldType('text_en', "^[\\s\\S]*$", heuristic_regex="^[\\s\\S]{101,}$"),
FieldType('string', "^[\\s\\S]*$", heuristic_regex="^[\\s\\S]{1,100}$"),
  FieldType('double', r"^([+-]?[0-9]+(\.[0-9]+)?(E[+-]?[0-9]+)?)$"),
  FieldType('long', r"^(?:[+-]?(?:[0-9]+))$"),
  FieldType('date', r"^([0-9]+-[0-9]+-[0-9]+(\s+|T)[0-9]+:[0-9]+:[0-9]+(\.[0-9]*)?Z?)$")
]
def get_field_type(type_name):
  return [field_type for field_type in FIELD_TYPES if field_type.name == type_name][0]
def guess_field_type_from_samples(samples):
guesses = [_guess_field_type(sample) for sample in samples]
return _pick_best_field(guesses)
def _guess_field_type(field_val):
if field_val == "":
return None
for field_type in FIELD_TYPES[::-1]:
if field_type.heuristic_match(field_val):
return field_type.name
def _pick_best_field(types):
types = set(types)
for field in FIELD_TYPES:
if field.name in types:
return field.name
return "string"
|
{
"content_hash": "3274e32d39c9ada07f7407f6df39c7ad",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 102,
"avg_line_length": 26.820224719101123,
"alnum_prop": 0.6066191872643486,
"repo_name": "xq262144/hue",
"id": "454207a33208990614e9f78a61626b3f15f68d6f",
"size": "3171",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "desktop/libs/indexer/src/indexer/fields.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3096"
},
{
"name": "Batchfile",
"bytes": "41710"
},
{
"name": "C",
"bytes": "2692409"
},
{
"name": "C++",
"bytes": "199897"
},
{
"name": "CSS",
"bytes": "521820"
},
{
"name": "Emacs Lisp",
"bytes": "11704"
},
{
"name": "Genshi",
"bytes": "946"
},
{
"name": "Go",
"bytes": "6671"
},
{
"name": "Groff",
"bytes": "16669"
},
{
"name": "HTML",
"bytes": "24188238"
},
{
"name": "Java",
"bytes": "575404"
},
{
"name": "JavaScript",
"bytes": "4987047"
},
{
"name": "M4",
"bytes": "1377"
},
{
"name": "Makefile",
"bytes": "144341"
},
{
"name": "Mako",
"bytes": "3052598"
},
{
"name": "Myghty",
"bytes": "936"
},
{
"name": "PLSQL",
"bytes": "13774"
},
{
"name": "PLpgSQL",
"bytes": "3646"
},
{
"name": "Perl",
"bytes": "3499"
},
{
"name": "PigLatin",
"bytes": "328"
},
{
"name": "Python",
"bytes": "44291483"
},
{
"name": "Shell",
"bytes": "44147"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "Thrift",
"bytes": "278712"
},
{
"name": "Visual Basic",
"bytes": "2884"
},
{
"name": "XSLT",
"bytes": "518588"
}
],
"symlink_target": ""
}
|
from collections import defaultdict
from itertools import groupby
import six
import sqlalchemy as sa
from sqlalchemy.exc import NoInspectionAvailable
from sqlalchemy.orm import object_session
from sqlalchemy.schema import ForeignKeyConstraint, MetaData, Table
from ..query_chain import QueryChain
from .orm import get_column_key, get_mapper, get_tables
def get_foreign_key_values(fk, obj):
return dict(
(
fk.constraint.columns[index].key,
getattr(obj, element.column.key)
)
        for index, element in enumerate(fk.constraint.elements)
    )
def group_foreign_keys(foreign_keys):
"""
Return a groupby iterator that groups given foreign keys by table.
:param foreign_keys: a sequence of foreign keys
::
foreign_keys = get_referencing_foreign_keys(User)
for table, fks in group_foreign_keys(foreign_keys):
# do something
pass
.. seealso:: :func:`get_referencing_foreign_keys`
.. versionadded: 0.26.1
"""
foreign_keys = sorted(
foreign_keys, key=lambda key: key.constraint.table.name
)
return groupby(foreign_keys, lambda key: key.constraint.table)
def get_referencing_foreign_keys(mixed):
"""
Returns referencing foreign keys for given Table object or declarative
class.
:param mixed:
SA Table object or SA declarative class
::
get_referencing_foreign_keys(User) # set([ForeignKey('user.id')])
get_referencing_foreign_keys(User.__table__)
This function also understands inheritance. This means it returns
all foreign keys that reference any table in the class inheritance tree.
Let's say you have three classes which use joined table inheritance,
namely TextItem, Article and BlogPost with Article and BlogPost inheriting
TextItem.
::
# This will check all foreign keys that reference either article table
# or textitem table.
get_referencing_foreign_keys(Article)
.. seealso:: :func:`get_tables`
"""
if isinstance(mixed, sa.Table):
tables = [mixed]
else:
tables = get_tables(mixed)
referencing_foreign_keys = set()
for table in mixed.metadata.tables.values():
if table not in tables:
for constraint in table.constraints:
if isinstance(constraint, sa.sql.schema.ForeignKeyConstraint):
for fk in constraint.elements:
if any(fk.references(t) for t in tables):
referencing_foreign_keys.add(fk)
return referencing_foreign_keys
def merge_references(from_, to, foreign_keys=None):
"""
Merge the references of an entity into another entity.
Consider the following models::
class User(self.Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
name = sa.Column(sa.String(255))
def __repr__(self):
return 'User(name=%r)' % self.name
class BlogPost(self.Base):
__tablename__ = 'blog_post'
id = sa.Column(sa.Integer, primary_key=True)
title = sa.Column(sa.String(255))
author_id = sa.Column(sa.Integer, sa.ForeignKey('user.id'))
author = sa.orm.relationship(User)
Now lets add some data::
john = self.User(name='John')
jack = self.User(name='Jack')
post = self.BlogPost(title='Some title', author=john)
post2 = self.BlogPost(title='Other title', author=jack)
self.session.add_all([
john,
jack,
post,
post2
])
self.session.commit()
If we wanted to merge all John's references to Jack it would be as easy as
::
merge_references(john, jack)
self.session.commit()
post.author # User(name='Jack')
post2.author # User(name='Jack')
:param from_: an entity to merge into another entity
:param to: an entity to merge another entity into
:param foreign_keys: A sequence of foreign keys. By default this is None
indicating all referencing foreign keys should be used.
.. seealso: :func:`dependent_objects`
.. versionadded: 0.26.1
"""
if from_.__tablename__ != to.__tablename__:
raise TypeError('The tables of given arguments do not match.')
session = object_session(from_)
    if foreign_keys is None:
        foreign_keys = get_referencing_foreign_keys(from_)
for fk in foreign_keys:
old_values = get_foreign_key_values(fk, from_)
new_values = get_foreign_key_values(fk, to)
criteria = (
getattr(fk.constraint.table.c, key) == value
for key, value in six.iteritems(old_values)
)
try:
mapper = get_mapper(fk.constraint.table)
except ValueError:
query = (
fk.constraint.table
.update()
.where(sa.and_(*criteria))
.values(new_values)
)
session.execute(query)
else:
(
session.query(mapper.class_)
.filter_by(**old_values)
.update(
new_values,
'evaluate'
)
)
def dependent_objects(obj, foreign_keys=None):
"""
Return a :class:`~sqlalchemy_utils.query_chain.QueryChain` that iterates
through all dependent objects for given SQLAlchemy object.
Consider a User object is referenced in various articles and also in
various orders. Getting all these dependent objects is as easy as::
from sqlalchemy_utils import dependent_objects
dependent_objects(user)
If you expect an object to have lots of dependent_objects it might be good
to limit the results::
dependent_objects(user).limit(5)
The common use case is checking for all restrict dependent objects before
deleting parent object and inform the user if there are dependent objects
with ondelete='RESTRICT' foreign keys. If this kind of checking is not used
it will lead to nasty IntegrityErrors being raised.
In the following example we delete given user if it doesn't have any
foreign key restricted dependent objects::
from sqlalchemy_utils import get_referencing_foreign_keys
user = session.query(User).get(some_user_id)
deps = list(
dependent_objects(
user,
(
fk for fk in get_referencing_foreign_keys(User)
# On most databases RESTRICT is the default mode hence we
# check for None values also
if fk.ondelete == 'RESTRICT' or fk.ondelete is None
)
).limit(5)
)
if deps:
# Do something to inform the user
pass
else:
session.delete(user)
:param obj: SQLAlchemy declarative model object
:param foreign_keys:
A sequence of foreign keys to use for searching the dependent_objects
for given object. By default this is None, indicating that all foreign
keys referencing the object will be used.
.. note::
This function does not support exotic mappers that use multiple tables
.. seealso:: :func:`get_referencing_foreign_keys`
.. seealso:: :func:`merge_references`
.. versionadded: 0.26.0
"""
if foreign_keys is None:
foreign_keys = get_referencing_foreign_keys(obj)
session = object_session(obj)
chain = QueryChain([])
classes = obj.__class__._decl_class_registry
for table, keys in group_foreign_keys(foreign_keys):
keys = list(keys)
for class_ in classes.values():
try:
mapper = sa.inspect(class_)
except NoInspectionAvailable:
continue
parent_mapper = mapper.inherits
if (
table in mapper.tables and
not (parent_mapper and table in parent_mapper.tables)
):
query = session.query(class_).filter(
sa.or_(*_get_criteria(keys, class_, obj))
)
chain.queries.append(query)
return chain
def _get_criteria(keys, class_, obj):
criteria = []
visited_constraints = []
for key in keys:
if key.constraint in visited_constraints:
continue
visited_constraints.append(key.constraint)
subcriteria = []
for index, column in enumerate(key.constraint.columns):
foreign_column = (
key.constraint.elements[index].column
)
subcriteria.append(
getattr(class_, get_column_key(class_, column)) ==
getattr(
obj,
sa.inspect(type(obj))
.get_property_by_column(
foreign_column
).key
)
)
criteria.append(sa.and_(*subcriteria))
return criteria
def non_indexed_foreign_keys(metadata, engine=None):
"""
Finds all non indexed foreign keys from all tables of given MetaData.
Very useful for optimizing postgresql database and finding out which
foreign keys need indexes.
:param metadata: MetaData object to inspect tables from
"""
reflected_metadata = MetaData()
if metadata.bind is None and engine is None:
raise Exception(
'Either pass a metadata object with bind or '
'pass engine as a second parameter'
)
constraints = defaultdict(list)
for table_name in metadata.tables.keys():
table = Table(
table_name,
reflected_metadata,
autoload=True,
autoload_with=metadata.bind or engine
)
for constraint in table.constraints:
if not isinstance(constraint, ForeignKeyConstraint):
continue
if not is_indexed_foreign_key(constraint):
constraints[table.name].append(constraint)
return dict(constraints)
def is_indexed_foreign_key(constraint):
"""
Whether or not given foreign key constraint's columns have been indexed.
:param constraint: ForeignKeyConstraint object to check the indexes
"""
    return any(
        set(column.name for column in index.columns) ==
        set(constraint.columns)
        for index in constraint.table.indexes
    )
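# A minimal usage sketch of non_indexed_foreign_keys (the engine URL and
# declarative Base are illustrative):
#
#   engine = sa.create_engine('postgresql://localhost/mydb')
#   non_indexed_foreign_keys(Base.metadata, engine=engine)
#   # -> {'article': [ForeignKeyConstraint(...)], ...}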
|
{
"content_hash": "f4b70e1074898cdd2eeab986412ac56b",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 79,
"avg_line_length": 29.12054794520548,
"alnum_prop": 0.5990215448301816,
"repo_name": "cheungpat/sqlalchemy-utils",
"id": "c9a0ef2b4f9f1fb2e08a93b7b406d2bf6baa34c3",
"size": "10629",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sqlalchemy_utils/functions/foreign_keys.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "484228"
}
],
"symlink_target": ""
}
|
from __future__ import division
from sympy.functions.elementary.trigonometric import TrigonometricFunction
from sympy.functions.elementary.trigonometric import InverseTrigonometricFunction
import sympy
from sympy import Poly
from sympy import ZZ,CC,QQ,RR
from sympy import Add,Mul,rcollect,Number,NumberSymbol,sin,cos,Pow,Integer,Symbol,gcd,div,degree, Derivative, discriminant, primitive, real_roots, sieve
from .monomial_form import *
from .form_utils import *
from .form_output import *
def is_fully_expanded_polynomial(expr, eval_trig=False):
'''Determines if a proper polynomial is fully expanded.
A polynomial that is fully expanded is defined as a sum of monomials
that cannot be expanded further.
Args:
expr: A standard sympy expression
Returns:
a tuple containing:
[0] - boolean result of the function
[1] - string describing the result
'''
    result = is_monomial_form(expr)
    if result[0]:
return True, result[1]
elif not isinstance(expr, Add):
return False, result[1]
result = const_divisible(expr)
if result[0]:
return False, result[1]
if all(is_monomial_form(i)[0] for i in expr.args):
return True, PolynomialOutput.strout("EXPANDED")
return False, PolynomialOutput.strout("NOT_EXPANDED")
def is_fully_factored_polynomial(expr, eval_trig=False, domain='RR'):
    '''Determines if a proper polynomial is fully factored.
A polynomial that is fully factored is defined as a sum or product of
polynomials that cannot be reduced further.
Args:
expr: A standard sympy expression
        domain: (optional) the field over which the polynomial is judged \
            either reducible or irreducible. Domains are specified by \
            two capital letters to match sympy's style.
Options:
'RR' - Real numbers
'CC' - Complex numbers
TODO:
'QQ' - Rationals
'ZZ' - Integers
Returns:
a tuple containing:
[0] - boolean result of the function
[1] - string describing the result
'''
#If the expression is already a monomial or a singleton in the desired form
if is_monomial_form(expr)[0]:
return True, PolynomialOutput.strout("IS_MONOMIAL")
#Next, we check to see if individual terms in the polynomial are numerically
    #reducible (i.e., 3/3, x/x, x^2/x, etc.)
for i in mr_polynomial_terms(expr):
result = is_numerically_reducible_monomial(i)
if result[0]:
return False, result[1]
#Currently, no definition of polynomials allows for monomials that
#are combinable by integers or by bases, so we can filter those out
result = const_divisible(expr)
if result[0]:
return False, result[1]
#Finally, we analyze the reducibility of the polynomial according to the
#domain the user specified.
if domain == 'RR' or domain == RR:
result = real_field_reducible(expr)
return not result[0], result[1]
elif domain == 'CC' or domain == CC:
result = complex_field_reducible(expr)
return not result[0], result[1]
elif domain == 'ZZ' or domain == ZZ:
result = integer_field_reducible(expr)
return not result[0], result[1]
elif domain == 'QQ' or domain == QQ:
result = rational_field_reducible(expr)
return not result[0], result[1]
else:
return False, ErrorOutput.strout("ERROR")
def is_integer_content_free_polynomial(expr):
'''Determines if a polynomial is content-free. A polynomial that has
content is defined to have an integer gcd between all monomials that
    is not equal to 1. Always returns true if there is only one term
    in the expression.
Args:
expr: A standard sympy expression
Returns:
A tuple containing:
[0] - boolean result of the function
[1] - string describing the result
[2] - integer content of the polynomial
'''
if not isinstance(expr, Add):
return True, PolynomialOutput.strout("CONTENTFREE_MONOMIAL"), 1
    result = primitive(expr)
    if result[0] != 1:
        return False, PolynomialOutput.strout("NOT_CONTENTFREE"), result[0]
return True, PolynomialOutput.strout("CONTENTFREE"), 1
def complex_field_reducible(expr):
'''Determines if the polynomial is reducible over the complex field.
    By the fundamental theorem of algebra, a polynomial is reducible over
    the complex field if and only if its degree is greater than one.
    However, for this library, we won't count monomials, such as x^4,
    as being reducible.
Args:
expr: a standard Sympy expression
Returns:
a tuple containing:
[0] - boolean result of the function
[1] - string describing the result
'''
result = is_monomial_form(expr)
if result[0]:
return False, PolynomialOutput.strout("IS_MONOMIAL")
if isinstance(expr, Mul):
for i in expr.args:
result = complex_field_reducible(i)
if result[0]:
return result
return False, PolynomialOutput.strout("COMPLEX_FACTORED")
if isinstance(expr, Pow):
return complex_field_reducible(expr.args[0])
if degree(expr) > 1:
return True, PolynomialOutput.strout("COMPLEX_HIGH_DEGREE")
return False, PolynomialOutput.strout("COMPLEX_FACTORED")
def real_field_reducible(expr):
'''Determines if the polynomial is reducible over the real field.
    By the fundamental theorem of algebra, a polynomial is irreducible over
    the real field if and only if the following criteria are met:
        1: The degree of the polynomial is less than 3.
        2: If the degree of the polynomial is 2, the roots are in
        the complex field.
However, for this library, we won't count monomials, such as x^4,
as being reducible.
Args:
expr: a standard Sympy expression
Returns:
a tuple containing:
[0] - boolean result of the function
[1] - string describing the result
'''
result = is_monomial_form(expr)
if result[0]:
return False, PolynomialOutput.strout("IS_MONOMIAL")
if isinstance(expr, Mul):
for i in expr.args:
result = real_field_reducible(i)
if result[0]:
return result
return False, PolynomialOutput.strout("REAL_FACTORED")
if isinstance(expr, Pow):
return real_field_reducible(expr.args[0])
if degree(expr) > 2:
return True, PolynomialOutput.strout("REAL_HIGH_DEGREE")
if degree(expr) == 2 and discriminant(expr) >= 0:
return True, PolynomialOutput.strout("REAL_FACTORABLE_QUAD")
return False, PolynomialOutput.strout("REAL_FACTORED")
def integer_field_reducible(expr):
'''Determines if the polynomial is reducible over the field of integers.
    A polynomial reducible over the integers is one that has more than two \
integer roots or has integer content that can be factored.
However, for this library, we wholly exclude monomials, such as x^4,
as being reducible.
Args:
expr: a standard Sympy expression
Returns:
a tuple containing:
[0] - boolean result of the function
[1] - string describing the result
'''
result = is_monomial_form(expr)
if result[0]:
return False, PolynomialOutput.strout("IS_MONOMIAL")
if isinstance(expr, Add):
result = is_integer_content_free_polynomial(expr)
if not result[0]:
return True, result[1]
if isinstance(expr, Mul):
for i in expr.args:
result = integer_field_reducible(i)
if result[0]:
return result
if Poly(expr, domain=ZZ).is_irreducible:
return False, PolynomialOutput.strout("INTEGER_FACTORED")
return True, PolynomialOutput.strout("INTEGER_REDUCIBLE")
def rational_field_reducible(expr):
    '''Determines if the polynomial is reducible over the field of rationals.
A polynomial reducible over the rationals is one that has more than \
two rational roots or has rational content that can be factored.
However, for this library, we will wholly exclude monomials, such as x^4,
as being reducible.
Args:
expr: a standard Sympy expression
Returns:
a tuple containing:
[0] - boolean result of the function
[1] - string describing the result
'''
result = is_monomial_form(expr)
if result[0]:
return False, PolynomialOutput.strout("IS_MONOMIAL")
if isinstance(expr, Mul):
for i in expr.args:
result = rational_field_reducible(i)
if result[0]:
return result
if isinstance(expr, Pow):
return rational_field_reducible(expr.args[0])
if Poly(expr, domain=QQ).is_irreducible:
return False, PolynomialOutput.strout("RATIONAL_FACTORED")
return True, PolynomialOutput.strout("RATIONAL_REDUCIBLE")
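# A minimal usage sketch (the expressions are illustrative; each predicate
# returns a (bool, str) tuple):
#
#   x = Symbol('x')
#   is_fully_factored_polynomial((x + 1)*(x + 2))              # factored
#   is_fully_factored_polynomial(x**2 + 3*x + 2, domain='RR')  # reducible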
|
{
"content_hash": "e18d3c227f3f9458ba78d1ddbd76b1f6",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 152,
"avg_line_length": 37.27091633466136,
"alnum_prop": 0.6400855157669695,
"repo_name": "lemmalearning/sympy-form-analysis",
"id": "4936b92a300dff39cfeef55a6acc4c4330083304",
"size": "9355",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "polynomial_form.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "65169"
}
],
"symlink_target": ""
}
|
import unittest
import json
import os
import sys
from unittest.mock import patch, MagicMock, mock_open
from systemops.opener import get_stooges
from settings import ROOT_DIR
class TestOpener(unittest.TestCase):
""" Tests opening and retrieving data from json """
@patch('systemops.opener.open', mock_open(read_data='{"beer":"duff"}'))
def test_get_stooges(self):
""" Checks whether the json getter returns the expected value """
result = get_stooges('homer.json')
expected_result = json.loads('{"beer":"duff"}')
self.assertEqual(expected_result, result)
def test_data_exists(self):
""" Checks that data file exists"""
self.assertTrue(get_stooges(os.path.join(ROOT_DIR, 'systemops/data/stooges.json')))
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "b252cf9bad18df34fe0d1398ff775d99",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 91,
"avg_line_length": 32.68,
"alnum_prop": 0.6780905752753978,
"repo_name": "b-ritter/python-notes",
"id": "762b6811d08fffec9ff83daf9368d56df51dc09a",
"size": "817",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "systemops/test_opener.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15367"
},
{
"name": "Shell",
"bytes": "289"
}
],
"symlink_target": ""
}
|
"""Ftrl-proximal for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=["train.FtrlOptimizer"])
class FtrlOptimizer(optimizer.Optimizer):
"""Optimizer that implements the FTRL algorithm.
This version has support for both online L2 (McMahan et al., 2013) and
shrinkage-type L2, which is the addition of an L2 penalty
to the loss function.
References:
Ad-click prediction:
[McMahan et al., 2013](https://dl.acm.org/citation.cfm?id=2488200)
([pdf](https://dl.acm.org/ft_gateway.cfm?id=2488200&ftid=1388399&dwn=1&CFID=32233078&CFTOKEN=d60fe57a294c056a-CB75C374-F915-E7A6-1573FBBC7BF7D526))
"""
def __init__(self,
learning_rate,
learning_rate_power=-0.5,
initial_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0,
use_locking=False,
name="Ftrl",
accum_name=None,
linear_name=None,
l2_shrinkage_regularization_strength=0.0,
beta=None):
r"""Construct a new FTRL optimizer.
Args:
learning_rate: A float value or a constant float `Tensor`.
learning_rate_power: A float value, must be less or equal to zero.
Controls how the learning rate decreases during training. Use zero for
a fixed learning rate. See section 3.1 in (McMahan et al., 2013).
initial_accumulator_value: The starting value for accumulators.
Only zero or positive values are allowed.
l1_regularization_strength: A float value, must be greater than or
equal to zero.
l2_regularization_strength: A float value, must be greater than or
equal to zero.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Ftrl".
accum_name: The suffix for the variable that keeps the gradient squared
accumulator. If not present, defaults to name.
linear_name: The suffix for the variable that keeps the linear gradient
accumulator. If not present, defaults to name + "_1".
l2_shrinkage_regularization_strength: A float value, must be greater than
or equal to zero. This differs from L2 above in that the L2 above is a
stabilization penalty, whereas this L2 shrinkage is a magnitude penalty.
The FTRL formulation can be written as:
w_{t+1} = argmin_w(\hat{g}_{1:t}w + L1*||w||_1 + L2*||w||_2^2), where
\hat{g} = g + (2*L2_shrinkage*w), and g is the gradient of the loss
function w.r.t. the weights w.
Specifically, in the absence of L1 regularization, it is equivalent to
the following update rule:
w_{t+1} = w_t - lr_t / (beta + 2*L2*lr_t) * g_t -
2*L2_shrinkage*lr_t / (beta + 2*L2*lr_t) * w_t
where lr_t is the learning rate at t.
When input is sparse shrinkage will only happen on the active weights.
beta: A float value; corresponds to the beta parameter in the paper.
Raises:
ValueError: If one of the arguments is invalid.
References:
Ad-click prediction:
[McMahan et al., 2013](https://dl.acm.org/citation.cfm?id=2488200)
([pdf](https://dl.acm.org/ft_gateway.cfm?id=2488200&ftid=1388399&dwn=1&CFID=32233078&CFTOKEN=d60fe57a294c056a-CB75C374-F915-E7A6-1573FBBC7BF7D526))
"""
super(FtrlOptimizer, self).__init__(use_locking, name)
if initial_accumulator_value < 0.0:
raise ValueError(
"initial_accumulator_value %f needs to be positive or zero" %
initial_accumulator_value)
if learning_rate_power > 0.0:
raise ValueError("learning_rate_power %f needs to be negative or zero" %
learning_rate_power)
if l1_regularization_strength < 0.0:
raise ValueError(
"l1_regularization_strength %f needs to be positive or zero" %
l1_regularization_strength)
if l2_regularization_strength < 0.0:
raise ValueError(
"l2_regularization_strength %f needs to be positive or zero" %
l2_regularization_strength)
if l2_shrinkage_regularization_strength < 0.0:
raise ValueError(
"l2_shrinkage_regularization_strength %f needs to be positive"
" or zero" % l2_shrinkage_regularization_strength)
self._learning_rate = learning_rate
self._learning_rate_power = learning_rate_power
self._initial_accumulator_value = initial_accumulator_value
self._l1_regularization_strength = l1_regularization_strength
self._l2_regularization_strength = l2_regularization_strength
self._beta = (0.0 if beta is None else beta)
self._l2_shrinkage_regularization_strength = (
l2_shrinkage_regularization_strength)
self._learning_rate_tensor = None
self._learning_rate_power_tensor = None
self._l1_regularization_strength_tensor = None
self._adjusted_l2_regularization_strength_tensor = None
self._l2_shrinkage_regularization_strength_tensor = None
self._accum_name = accum_name
self._linear_name = linear_name
def _create_slots(self, var_list):
# Create the "accum" and "linear" slots.
for v in var_list:
val = constant_op.constant(
self._initial_accumulator_value, dtype=v.dtype, shape=v.get_shape())
self._get_or_make_slot(v, val, "accum", self._accum_name or self._name)
self._zeros_slot(v, "linear", self._linear_name or self._name)
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(
self._learning_rate, name="learning_rate")
self._l1_regularization_strength_tensor = ops.convert_to_tensor(
self._l1_regularization_strength, name="l1_regularization_strength")
# L2 regularization strength with beta added in so that the underlying
# TensorFlow ops do not need to include that parameter.
self._adjusted_l2_regularization_strength_tensor = ops.convert_to_tensor(
self._l2_regularization_strength + self._beta /
(2. * self._learning_rate),
name="adjusted_l2_regularization_strength")
assert self._adjusted_l2_regularization_strength_tensor is not None
self._beta_tensor = ops.convert_to_tensor(self._beta, name="beta")
self._l2_shrinkage_regularization_strength_tensor = ops.convert_to_tensor(
self._l2_shrinkage_regularization_strength,
name="l2_shrinkage_regularization_strength")
self._learning_rate_power_tensor = ops.convert_to_tensor(
self._learning_rate_power, name="learning_rate_power")
def _apply_dense(self, grad, var):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.apply_ftrl(
var,
accum,
linear,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._adjusted_l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
else:
return training_ops.apply_ftrl_v2(
var,
accum,
linear,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._adjusted_l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
def _resource_apply_dense(self, grad, var):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.resource_apply_ftrl(
var.handle,
accum.handle,
linear.handle,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._adjusted_l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
else:
return training_ops.resource_apply_ftrl_v2(
var.handle,
accum.handle,
linear.handle,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._adjusted_l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.sparse_apply_ftrl(
var,
accum,
linear,
grad.values,
grad.indices,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._adjusted_l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
else:
return training_ops.sparse_apply_ftrl_v2(
var,
accum,
linear,
grad.values,
grad.indices,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._adjusted_l2_regularization_strength_tensor,
var.dtype.base_dtype),
math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
grad.dtype.base_dtype),
math_ops.cast(self._learning_rate_power_tensor, var.dtype.base_dtype),
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
accum = self.get_slot(var, "accum")
linear = self.get_slot(var, "linear")
if self._l2_shrinkage_regularization_strength <= 0.0:
return training_ops.resource_sparse_apply_ftrl(
var.handle,
accum.handle,
linear.handle,
grad,
indices,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
math_ops.cast(self._adjusted_l2_regularization_strength_tensor,
grad.dtype),
math_ops.cast(self._learning_rate_power_tensor, grad.dtype),
use_locking=self._use_locking)
else:
return training_ops.resource_sparse_apply_ftrl_v2(
var.handle,
accum.handle,
linear.handle,
grad,
indices,
math_ops.cast(self._learning_rate_tensor, grad.dtype),
math_ops.cast(self._l1_regularization_strength_tensor, grad.dtype),
math_ops.cast(self._adjusted_l2_regularization_strength_tensor,
grad.dtype),
math_ops.cast(self._l2_shrinkage_regularization_strength_tensor,
grad.dtype),
math_ops.cast(self._learning_rate_power_tensor, grad.dtype),
use_locking=self._use_locking)
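# A minimal usage sketch (TF1 graph-mode style; hyperparameter values are
# illustrative):
#
#   opt = FtrlOptimizer(learning_rate=0.1,
#                       l1_regularization_strength=0.001,
#                       l2_regularization_strength=0.001)
#   train_op = opt.minimize(loss)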
|
{
"content_hash": "71a2f0301d666cd215ebe48e53ca3b5d",
"timestamp": "",
"source": "github",
"line_count": 276,
"max_line_length": 155,
"avg_line_length": 45.82608695652174,
"alnum_prop": 0.6386780518659076,
"repo_name": "aldian/tensorflow",
"id": "6c8a6ceadc5ba1831be1cb9c017ab59d50f06c0b",
"size": "13337",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/ftrl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "8458"
},
{
"name": "C",
"bytes": "201402"
},
{
"name": "C++",
"bytes": "29667924"
},
{
"name": "CMake",
"bytes": "647100"
},
{
"name": "Go",
"bytes": "976514"
},
{
"name": "Java",
"bytes": "412117"
},
{
"name": "Jupyter Notebook",
"bytes": "1833675"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "38128"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "63210"
},
{
"name": "Perl",
"bytes": "6715"
},
{
"name": "Protocol Buffer",
"bytes": "275733"
},
{
"name": "PureBasic",
"bytes": "24932"
},
{
"name": "Python",
"bytes": "26424665"
},
{
"name": "Ruby",
"bytes": "327"
},
{
"name": "Shell",
"bytes": "373109"
}
],
"symlink_target": ""
}
|
"""
You can make the following assumptions about the inputs to the
'sum_of_multiples' function:
* All input numbers are non-negative 'int's, i.e. natural numbers
including zero.
* A list of factors must be given, and its elements are unique
and sorted in ascending order.
"""
import unittest
from sum_of_multiples import sum_of_multiples
# Tests adapted from `problem-specifications//canonical-data.json` @ v1.5.0
class SumOfMultiplesTest(unittest.TestCase):
def test_multiples_with_no_factors_in_limit(self):
self.assertEqual(sum_of_multiples(1, [3, 5]), 0)
def test_multiples_of_one_factor_within_limit(self):
self.assertEqual(sum_of_multiples(4, [3, 5]), 3)
def test_various_multiples_in_limit(self):
self.assertEqual(sum_of_multiples(7, [3]), 9)
def test_various_factors_with_multiples_in_limit(self):
self.assertEqual(sum_of_multiples(10, [3, 5]), 23)
def test_multiples_counted_only_once(self):
self.assertEqual(sum_of_multiples(100, [3, 5]), 2318)
def test_multiples_with_large_limit(self):
self.assertEqual(sum_of_multiples(1000, [3, 5]), 233168)
def test_multiples_with_three_factors(self):
self.assertEqual(sum_of_multiples(20, [7, 13, 17]), 51)
def test_multiples_with_factors_not_prime(self):
self.assertEqual(sum_of_multiples(15, [4, 6]), 30)
def test_multiples_with_factors_prime_and_not(self):
self.assertEqual(sum_of_multiples(150, [5, 6, 8]), 4419)
def test_multiples_with_similar_factors(self):
self.assertEqual(sum_of_multiples(51, [5, 25]), 275)
def test_multiples_with_large_factors(self):
self.assertEqual(sum_of_multiples(10000, [43, 47]), 2203160)
def test_multiples_of_one_will_be_all(self):
self.assertEqual(sum_of_multiples(100, [1]), 4950)
def test_multiples_of_an_empty_list(self):
self.assertEqual(sum_of_multiples(10000, []), 0)
def test_multiples_of_zero_will_be_none(self):
self.assertEqual(sum_of_multiples(1, [0]), 0)
def test_multiples_with_a_zero_factor(self):
self.assertEqual(sum_of_multiples(4, [0, 3]), 3)
def test_multiples_of_several_factors(self):
self.assertEqual(sum_of_multiples(10000,
[2, 3, 5, 7, 11]), 39614537)
if __name__ == '__main__':
unittest.main()
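# A minimal reference sketch of the function under test (one of many
# possible implementations; zero factors contribute nothing):
#
#   def sum_of_multiples(limit, factors):
#       return sum(n for n in range(limit)
#                  if any(f and n % f == 0 for f in factors))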
|
{
"content_hash": "301e045a77b56bb99df1872a5b4850fc",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 75,
"avg_line_length": 34.31884057971015,
"alnum_prop": 0.6613175675675675,
"repo_name": "N-Parsons/exercism-python",
"id": "bde12f7527d4a758027aced8745218dde31fd985",
"size": "2368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exercises/sum-of-multiples/sum_of_multiples_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "555991"
},
{
"name": "Shell",
"bytes": "1199"
}
],
"symlink_target": ""
}
|
"""This module contains the custom admin class that allows for a nicer admin
interface."""
# pylint: disable=maybe-no-member,too-few-public-methods
# Third-party imports
from flask_admin.contrib.sqla import ModelView
class CustomAdminView(ModelView): # pylint: disable=no-init
"""Define custom templates for each view."""
list_template = 'list.html'
create_template = 'create.html'
edit_template = 'edit.html'
column_display_pk = True
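# A minimal usage sketch (the app, session, and User model are
# illustrative):
#
#   from flask_admin import Admin
#   admin = Admin(app)
#   admin.add_view(CustomAdminView(User, db.session))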
|
{
"content_hash": "24270a89eb69e3ba7d767eea2cf2deb4",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 76,
"avg_line_length": 32.785714285714285,
"alnum_prop": 0.7298474945533769,
"repo_name": "jeffknupp/sandman2",
"id": "1c0e0d3c57853fc0c19f206020c4f3caeba41d33",
"size": "459",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sandman2/admin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "367"
},
{
"name": "HTML",
"bytes": "3118"
},
{
"name": "Makefile",
"bytes": "291"
},
{
"name": "Python",
"bytes": "49770"
},
{
"name": "Shell",
"bytes": "433"
}
],
"symlink_target": ""
}
|
"""Tools for setting up printing in interactive sessions. """
def _init_python_printing(stringify_func):
"""Setup printing in Python interactive session. """
import __builtin__, sys
def _displayhook(arg):
"""Python's pretty-printer display hook.
This function was adapted from:
http://www.python.org/dev/peps/pep-0217/
"""
if arg is not None:
__builtin__._ = None
print stringify_func(arg)
__builtin__._ = arg
sys.displayhook = _displayhook
def _init_ipython_printing(ip, stringify_func):
"""Setup printing in IPython interactive session. """
def _pretty_print(arg, p, cycle):
"""caller for pretty, for use in IPython 0.11"""
p.text(stringify_func(arg))
def _result_display(self, arg):
"""IPython's pretty-printer display hook, for use in IPython 0.10
This function was adapted from:
ipython/IPython/hooks.py:155
"""
if self.rc.pprint:
out = stringify_func(arg)
if '\n' in out:
print
print out
else:
print repr(arg)
    import IPython
    # Compare parsed versions: a plain string comparison would wrongly
    # treat e.g. '0.9' as newer than '0.11'.
    from distutils.version import LooseVersion
    if LooseVersion(IPython.__version__) >= LooseVersion('0.11'):
formatter = ip.display_formatter.formatters['text/plain']
for cls in (object, tuple, list, set, frozenset, dict, str):
formatter.for_type(cls, _pretty_print)
# this loads pretty printing for objects that inherit from Basic or Matrix:
formatter.for_type_by_name(
'sympy.core.basic', 'Basic', _pretty_print
)
formatter.for_type_by_name(
'sympy.matrices.matrices', 'Matrix', _pretty_print
)
else:
ip.set_hook('result_display', _result_display)
def init_printing(pretty_print=True, order=None, use_unicode=None, wrap_line=None, num_columns=None, no_global=False, ip=None):
"""
Initializes pretty-printer depending on the environment.
Parameters
==========
pretty_print: boolean
If True, use pretty_print to stringify;
if False, use sstrrepr to stringify.
order: string or None
There are a few different settings for this parameter:
        lex (default), which is lexicographic order;
        grlex, which is graded lexicographic order;
        grevlex, which is reversed graded lexicographic order;
old, which is used for compatibility reasons and for long expressions;
None, which sets it to lex.
use_unicode: boolean or None
If True, use unicode characters;
if False, do not use unicode characters.
wrap_line: boolean
If True, lines will wrap at the end;
if False, they will not wrap but continue as one line.
num_columns: int or None
If int, number of columns before wrapping is set to num_columns;
if None, number of columns before wrapping is set to terminal width.
no_global: boolean
If True, the settings become system wide;
if False, use just for this console/session.
ip: An interactive console
This can either be an instance of IPython,
or a class that derives from code.InteractiveConsole.
Examples
========
>>> from sympy.interactive import init_printing
>>> from sympy import Symbol, sqrt
>>> from sympy.abc import x, y
>>> sqrt(5)
sqrt(5)
>>> init_printing(pretty_print=True) # doctest: +SKIP
>>> sqrt(5) # doctest: +SKIP
___
\/ 5
>>> theta = Symbol('theta') # doctest: +SKIP
>>> init_printing(use_unicode=True) # doctest: +SKIP
>>> theta # doctest: +SKIP
\u03b8
>>> init_printing(use_unicode=False) # doctest: +SKIP
>>> theta # doctest: +SKIP
theta
>>> init_printing(order='lex') # doctest: +SKIP
>>> str(y + x + y**2 + x**2) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(order='grlex') # doctest: +SKIP
>>> str(y + x + y**2 + x**2) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(order='grevlex') # doctest: +SKIP
>>> str(y * x**2 + x * y**2) # doctest: +SKIP
x**2*y + x*y**2
>>> init_printing(order='old') # doctest: +SKIP
>>> str(x**2 + y**2 + x + y) # doctest: +SKIP
x**2 + x + y**2 + y
>>> init_printing(num_columns=10) # doctest: +SKIP
>>> x**2 + x + y**2 + y # doctest: +SKIP
x + y +
x**2 + y**2
"""
from sympy.printing.printer import Printer
if pretty_print:
from sympy.printing import pretty as stringify_func
else:
from sympy.printing import sstrrepr as stringify_func
if not no_global:
Printer.set_global_settings(order=order, use_unicode=use_unicode, wrap_line=wrap_line, num_columns=num_columns)
else:
_stringify_func = stringify_func
if pretty_print:
stringify_func = lambda expr: _stringify_func(expr, order=order, use_unicode=use_unicode, wrap_line=wrap_line, num_columns=num_columns)
else:
stringify_func = lambda expr: _stringify_func(expr, order=order)
if ip is not None and ip.__module__.startswith('IPython'):
_init_ipython_printing(ip, stringify_func)
else:
_init_python_printing(stringify_func)
|
{
"content_hash": "a7020117d58d44ee65fb8c27439f86ca",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 147,
"avg_line_length": 34.276315789473685,
"alnum_prop": 0.6028790786948176,
"repo_name": "flacjacket/sympy",
"id": "22b08dd18a27c4582fd665b9bc24ba2df7e570d2",
"size": "5210",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sympy/interactive/printing.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10293116"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
import sys
# versioneer: get __version__ attribute
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
# A bit of a hack, following numpy/sklearn: figure out whether we're being
# imported in the setup phase and so shouldn't try to import the compiled
# extensions.
try:
__CYFLANN_SETUP__
except NameError:
__CYFLANN_SETUP__ = False
if __CYFLANN_SETUP__:
sys.stderr.write("Partial import of cyflann during the build process.\n")
else:
from . import index
from .index import FLANNParameters, FLANNIndex, set_distance_type
from . import flann_info, extensions
from .flann_info import get_flann_info
from .extensions import FLANNExtension
# A test function, if we have nose.
try:
from numpy.testing import nosetester
test = nosetester.NoseTester().test
del nosetester
except ImportError:
pass
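# For reference (illustrative, not part of the original module): following the
# numpy/scikit-learn pattern cited above, setup.py is expected to set the flag
# through builtins before importing the package, e.g.:
#     import builtins  # on Python 2: import __builtin__ as builtins
#     builtins.__CYFLANN_SETUP__ = True
#     import cyflann  # only the partial import runs during the build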
|
{
"content_hash": "db78aeb8e2bdaf990e4784a58929ec74",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 77,
"avg_line_length": 26.424242424242426,
"alnum_prop": 0.7327981651376146,
"repo_name": "dougalsutherland/cyflann",
"id": "64e9bc54ab5b8f8029150ad31ed7b1a7a2fd9abe",
"size": "872",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cyflann/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "132425"
},
{
"name": "Shell",
"bytes": "3128"
}
],
"symlink_target": ""
}
|
from tests.utils import service_ok
import pytest
from tests.utils import resource_file
import owslib
from owslib.csw import CatalogueServiceWeb
from owslib.wms import WebMapService
from owslib.wcs import WebCoverageService
from owslib.wfs import WebFeatureService
from owslib.util import OrderedDict
# TODO, we should run all these from local XML documents (as per the WMS and WFS services)
# CSW_SERVICE_URL = 'http://data.nodc.noaa.gov/geoportal/csw'
CSW_SERVICE_URL = 'http://demo.pycsw.org/cite/csw'
WCS_SERVICE_URL = 'http://thredds.ucar.edu/thredds/wcs/grib/NCEP/NAM/CONUS_80km/best'
@pytest.mark.online
@pytest.mark.skipif(not service_ok(CSW_SERVICE_URL),
reason='service is unreachable')
def test_ows_interfaces_csw():
service = CatalogueServiceWeb(CSW_SERVICE_URL)
# Check each service instance conforms to OWSLib interface
service.alias = 'CSW'
    assert isinstance(service, owslib.csw.CatalogueServiceWeb)
# URL attribute
assert service.url == CSW_SERVICE_URL
# version attribute
assert service.version == '2.0.2'
# Identification object
assert hasattr(service, 'identification')
# Check all ServiceIdentification attributes
assert service.identification.type == 'CSW'
for attribute in ['type', 'version', 'title', 'abstract', 'keywords', 'accessconstraints', 'fees']:
assert hasattr(service.identification, attribute)
# Check all ServiceProvider attributes
for attribute in ['name', 'url', 'contact']:
assert hasattr(service.provider, attribute)
# Check all operations implement IOperationMetadata
for op in service.operations:
for attribute in ['name', 'formatOptions', 'methods']:
assert hasattr(op, attribute)
# Check all contents implement IContentMetadata as a dictionary
# CSW does not work in this way so use dummy
service.contents = {'dummy': '1'}
    assert isinstance(service.contents, dict)
# Check any item (WCS coverage, WMS layer etc) from the contents of each service
# Check it conforms to IContentMetadata interface
# CSW does not conform to this
def test_ows_interfaces_wms():
wmsxml = open(resource_file('wms_JPLCapabilities.xml'), 'rb').read()
service = WebMapService('url', version='1.1.1', xml=wmsxml)
# Check each service instance conforms to OWSLib interface
service.alias = 'WMS'
    assert isinstance(service, owslib.map.wms111.WebMapService_1_1_1)
# URL attribute
assert service.url == 'url'
# version attribute
assert service.version == '1.1.1'
# Identification object
assert hasattr(service, 'identification')
# Check all ServiceIdentification attributes
assert service.identification.type == 'OGC:WMS'
for attribute in ['type', 'version', 'title', 'abstract', 'keywords', 'accessconstraints', 'fees']:
assert hasattr(service.identification, attribute)
# Check all ServiceProvider attributes
for attribute in ['name', 'url', 'contact']:
assert hasattr(service.provider, attribute)
# Check all operations implement IOperationMetadata
for op in service.operations:
for attribute in ['name', 'formatOptions', 'methods']:
assert hasattr(op, attribute)
# Check all contents implement IContentMetadata as a dictionary
    assert isinstance(service.contents, OrderedDict)
# Check any item (WCS coverage, WMS layer etc) from the contents of each service
# Check it conforms to IContentMetadata interface
# get random item from contents dictionary -has to be a nicer way to do this!
content = service.contents[list(service.contents.keys())[0]]
for attribute in ['id', 'title', 'boundingBox', 'boundingBoxWGS84', 'crsOptions', 'styles', 'timepositions']:
assert hasattr(content, attribute)
@pytest.mark.online
def test_ows_interfaces_wcs():
service = WebCoverageService(WCS_SERVICE_URL, version='1.0.0')
# Check each service instance conforms to OWSLib interface
service.alias = 'WCS'
    assert isinstance(service, owslib.coverage.wcs100.WebCoverageService_1_0_0)
# URL attribute
assert service.url == WCS_SERVICE_URL
# version attribute
assert service.version == '1.0.0'
# Identification object
assert hasattr(service, 'identification')
# Check all ServiceIdentification attributes
assert service.identification.type == 'OGC:WCS'
for attribute in ['type', 'version', 'title', 'abstract', 'keywords', 'fees']:
assert hasattr(service.identification, attribute)
# Check all ServiceProvider attributes
for attribute in ['name', 'url', 'contact']:
assert hasattr(service.provider, attribute)
# Check all operations implement IOperationMetadata
for op in service.operations:
for attribute in ['name', 'methods']:
assert hasattr(op, attribute)
# Check all contents implement IContentMetadata as a dictionary
    assert isinstance(service.contents, dict)
# Check any item (WCS coverage, WMS layer etc) from the contents of each service
# Check it conforms to IContentMetadata interface
# get random item from contents dictionary -has to be a nicer way to do this!
content = service.contents[list(service.contents.keys())[0]]
for attribute in ['id', 'title', 'boundingBox', 'boundingBoxWGS84', 'crsOptions', 'styles', 'timepositions']:
assert hasattr(content, attribute)
def test_ows_interfaces_wfs():
wfsxml = open(resource_file('mapserver-wfs-cap.xml'), 'rb').read()
service = WebFeatureService('url', version='1.0', xml=wfsxml)
# Check each service instance conforms to OWSLib interface
    service.alias = 'WFS'
    assert isinstance(service, owslib.feature.wfs100.WebFeatureService_1_0_0)
# URL attribute
assert service.url == 'url'
# version attribute
assert service.version == '1.0'
# Identification object
assert hasattr(service, 'identification')
# Check all ServiceIdentification attributes
assert service.identification.type == 'MapServer WFS'
for attribute in ['type', 'version', 'title', 'abstract', 'keywords', 'accessconstraints', 'fees']:
assert hasattr(service.identification, attribute)
# Check all ServiceProvider attributes
for attribute in ['name', 'url']:
assert hasattr(service.provider, attribute)
# Check all operations implement IOperationMetadata
for op in service.operations:
for attribute in ['name', 'formatOptions', 'methods']:
assert hasattr(op, attribute)
# Check all contents implement IContentMetadata as a dictionary
    assert isinstance(service.contents, dict)
# Check any item (WCS coverage, WMS layer etc) from the contents of each service
# Check it conforms to IContentMetadata interface
# get random item from contents dictionary -has to be a nicer way to do this!
content = service.contents[list(service.contents.keys())[0]]
for attribute in ['id', 'title', 'boundingBox', 'boundingBoxWGS84', 'crsOptions', 'styles', 'timepositions']:
assert hasattr(content, attribute)
|
{
"content_hash": "07cf482f017b65a0906cefb0f30ec9a3",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 113,
"avg_line_length": 46.92666666666667,
"alnum_prop": 0.7106123028839324,
"repo_name": "bird-house/OWSLib",
"id": "9d7b424077aced6ba0b4f8cfc0ddbeb57e7eb906",
"size": "7201",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_ows_interfaces.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "67"
},
{
"name": "Python",
"bytes": "1001000"
}
],
"symlink_target": ""
}
|
import uuid
import mock
from oslo_config import cfg
import six
import wsme
from solum.common import exception
from solum.tests import base
class ExceptionTestCase(base.BaseTestCase):
"""Test cases for exception code."""
def test_with_kwargs(self):
exc = exception.ResourceNotFound(name='application', id='green_paint')
self.assertIn('green_paint could not be found.',
six.text_type(exc))
self.assertEqual(exc.code, 404)
def test_with_kwargs_ru(self):
exc = exception.ResourceNotFound(name='application',
id=u'зеленой_краской')
self.assertIn(u'зеленой_краской could not be found',
six.text_type(exc))
self.assertEqual(exc.code, 404)
def test_bad_kwargs_exception(self):
cfg.CONF.set_override('fatal_exception_format_errors', True)
self.assertRaises(KeyError,
exception.ResourceNotFound, a_field='green')
def test_bad_kwargs(self):
cfg.CONF.set_override('fatal_exception_format_errors', False)
exc = exception.ResourceNotFound(a_field='green')
self.assertIn('An unknown exception occurred', six.text_type(exc))
self.assertEqual(exc.code, 404)
def test_resource_exists(self):
exc = exception.ResourceExists(name='test')
self.assertIn("The test resource already exists.",
six.text_type(exc))
self.assertEqual(exc.code, 409)
def test_application_exists(self):
exc = exception.ResourceExists(name='test')
self.assertIn("The test resource already exists.",
six.text_type(exc))
self.assertEqual(exc.code, 409)
def test_not_implemented(self):
exc = exception.NotImplemented()
self.assertIn("The requested operation is not implemented.",
six.text_type(exc))
self.assertEqual(exc.code, 501)
def test_wrap_controller_exception_with_server_error(self):
exception.LOG.error = mock.Mock()
def error_func():
raise exception.NotImplemented()
correlation_id = None
try:
exception.wrap_wsme_controller_exception(error_func)()
except wsme.exc.ClientSideError as e:
correlation_id = six.text_type(e).split(":")[1].strip()
self.assertIsNotNone(correlation_id)
self.assertIsInstance(uuid.UUID(correlation_id), uuid.UUID)
self.assertTrue(exception.LOG.error.called)
(args, kargs) = exception.LOG.error.call_args
self.assertTrue(correlation_id in args)
self.assertTrue(correlation_id in str(args[0] % args[1:]))
def test_wrap_controller_exception_with_client_error(self):
error_args = dict(reason="foo")
expected_error_msg = six.text_type(
exception.BadRequest.msg_fmt % error_args)
def error_func():
raise exception.BadRequest(**error_args)
try:
exception.wrap_wsme_controller_exception(error_func)()
            self.fail("expected ClientSideError to be raised")
except wsme.exc.ClientSideError as e:
self.assertEqual(e.msg, expected_error_msg)
def test_wrap_controller_exception_with_uncatched_error(self):
exception.LOG.error = mock.Mock()
def error_func():
value_error = ValueError('Hey!')
value_error.code = 500
raise value_error
correlation_id = None
try:
exception.wrap_wsme_controller_exception(error_func)()
except wsme.exc.ClientSideError as e:
correlation_id = six.text_type(e).split(":")[1].strip()
self.assertIsNotNone(correlation_id)
self.assertIsInstance(uuid.UUID(correlation_id), uuid.UUID)
self.assertTrue(exception.LOG.error.called)
(args, kargs) = exception.LOG.error.call_args
self.assertTrue(correlation_id in args)
self.assertTrue(correlation_id in str(args[0] % args[1:]))
|
{
"content_hash": "8a018456214860100f2fa729116e4661",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 78,
"avg_line_length": 35.575221238938056,
"alnum_prop": 0.6271144278606965,
"repo_name": "devdattakulkarni/test-solum",
"id": "def2513ba0c7137ab0f0390d2ebbfd955f7057ff",
"size": "4656",
"binary": false,
"copies": "1",
"ref": "refs/heads/add-virt-driver",
"path": "solum/tests/common/test_exception_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "958"
},
{
"name": "Python",
"bytes": "1240622"
},
{
"name": "Shell",
"bytes": "82235"
}
],
"symlink_target": ""
}
|
import io
from django.core.management import call_command
from django.test import TestCase, override_settings
class MigrationTestCase(TestCase):
@override_settings(MIGRATION_MODULES={})
def test_for_missing_migrations(self):
output = io.StringIO()
options = {
'interactive': False,
'dry_run': True,
'stdout': output,
'check_changes': True,
}
try:
call_command('makemigrations', **options)
except SystemExit as e:
status_code = str(e)
else:
# the "no changes" exit code is 0
status_code = '0'
if status_code == '1':
self.fail('There are missing migrations:\n {}'.format(output.getvalue()))
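# Equivalent one-off check from the command line (assuming a Django version
# where makemigrations has the --check flag, i.e. >= 1.10):
#     python manage.py makemigrations --check --dry-run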
|
{
"content_hash": "2ed4e5210fea09d5a00274012ec1c8f4",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 85,
"avg_line_length": 27.392857142857142,
"alnum_prop": 0.5684485006518905,
"repo_name": "divio/django-cms",
"id": "3fc8c043a8cb6c1cf86d2eba655d52c6ad99a63e",
"size": "767",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "cms/tests/test_migrations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "132972"
},
{
"name": "HTML",
"bytes": "201508"
},
{
"name": "JavaScript",
"bytes": "1238070"
},
{
"name": "Python",
"bytes": "2360702"
},
{
"name": "Shell",
"bytes": "447"
}
],
"symlink_target": ""
}
|
"""Add RTP task jobid entry to the M&C database with a start_time of "now".
This script can be used for either single obsid tasks or multiple obsid tasks. For
multiple obsid tasks, pass the list of files to the `--file_list` parameter.
This script adds entries with a start_time of "now" meaning that the job was just
started. To update the table with past times, use the appropriate methods on the
MCSession object.
"""
from astropy.time import Time
import hera_mc.mc as mc
import hera_mc.utils as mcutils
if __name__ == "__main__":
parser = mc.get_mc_argument_parser()
parser.add_argument(
"filename",
type=str,
help=(
"file processed by RTP corresponding to obsid, or obsid_start for "
"multiple obsid tasks."
),
)
parser.add_argument("task_name", type=str, help="RTP task name")
parser.add_argument("job_id", type=int, help="Slurm Job ID of the RTP task.")
parser.add_argument(
"--file_list",
dest="file_list",
nargs="+",
type=str,
default=None,
help="List of files included in this task, only used for multiple obsid tasks. "
"Will add entries to the `rtp_task_multiple_track` and "
"`rtp_task_multiple_resource_record` tables rather than to the "
"`rtp_task_jobid` table.",
)
args = parser.parse_args()
# extract obsid from input file
obsid = mcutils.get_obsid_from_file(args.filename)
if args.file_list is not None:
# extract obsid for each file
obsid_list = []
for filename in args.file_list:
oid = mcutils.get_obsid_from_file(filename)
obsid_list.append(oid)
db = mc.connect_to_mc_db(args)
with db.sessionmaker() as session:
if args.file_list is not None:
for oid in obsid_list:
# check to see if this has already been added
rows = session.get_rtp_task_multiple_track(
obsid_start=obsid, task_name=args.task_name, obsid=oid
)
if len(rows) == 0:
# add the mapping
session.add_rtp_task_multiple_track(
obsid_start=obsid,
task_name=args.task_name,
obsid=oid,
)
session.add_rtp_task_multiple_jobid(
obsid_start=obsid,
task_name=args.task_name,
start_time=Time.now(),
job_id=args.job_id,
)
else:
session.add_rtp_task_jobid(
obsid=obsid,
task_name=args.task_name,
start_time=Time.now(),
job_id=args.job_id,
)
|
{
"content_hash": "15fe8a9688a56a680c93df83039c335e",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 88,
"avg_line_length": 34.65,
"alnum_prop": 0.5642135642135642,
"repo_name": "HERA-Team/hera_mc",
"id": "72886eb21587ee09a4461df167bc54767a29f6fc",
"size": "2917",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "scripts/add_rtp_task_jobid.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1238267"
},
{
"name": "Shell",
"bytes": "458"
}
],
"symlink_target": ""
}
|
from django import forms
from django.contrib import admin
from i18n.forms import I18nTextarea, I18nTextInput
from .models import Page
class PageForm(forms.ModelForm):
class Meta:
model = Page
fields = "__all__"
widgets = {
"title": I18nTextInput,
"slug": I18nTextInput,
"content": I18nTextarea,
}
@admin.register(Page)
class PageAdmin(admin.ModelAdmin):
list_display = ("title", "published", "slug")
form = PageForm
|
{
"content_hash": "3de4933ea4914b29409666e545e9f1d0",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 50,
"avg_line_length": 22.772727272727273,
"alnum_prop": 0.6287425149700598,
"repo_name": "patrick91/pycon",
"id": "89dc341cf795afb040196137fcff45d63b6e640e",
"size": "501",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "backend/pages/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1456"
},
{
"name": "Python",
"bytes": "13911"
}
],
"symlink_target": ""
}
|
from sfa.util.faults import RecordNotFound
from sfa.util.method import Method
from sfa.trust.credential import Credential
from sfa.storage.parameter import Parameter, Mixed
class GetGids(Method):
"""
Get a list of record information (hrn, gid and type) for
the specified hrns.
@param cred credential string
@param cert certificate string
    @return list of records, each reduced to its hrn, type and gid fields
"""
interfaces = ['registry']
accepts = [
Mixed(Parameter(str, "Human readable name (hrn or xrn)"),
Parameter(type([str]), "List of Human readable names (hrn or xrn)")),
Mixed(Parameter(str, "Credential string"),
Parameter(type([str]), "List of credentials")),
]
returns = [Parameter(dict, "Dictionary of gids keyed on hrn")]
def call(self, xrns, creds):
# validate the credential
valid_creds = self.api.auth.checkCredentials(creds, 'getgids')
        # xxx pylint: origin_hrn is unused
origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn()
# resolve the record
records = self.api.manager.Resolve(self.api, xrns, details = False)
if not records:
raise RecordNotFound(xrns)
allowed_fields = ['hrn', 'type', 'gid']
for record in records:
            for key in list(record.keys()):
                if key not in allowed_fields:
                    del record[key]
return records
|
{
"content_hash": "dbcaf2afdd9ed6f1ba0f729b2177641e",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 83,
"avg_line_length": 32.46666666666667,
"alnum_prop": 0.6091718001368925,
"repo_name": "onelab-eu/sfa",
"id": "623fbe92c933db95195c705ea877dd05a716655c",
"size": "1461",
"binary": false,
"copies": "2",
"ref": "refs/heads/geni-v3",
"path": "sfa/methods/GetGids.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "424"
},
{
"name": "Makefile",
"bytes": "14208"
},
{
"name": "Python",
"bytes": "1439281"
},
{
"name": "Shell",
"bytes": "19422"
},
{
"name": "XSLT",
"bytes": "15293"
}
],
"symlink_target": ""
}
|
import os
from django.conf.urls.defaults import *
from django.conf import settings
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
(r'^admin/', include(admin.site.urls)),
)
if settings.DEBUG:
urlpatterns += patterns('',
# Static content
(r'^static/(.*)$', 'django.views.static.serve', {'document_root': os.path.join(settings.FILEROOT, "static")}),
)
urlpatterns += patterns('',
# Main app
(r'^', include('lastgui.urls')),
)
|
{
"content_hash": "6f6e5df3f3e9ee1fce8b6937a3eeea40",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 118,
"avg_line_length": 22.681818181818183,
"alnum_prop": 0.6472945891783567,
"repo_name": "andrewgodwin/lastgraph",
"id": "e0c35563548f2710112ee790d27241d067e0a330",
"size": "500",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "11877"
},
{
"name": "JavaScript",
"bytes": "4027"
},
{
"name": "Python",
"bytes": "70240"
}
],
"symlink_target": ""
}
|
from oslo_config import fixture as config
from oslotest import base as test_base
from oslo_service import _options
class ServiceBaseTestCase(test_base.BaseTestCase):
def setUp(self):
super(ServiceBaseTestCase, self).setUp()
self.conf_fixture = self.useFixture(config.Config())
self.conf_fixture.register_opts(_options.eventlet_backdoor_opts)
self.conf_fixture.register_opts(_options.service_opts)
self.conf_fixture.register_opts(_options.ssl_opts)
self.conf_fixture.register_opts(_options.periodic_opts)
self.conf = self.conf_fixture.conf
self.config = self.conf_fixture.config
|
{
"content_hash": "90093c6ee328864dd65dff3a0d48af2d",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 72,
"avg_line_length": 36.166666666666664,
"alnum_prop": 0.7204301075268817,
"repo_name": "citrix-openstack-build/oslo.service",
"id": "33243a96c380efb398f733787075f48e7427b789",
"size": "1257",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "oslo_service/tests/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "108480"
}
],
"symlink_target": ""
}
|
from datetime import datetime, timedelta
import logging
import pytz
def get_utc_datetime_from_user_input(tz_str, date_time_str):
"""Returns a datetime object from a string, taking into account the time zone for that string.
Note that the date_time_str must be in the one and only acceptable format %m-%d-%Y %I:%M %p"""
try:
tz = pytz.timezone(tz_str) # Convert the time zone string into a time zone object.
except:
logging.warn("Time zone string did NOT parse. tz_str = " + tz_str)
tz = pytz.utc
# Convert the string into a datetime object (time zone naive)
send_datetime_raw = datetime.strptime(date_time_str, "%m-%d-%Y %I:%M %p")
# Set the time zone to the user's preference value (make the datetime object time zone aware instead of time zone naive).
send_datetime_in_user_tz = send_datetime_raw.replace(tzinfo=tz)
# Adjust for Daylight Savings Time if appropriate (only an issue in the US March-November).
send_datetime_adj_for_dst = send_datetime_in_user_tz - tz.normalize(send_datetime_in_user_tz).dst()
# Shift the time to UTC time (all datetime objects stored on the server should always be in UTC no exceptions)
send_datetime_in_utc = send_datetime_adj_for_dst.astimezone(pytz.utc)
# Used during development to make sure I did the time zone stuff correctly.
# print("send_datetime = " + date_time_display_format(send_datetime_in_utc, "UTC"))
# print("now = " + date_time_display_format(datetime.now(), "UTC"))
# Then remove the tzinfo to make the datatime again time zone naive, which is how AppEngine stores the time. (naive but known to be UTC)
return send_datetime_in_utc.replace(tzinfo=None)
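# Illustrative example (not part of the original module; the time zone and
# timestamp are assumed values): converting a user-entered New York local time
# into the naive UTC datetime this function returns for storage.
#     utc_dt = get_utc_datetime_from_user_input(
#         "America/New_York", "07-04-2020 09:30 PM")
#     # utc_dt is time zone naive, but its value is in UTC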
def is_within_next_24_hours(send_time):
"""Returns true if the datetime object passed in is within the next 24 hours."""
one_day = timedelta(1) # Create a timedelta object set to 1 day long
    time_delta = send_time - datetime.utcnow()  # Creates a timedelta object that is the difference between now and the send_time
return time_delta.total_seconds() > 0 and time_delta < one_day
def get_seconds_since_epoch(datetime):
"""Returns the seconds since epoch. Note, this function is not used in this app I just like it."""
return int(datetime.strftime("%s"))
# ## Jinja filters
def date_time_input_format(value, tz_str):
"""Take a date time object and convert it into a string that uses the required input box format.
Note, this format MUST match the format used in the get_utc_datetime_from_user_input function."""
try:
tz = pytz.timezone(tz_str)
except:
tz = pytz.utc
value = value.replace(tzinfo=pytz.utc).astimezone(tz)
return value.strftime("%m-%d-%Y %I:%M %p")
def date_time_display_format(value, tz_str):
"""Take a date time object and convert it into a string that can be displayed in the text message event tables."""
try:
tz = pytz.timezone(tz_str)
except:
tz = pytz.utc
value = value.replace(tzinfo=pytz.utc).astimezone(tz)
    now = datetime.now(tz)  # snapshot once so all comparisons use the same instant
    if value.year == now.year:
        # current year
        if value.month == now.month and value.day == now.day:
# today, just show the time
format_str = "Today %I:%M %p %Z"
else:
# otherwise show the month and day
format_str = "%b %d %I:%M %p %Z"
else:
# previous year, show the year, too
format_str = "%m/%d/%y %I:%M %p %Z"
return value.strftime(format_str)
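# Illustrative wiring sketch (assumption: the app registers these as Jinja
# filters elsewhere; `environment` is a hypothetical jinja2.Environment):
#     environment.filters['date_time_input_format'] = date_time_input_format
#     environment.filters['date_time_display_format'] = date_time_display_format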
|
{
"content_hash": "f58d06f78e0a7ba7e2b297969d02bc45",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 140,
"avg_line_length": 43.67948717948718,
"alnum_prop": 0.6935720575286175,
"repo_name": "xniccum/chorewheel",
"id": "4e6e1fb63bd01c802facc86fc605f2dd921b48b2",
"size": "3407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/date_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1052"
},
{
"name": "HTML",
"bytes": "23216"
},
{
"name": "JavaScript",
"bytes": "6157"
},
{
"name": "Python",
"bytes": "88700"
}
],
"symlink_target": ""
}
|
'''
***
Modified generic daemon class
***
Author: http://www.jejik.com/articles/2007/02/
a_simple_unix_linux_daemon_in_python/www.boxedice.com
License: http://creativecommons.org/licenses/by-sa/3.0/
Changes: 23rd Jan 2009 (David Mytton <david@boxedice.com>)
- Replaced hard coded '/dev/null in __init__ with os.devnull
- Added OS check to conditionally remove code that doesn't
work on OS X
- Added output to console on completion
- Tidied up formatting
11th Mar 2009 (David Mytton <david@boxedice.com>)
- Fixed problem with daemon exiting on Python 2.4
(before SystemExit was part of the Exception base)
        13th Aug 2010 (David Mytton <david@boxedice.com>)
- Fixed unhandled exception if PID file is empty
'''
# Core modules
import atexit
import os
import sys
import time
import signal
class Daemon(object):
"""
A generic daemon class.
    Usage: subclass the Daemon class and override the run() method.
    An illustrative example sketch appears at the end of this module.
"""
def __init__(self, pidfile, stdin=os.devnull,
stdout=os.devnull, stderr=os.devnull,
home_dir='.', umask=22, verbose=1, use_gevent=False):
self.stdin = stdin
self.stdout = stdout
self.stderr = stderr
self.pidfile = pidfile
self.home_dir = home_dir
self.verbose = verbose
self.umask = umask
self.daemon_alive = True
self.use_gevent = use_gevent
def daemonize(self):
"""
Do the UNIX double-fork magic, see Stevens' "Advanced
Programming in the UNIX Environment" for details (ISBN 0201563177)
http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
"""
try:
pid = os.fork()
if pid > 0:
# Exit first parent
sys.exit(0)
except OSError as e:
sys.stderr.write(
"fork #1 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
# Decouple from parent environment
os.chdir(self.home_dir)
os.setsid()
os.umask(self.umask)
# Do second fork
try:
pid = os.fork()
if pid > 0:
# Exit from second parent
sys.exit(0)
except OSError as e:
sys.stderr.write(
"fork #2 failed: %d (%s)\n" % (e.errno, e.strerror))
sys.exit(1)
if sys.platform != 'darwin': # This block breaks on OS X
# Redirect standard file descriptors
sys.stdout.flush()
sys.stderr.flush()
si = open(self.stdin, 'r')
so = open(self.stdout, 'a+')
if self.stderr:
                # buffering=0 is only valid for binary mode on Python 3;
                # line-buffer the text stream instead
                se = open(self.stderr, 'a+', 1)
else:
se = so
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
def sigtermhandler(signum, frame):
self.daemon_alive = False
sys.exit()
if self.use_gevent:
import gevent
gevent.reinit()
gevent.signal(signal.SIGTERM, sigtermhandler, signal.SIGTERM, None)
gevent.signal(signal.SIGINT, sigtermhandler, signal.SIGINT, None)
else:
signal.signal(signal.SIGTERM, sigtermhandler)
signal.signal(signal.SIGINT, sigtermhandler)
if self.verbose >= 1:
print("Started")
# Write pidfile
atexit.register(
self.delpid) # Make sure pid file is removed if we quit
pid = str(os.getpid())
        with open(self.pidfile, 'w+') as f:
            f.write("%s\n" % pid)
def delpid(self):
os.remove(self.pidfile)
def start(self, *args, **kwargs):
"""
Start the daemon
"""
if self.verbose >= 1:
print("Starting...")
        # Check for a pidfile to see if the daemon already runs
        pid = self.get_pid()
if pid:
message = "pidfile %s already exists. Is it already running?\n"
sys.stderr.write(message % self.pidfile)
sys.exit(1)
# Start the daemon
self.daemonize()
self.run(*args, **kwargs)
def stop(self):
"""
Stop the daemon
"""
if self.verbose >= 1:
print("Stopping...")
# Get the pid from the pidfile
pid = self.get_pid()
if not pid:
message = "pidfile %s does not exist. Not running?\n"
sys.stderr.write(message % self.pidfile)
# Just to be sure. A ValueError might occur if the PID file is
# empty but does actually exist
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
return # Not an error in a restart
# Try killing the daemon process
try:
i = 0
while 1:
os.kill(pid, signal.SIGTERM)
time.sleep(0.1)
i = i + 1
if i % 10 == 0:
os.kill(pid, signal.SIGHUP)
except OSError as err:
err = str(err)
if err.find("No such process") > 0:
if os.path.exists(self.pidfile):
os.remove(self.pidfile)
else:
print(str(err))
sys.exit(1)
if self.verbose >= 1:
print("Stopped")
def restart(self):
"""
Restart the daemon
"""
self.stop()
self.start()
def get_pid(self):
try:
pf = open(self.pidfile, 'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
except SystemExit:
pid = None
return pid
def is_running(self):
pid = self.get_pid()
if pid is None:
print('Process is stopped')
elif os.path.exists('/proc/%d' % pid):
print('Process (pid %d) is running...' % pid)
else:
print('Process (pid %d) is killed' % pid)
return pid and os.path.exists('/proc/%d' % pid)
def run(self):
"""
You should override this method when you subclass Daemon.
It will be called after the process has been
daemonized by start() or restart().
"""
raise NotImplementedError
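# Example usage (illustrative sketch, not part of the original module): as the
# class docstring says, subclass Daemon and override run(). The pidfile path
# and sleep interval are arbitrary demonstration values.
#     class SleepDaemon(Daemon):
#         def run(self):
#             while self.daemon_alive:
#                 time.sleep(5)
#     SleepDaemon('/tmp/sleep_daemon.pid').start()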
|
{
"content_hash": "7d53ef982779e7e14bdafbbcc94fc56a",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 79,
"avg_line_length": 30.116071428571427,
"alnum_prop": 0.5128965312777942,
"repo_name": "soasme/blackgate",
"id": "bd8ba2e1bcb648dfec177d519fb5878d7774f351",
"size": "6746",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blackgate/daemon.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "194"
},
{
"name": "Python",
"bytes": "23906"
}
],
"symlink_target": ""
}
|
import unittest
from unittest import mock
import pytest
import telegram
import airflow
from airflow.models import Connection
from airflow.providers.telegram.operators.telegram import TelegramOperator
from airflow.utils import db
TELEGRAM_TOKEN = "xxx:xxx"
class TestTelegramOperator(unittest.TestCase):
def setUp(self):
db.merge_conn(
Connection(
conn_id='telegram_default',
conn_type='http',
password=TELEGRAM_TOKEN,
)
)
db.merge_conn(
Connection(
conn_id='telegram_default-with-chat-id',
conn_type='http',
password=TELEGRAM_TOKEN,
host="-420913222",
)
)
@mock.patch('airflow.providers.telegram.operators.telegram.TelegramHook')
def test_should_send_message_when_all_parameters_are_provided(self, mock_telegram_hook):
mock_telegram_hook.return_value = mock.Mock()
mock_telegram_hook.return_value.send_message.return_value = True
hook = TelegramOperator(
telegram_conn_id='telegram_default',
chat_id='-420913222',
task_id='telegram',
text="some non empty text",
)
hook.execute(None)
mock_telegram_hook.assert_called_once_with(
telegram_conn_id='telegram_default',
chat_id='-420913222',
token=None,
)
mock_telegram_hook.return_value.send_message.assert_called_once_with(
{'text': 'some non empty text'},
)
def test_should_throw_exception_if_connection_id_is_none(self):
with pytest.raises(airflow.exceptions.AirflowException) as ctx:
TelegramOperator(task_id="telegram", telegram_conn_id=None)
assert "No valid Telegram connection id supplied." == str(ctx.value)
@mock.patch('airflow.providers.telegram.operators.telegram.TelegramHook')
def test_should_throw_exception_if_telegram_hook_throws_any_exception(self, mock_telegram_hook):
def side_effect(*args, **kwargs):
raise telegram.error.TelegramError("cosmic rays caused bit flips")
mock_telegram_hook.return_value = mock.Mock()
mock_telegram_hook.return_value.send_message.side_effect = side_effect
with pytest.raises(telegram.error.TelegramError) as ctx:
hook = TelegramOperator(
telegram_conn_id='telegram_default',
task_id='telegram',
text="some non empty text",
)
hook.execute(None)
assert "cosmic rays caused bit flips" == str(ctx.value)
@mock.patch('airflow.providers.telegram.operators.telegram.TelegramHook')
def test_should_forward_all_args_to_telegram(self, mock_telegram_hook):
mock_telegram_hook.return_value = mock.Mock()
mock_telegram_hook.return_value.send_message.return_value = True
hook = TelegramOperator(
telegram_conn_id='telegram_default',
chat_id='-420913222',
task_id='telegram',
text="some non empty text",
telegram_kwargs={"custom_arg": "value"},
)
hook.execute(None)
mock_telegram_hook.assert_called_once_with(
telegram_conn_id='telegram_default',
chat_id='-420913222',
token=None,
)
mock_telegram_hook.return_value.send_message.assert_called_once_with(
{'custom_arg': 'value', 'text': 'some non empty text'},
)
@mock.patch('airflow.providers.telegram.operators.telegram.TelegramHook')
def test_should_give_precedence_to_text_passed_in_constructor(self, mock_telegram_hook):
mock_telegram_hook.return_value = mock.Mock()
mock_telegram_hook.return_value.send_message.return_value = True
hook = TelegramOperator(
telegram_conn_id='telegram_default',
chat_id='-420913222',
task_id='telegram',
text="some non empty text - higher precedence",
telegram_kwargs={"custom_arg": "value", "text": "some text, that will be ignored"},
)
hook.execute(None)
mock_telegram_hook.assert_called_once_with(
telegram_conn_id='telegram_default',
chat_id='-420913222',
token=None,
)
mock_telegram_hook.return_value.send_message.assert_called_once_with(
{'custom_arg': 'value', 'text': 'some non empty text - higher precedence'},
)
def test_should_return_template_fields(self):
hook = TelegramOperator(
telegram_conn_id='telegram_default',
chat_id='-420913222',
task_id='telegram',
text="some non empty text - higher precedence",
telegram_kwargs={"custom_arg": "value", "text": "some text, that will be ignored"},
)
assert ('text', 'chat_id') == hook.template_fields
@mock.patch('airflow.providers.telegram.operators.telegram.TelegramHook')
def test_should_return_templatized_text_field(self, mock_hook):
operator = TelegramOperator(
telegram_conn_id='telegram_default',
chat_id='-420913222',
task_id='telegram',
text="execution date is {{ ds }}",
telegram_kwargs={"custom_arg": "value", "text": "should be ignored"},
)
operator.render_template_fields({"ds": "2021-02-04"})
operator.execute(None)
assert operator.text == "execution date is 2021-02-04"
assert 'text' in operator.telegram_kwargs
assert operator.telegram_kwargs['text'] == "execution date is 2021-02-04"
def test_should_return_templatized_chat_id_field(self):
operator = TelegramOperator(
telegram_conn_id='telegram_default',
chat_id='{{ chat_id }}',
task_id='telegram',
text="text",
telegram_kwargs={"custom_arg": "value", "text": "should be ignored"},
)
operator.render_template_fields({"chat_id": "1234567"})
assert operator.chat_id == "1234567"
|
{
"content_hash": "85206a29a4204efdc49d1a2a713cd6ee",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 100,
"avg_line_length": 38.275,
"alnum_prop": 0.6077726975832789,
"repo_name": "mistercrunch/airflow",
"id": "8d4fde034185eb4b003aa827d0c90ceda58c46a6",
"size": "6911",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "tests/providers/telegram/operators/test_telegram.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "36341"
},
{
"name": "HTML",
"bytes": "99243"
},
{
"name": "JavaScript",
"bytes": "891460"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "773270"
},
{
"name": "Shell",
"bytes": "5659"
}
],
"symlink_target": ""
}
|
import mock
from oslo.config import cfg
from neutron.db import api as db
from neutron.extensions import providernet as provider
from neutron.openstack.common import importutils
from neutron.plugins.cisco.common import cisco_constants as const
from neutron.plugins.cisco.common import cisco_exceptions as cisco_exc
from neutron.plugins.cisco.common import config as cisco_config
from neutron.plugins.cisco.db import network_db_v2 as cdb
from neutron.plugins.cisco.nexus import cisco_nexus_plugin_v2
from neutron.tests import base
NEXUS_IP_ADDRESS = '1.1.1.1'
HOSTNAME1 = 'testhost1'
HOSTNAME2 = 'testhost2'
HOSTNAME3 = 'testhost3'
INSTANCE1 = 'testvm1'
INSTANCE2 = 'testvm2'
INSTANCE3 = 'testvm3'
NEXUS_PORT1 = '1/10'
NEXUS_PORT2 = '1/20'
NEXUS_PC_IP_ADDRESS = '2.2.2.2'
NEXUS_PORTCHANNELS = 'portchannel:2'
PC_HOSTNAME = 'testpchost'
NEXUS_SSH_PORT = '22'
NEXUS_DRIVER = ('neutron.plugins.cisco.nexus.'
'cisco_nexus_network_driver_v2.CiscoNEXUSDriver')
NET_ATTRS = [const.NET_ID,
const.NET_NAME,
const.NET_VLAN_NAME,
const.NET_VLAN_ID]
class TestCiscoNexusPlugin(base.BaseTestCase):
def setUp(self):
"""Set up function."""
super(TestCiscoNexusPlugin, self).setUp()
self.tenant_id = "test_tenant_cisco1"
self.net_name = "test_network_cisco1"
self.net_id = 7
self.vlan_name = "q-" + str(self.net_id) + "vlan"
self.vlan_id = 267
self.second_tenant_id = "test_tenant_2"
self.second_net_name = "test_network_cisco2"
self.second_net_id = 5
self.second_vlan_name = "q-" + str(self.second_net_id) + "vlan"
self.second_vlan_id = 265
self._pchostname = PC_HOSTNAME
self.attachment1 = {
const.TENANT_ID: self.tenant_id,
const.INSTANCE_ID: INSTANCE1,
const.HOST_NAME: HOSTNAME1,
}
self.attachment2 = {
const.TENANT_ID: self.second_tenant_id,
const.INSTANCE_ID: INSTANCE2,
const.HOST_NAME: HOSTNAME2,
}
self.attachment3 = {
const.TENANT_ID: self.second_tenant_id,
const.INSTANCE_ID: INSTANCE3,
const.HOST_NAME: HOSTNAME3,
}
self.network1 = {
const.NET_ID: self.net_id,
const.NET_NAME: self.net_name,
const.NET_VLAN_NAME: self.vlan_name,
const.NET_VLAN_ID: self.vlan_id,
}
self.network2 = {
const.NET_ID: self.second_net_id,
const.NET_NAME: self.second_net_name,
const.NET_VLAN_NAME: self.second_vlan_name,
const.NET_VLAN_ID: self.second_vlan_id,
}
self.network3 = {
const.NET_ID: 8,
const.NET_NAME: 'vpc_net',
const.NET_VLAN_NAME: 'q-268',
const.NET_VLAN_ID: '268',
}
self.delete_port_args_1 = [
self.attachment1[const.INSTANCE_ID],
self.network1[const.NET_VLAN_ID],
]
self.providernet = {
const.NET_ID: 9,
const.NET_NAME: 'pnet1',
const.NET_VLAN_NAME: 'p-300',
const.NET_VLAN_ID: 300,
provider.NETWORK_TYPE: 'vlan',
provider.PHYSICAL_NETWORK: self.net_name + '200:299',
provider.SEGMENTATION_ID: 300,
}
def new_nexus_init(self):
self._client = importutils.import_object(NEXUS_DRIVER)
self._client.nexus_switches = {
(NEXUS_IP_ADDRESS, HOSTNAME1): NEXUS_PORT1,
(NEXUS_IP_ADDRESS, 'ssh_port'): NEXUS_SSH_PORT,
(NEXUS_IP_ADDRESS, HOSTNAME2): NEXUS_PORT2,
(NEXUS_IP_ADDRESS, 'ssh_port'): NEXUS_SSH_PORT,
(NEXUS_PC_IP_ADDRESS, 'ssh_port'): NEXUS_SSH_PORT,
}
self._nexus_switches = {
('NEXUS_SWITCH', NEXUS_IP_ADDRESS, HOSTNAME1): NEXUS_PORT1,
('NEXUS_SWITCH', NEXUS_IP_ADDRESS, HOSTNAME2): NEXUS_PORT2,
('NEXUS_SWITCH', NEXUS_PC_IP_ADDRESS, HOSTNAME3):
NEXUS_PORTCHANNELS,
('NEXUS_SWITCH', NEXUS_PC_IP_ADDRESS, 'ssh_port'):
NEXUS_SSH_PORT,
('NEXUS_SWITCH', NEXUS_IP_ADDRESS, HOSTNAME3):
NEXUS_PORTCHANNELS,
('NEXUS_SWITCH', NEXUS_IP_ADDRESS, 'ssh_port'): NEXUS_SSH_PORT,
}
self._client.credentials = {
NEXUS_IP_ADDRESS: {
'username': 'admin',
'password': 'pass1234'
},
NEXUS_PC_IP_ADDRESS: {
'username': 'admin',
'password': 'password'
},
}
db.configure_db()
self.addCleanup(db.clear_db)
# Use a mock netconf client
self.mock_ncclient = mock.Mock()
self.patch_obj = mock.patch.dict('sys.modules',
{'ncclient': self.mock_ncclient})
self.patch_obj.start()
self.addCleanup(self.patch_obj.stop)
with mock.patch.object(cisco_nexus_plugin_v2.NexusPlugin,
'__init__', new=new_nexus_init):
self._cisco_nexus_plugin = cisco_nexus_plugin_v2.NexusPlugin()
# Set the Cisco config module's first configured device IP address
# according to the preceding switch config.
mock.patch.object(cisco_config, 'first_device_ip',
new=NEXUS_IP_ADDRESS).start()
self.addCleanup(mock.patch.stopall)
def test_create_delete_networks(self):
"""Tests creation of two new Virtual Networks."""
new_net_dict = self._cisco_nexus_plugin.create_network(
self.network1, self.attachment1)
for attr in NET_ATTRS:
self.assertEqual(new_net_dict[attr], self.network1[attr])
expected_instance_id = self._cisco_nexus_plugin.delete_port(
INSTANCE1, self.vlan_id)
self.assertEqual(expected_instance_id, INSTANCE1)
new_net_dict = self._cisco_nexus_plugin.create_network(
self.network2, self.attachment1)
for attr in NET_ATTRS:
self.assertEqual(new_net_dict[attr], self.network2[attr])
expected_instance_id = self._cisco_nexus_plugin.delete_port(
INSTANCE1, self.second_vlan_id)
self.assertEqual(expected_instance_id, INSTANCE1)
def _create_delete_providernet(self, auto_create, auto_trunk):
cfg.CONF.set_override(
'provider_vlan_auto_create', auto_create, 'CISCO')
cfg.CONF.set_override(
'provider_vlan_auto_trunk', auto_trunk, 'CISCO')
self.addCleanup(cfg.CONF.reset)
with mock.patch.object(cdb, 'is_provider_vlan',
return_value=True) as mock_db:
# Create a provider network
new_net_dict = self._cisco_nexus_plugin.create_network(
self.providernet, self.attachment1)
            self.assertEqual(mock_db.call_count, 1)
for attr in NET_ATTRS:
self.assertEqual(new_net_dict[attr], self.providernet[attr])
# Delete the provider network
instance_id = self._cisco_nexus_plugin.delete_port(
self.attachment1[const.INSTANCE_ID],
self.providernet[const.NET_VLAN_ID])
self.assertEqual(instance_id,
self.attachment1[const.INSTANCE_ID])
def test_create_delete_providernet(self):
self._create_delete_providernet(auto_create=True, auto_trunk=True)
def test_create_delete_provider_vlan_network_cfg_auto_man(self):
self._create_delete_providernet(auto_create=True, auto_trunk=False)
def test_create_delete_provider_vlan_network_cfg_man_auto(self):
self._create_delete_providernet(auto_create=False, auto_trunk=True)
def test_create_delete_provider_vlan_network_cfg_man_man(self):
self._create_delete_providernet(auto_create=False, auto_trunk=False)
def test_create_delete_network_portchannel(self):
"""Tests creation of a network over a portchannel."""
new_net_dict = self._cisco_nexus_plugin.create_network(
self.network3, self.attachment3)
self.assertEqual(new_net_dict[const.NET_ID],
self.network3[const.NET_ID])
self.assertEqual(new_net_dict[const.NET_NAME],
self.network3[const.NET_NAME])
self.assertEqual(new_net_dict[const.NET_VLAN_NAME],
self.network3[const.NET_VLAN_NAME])
self.assertEqual(new_net_dict[const.NET_VLAN_ID],
self.network3[const.NET_VLAN_ID])
self._cisco_nexus_plugin.delete_port(
INSTANCE3, self.network3[const.NET_VLAN_ID]
)
def _add_router_interface(self):
"""Add a router interface using fixed (canned) parameters."""
vlan_name = self.vlan_name
vlan_id = self.vlan_id
gateway_ip = '10.0.0.1/24'
router_id = '00000R1'
subnet_id = '00001'
return self._cisco_nexus_plugin.add_router_interface(
vlan_name, vlan_id, subnet_id, gateway_ip, router_id)
def _remove_router_interface(self):
"""Remove a router interface created with _add_router_interface."""
vlan_id = self.vlan_id
router_id = '00000R1'
return self._cisco_nexus_plugin.remove_router_interface(vlan_id,
router_id)
def test_nexus_add_remove_router_interface(self):
"""Tests addition of a router interface."""
self.assertTrue(self._add_router_interface())
self.assertEqual(self._remove_router_interface(), '00000R1')
def test_nexus_dup_add_router_interface(self):
"""Tests a duplicate add of a router interface."""
self._add_router_interface()
try:
self.assertRaises(
cisco_exc.SubnetInterfacePresent,
self._add_router_interface)
finally:
self._remove_router_interface()
def test_nexus_no_svi_switch_exception(self):
"""Tests failure to find a Nexus switch for SVI placement."""
# Clear the Nexus switches dictionary.
with mock.patch.dict(self._cisco_nexus_plugin._client.nexus_switches,
{}, clear=True):
# Clear the first Nexus IP address discovered in config
with mock.patch.object(cisco_config, 'first_device_ip',
new=None):
self.assertRaises(cisco_exc.NoNexusSviSwitch,
self._add_router_interface)
def test_nexus_add_port_after_router_interface(self):
"""Tests creating a port after a router interface.
Test creating a port after an SVI router interface has
been created. Only a trunk call should be invoked and the
plugin should not attempt to recreate the vlan.
"""
self._add_router_interface()
# Create a network on the switch
self._cisco_nexus_plugin.create_network(
self.network1, self.attachment1)
# Grab a list of all mock calls from ncclient
last_cfgs = (self.mock_ncclient.manager.connect.return_value.
edit_config.mock_calls)
# The last ncclient call should be for trunking and the second
# to last call should be creating the SVI interface
last_cfg = last_cfgs[-1][2]['config']
self.assertIn('allowed', last_cfg)
slast_cfg = last_cfgs[-2][2]['config']
self.assertIn('10.0.0.1/24', slast_cfg)
|
{
"content_hash": "38ba52ae93672469f176bd8cfdb6dd82",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 79,
"avg_line_length": 40.701388888888886,
"alnum_prop": 0.5876130353182051,
"repo_name": "Juniper/contrail-dev-neutron",
"id": "6a959d6095c5d33a835f2a40c60e3e099db3df7a",
"size": "12313",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/cisco/test_nexus_plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67930"
},
{
"name": "Python",
"bytes": "8281839"
},
{
"name": "Shell",
"bytes": "8920"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
}
|
"""
This file contains tests for misc.py
"""
import sys
import pytest
import numpy as np
import matmodlab2
import matmodlab2.core.misc as misc
def test_is_listlike():
"""Is item list like?"""
assert not misc.is_listlike('aaa')
assert misc.is_listlike([0,1,2])
assert misc.is_listlike((0,1,2))
assert not misc.is_listlike(None)
def test_is_stringlike():
"""Is item string like?"""
assert misc.is_stringlike('aaa')
assert not misc.is_stringlike([0,1,2])
assert not misc.is_stringlike((0,1,2))
def test_is_scalarlike():
"""Is item scalar like?"""
assert misc.is_scalarlike(5)
assert misc.is_scalarlike(5.)
assert misc.is_scalarlike(np.array(5.))
assert misc.is_scalarlike(np.array(5))
assert not misc.is_scalarlike([1,2])
assert not misc.is_scalarlike(np.array([1,2]))
if __name__ == '__main__':
    sys.exit(pytest.main([__file__]))
|
{
"content_hash": "16ca1df9706c0accc3704abd9990e179",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 50,
"avg_line_length": 25.88235294117647,
"alnum_prop": 0.6590909090909091,
"repo_name": "matmodlab/matmodlab2",
"id": "353adc107eebb7d34e21236ef994a1d050d1d7d9",
"size": "904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_core_misc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Fortran",
"bytes": "419488"
},
{
"name": "Jupyter Notebook",
"bytes": "1458750"
},
{
"name": "Python",
"bytes": "400440"
}
],
"symlink_target": ""
}
|
"""The tests for the Switch component."""
# pylint: disable=protected-access
import unittest
from homeassistant.setup import setup_component, async_setup_component
from homeassistant import core, loader
from homeassistant.components import switch
from homeassistant.const import STATE_ON, STATE_OFF, CONF_PLATFORM
from tests.common import get_test_home_assistant
from tests.components.switch import common
class TestSwitch(unittest.TestCase):
"""Test the switch module."""
# pylint: disable=invalid-name
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
platform = loader.get_component(self.hass, 'switch.test')
platform.init()
# Switch 1 is ON, switch 2 is OFF
self.switch_1, self.switch_2, self.switch_3 = \
platform.DEVICES
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_methods(self):
"""Test is_on, turn_on, turn_off methods."""
assert setup_component(
self.hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: 'test'}}
)
assert switch.is_on(self.hass)
assert STATE_ON == \
self.hass.states.get(switch.ENTITY_ID_ALL_SWITCHES).state
assert switch.is_on(self.hass, self.switch_1.entity_id)
assert not switch.is_on(self.hass, self.switch_2.entity_id)
assert not switch.is_on(self.hass, self.switch_3.entity_id)
common.turn_off(self.hass, self.switch_1.entity_id)
common.turn_on(self.hass, self.switch_2.entity_id)
self.hass.block_till_done()
assert switch.is_on(self.hass)
assert not switch.is_on(self.hass, self.switch_1.entity_id)
assert switch.is_on(self.hass, self.switch_2.entity_id)
# Turn all off
common.turn_off(self.hass)
self.hass.block_till_done()
assert not switch.is_on(self.hass)
assert STATE_OFF == \
self.hass.states.get(switch.ENTITY_ID_ALL_SWITCHES).state
assert not switch.is_on(self.hass, self.switch_1.entity_id)
assert not switch.is_on(self.hass, self.switch_2.entity_id)
assert not switch.is_on(self.hass, self.switch_3.entity_id)
# Turn all on
common.turn_on(self.hass)
self.hass.block_till_done()
assert switch.is_on(self.hass)
assert STATE_ON == \
self.hass.states.get(switch.ENTITY_ID_ALL_SWITCHES).state
assert switch.is_on(self.hass, self.switch_1.entity_id)
assert switch.is_on(self.hass, self.switch_2.entity_id)
assert switch.is_on(self.hass, self.switch_3.entity_id)
def test_setup_two_platforms(self):
"""Test with bad configuration."""
# Test if switch component returns 0 switches
test_platform = loader.get_component(self.hass, 'switch.test')
test_platform.init(True)
loader.set_component(self.hass, 'switch.test2', test_platform)
test_platform.init(False)
assert setup_component(
self.hass, switch.DOMAIN, {
switch.DOMAIN: {CONF_PLATFORM: 'test'},
'{} 2'.format(switch.DOMAIN): {CONF_PLATFORM: 'test2'},
}
)
async def test_switch_context(hass, hass_admin_user):
"""Test that switch context works."""
assert await async_setup_component(hass, 'switch', {
'switch': {
'platform': 'test'
}
})
state = hass.states.get('switch.ac')
assert state is not None
await hass.services.async_call('switch', 'toggle', {
'entity_id': state.entity_id,
}, True, core.Context(user_id=hass_admin_user.id))
state2 = hass.states.get('switch.ac')
assert state2 is not None
assert state.state != state2.state
assert state2.context.user_id == hass_admin_user.id
|
{
"content_hash": "99eca857053890ca1c19f1dc4bd218c9",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 78,
"avg_line_length": 35.00892857142857,
"alnum_prop": 0.6360622290232084,
"repo_name": "HydrelioxGitHub/home-assistant",
"id": "d39c5a24ddc53ae7963b2cce8c7c971e4f1d4aea",
"size": "3921",
"binary": false,
"copies": "5",
"ref": "refs/heads/dev",
"path": "tests/components/switch/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "14330009"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import django
from django.contrib.auth.models import User
from django.template import Context, RequestContext, Template
from ..base import BaseTestCase
from ..models import NonAsciiRepr
class TemplatesPanelTestCase(BaseTestCase):
def setUp(self):
super(TemplatesPanelTestCase, self).setUp()
self.panel = self.toolbar.get_panel_by_id('TemplatesPanel')
self.panel.enable_instrumentation()
self.sql_panel = self.toolbar.get_panel_by_id('SQLPanel')
self.sql_panel.enable_instrumentation()
def tearDown(self):
self.sql_panel.disable_instrumentation()
self.panel.disable_instrumentation()
super(TemplatesPanelTestCase, self).tearDown()
def test_queryset_hook(self):
t = Template("No context variables here!")
c = Context({
'queryset': User.objects.all(),
'deep_queryset': {
'queryset': User.objects.all(),
}
})
t.render(c)
# ensure the query was NOT logged
self.assertEqual(len(self.sql_panel._queries), 0)
base_ctx_idx = 1 if django.VERSION[:2] >= (1, 5) else 0
ctx = self.panel.templates[0]['context'][base_ctx_idx]
self.assertIn('<<queryset of auth.User>>', ctx)
self.assertIn('<<triggers database query>>', ctx)
def test_object_with_non_ascii_repr_in_context(self):
self.panel.process_request(self.request)
t = Template("{{ object }}")
c = Context({'object': NonAsciiRepr()})
t.render(c)
self.panel.process_response(self.request, self.response)
self.panel.generate_stats(self.request, self.response)
self.assertIn('nôt åscíì', self.panel.content)
def test_insert_content(self):
"""
Test that the panel only inserts content after generate_stats and
not the process_response.
"""
t = Template("{{ object }}")
c = Context({'object': NonAsciiRepr()})
t.render(c)
self.panel.process_response(self.request, self.response)
# ensure the panel does not have content yet.
self.assertNotIn('nôt åscíì', self.panel.content)
self.panel.generate_stats(self.request, self.response)
# ensure the panel renders correctly.
self.assertIn('nôt åscíì', self.panel.content)
def test_custom_context_processor(self):
self.panel.process_request(self.request)
t = Template("{{ content }}")
c = RequestContext(self.request, processors=[context_processor])
t.render(c)
self.panel.process_response(self.request, self.response)
self.panel.generate_stats(self.request, self.response)
self.assertIn('tests.panels.test_template.context_processor', self.panel.content)
def test_disabled(self):
config = {
'DISABLE_PANELS': set([
'debug_toolbar.panels.templates.TemplatesPanel'])
}
self.assertTrue(self.panel.enabled)
with self.settings(DEBUG_TOOLBAR_CONFIG=config):
self.assertFalse(self.panel.enabled)
def context_processor(request):
return {'content': 'set by processor'}
|
{
"content_hash": "8f29f7f3ae76310cb4bf1f64583787d2",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 89,
"avg_line_length": 37.08045977011494,
"alnum_prop": 0.6394916305021698,
"repo_name": "calvinpy/django-debug-toolbar",
"id": "cdf12f9ca31c692f194ffb9bd098edaf1569a604",
"size": "3255",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/panels/test_template.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "20246"
},
{
"name": "HTML",
"bytes": "27090"
},
{
"name": "JavaScript",
"bytes": "16563"
},
{
"name": "Makefile",
"bytes": "846"
},
{
"name": "Python",
"bytes": "155653"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from base import QueryOperation
from params import StaticParam, MultiParam, SingleParam
from models import RevisionInfo, Revision
from utils import OperationExample
DEFAULT_PROPS = 'ids|flags|timestamp|user|userid|size|sha1|comment|parsedcomment|tags'
class GetPageRevisionInfos(QueryOperation):
"""
Fetch revisions for pages.
"""
field_prefix = 'rv'
input_field = MultiParam('titles', key_prefix=False)
fields = [StaticParam('prop', 'revisions'),
MultiParam('prop', DEFAULT_PROPS)]
output_type = [RevisionInfo]
examples = [OperationExample('Coffee', 10)]
def extract_results(self, query_resp):
ret = []
pages = [p for p in query_resp.get('pages', {}).values()
if 'missing' not in p]
for pid_dict in pages:
for rev in pid_dict.get('revisions', []):
rev_dict = dict(pid_dict)
rev_dict.update(rev)
rev_info = RevisionInfo.from_query(rev_dict,
source=self.source)
ret.append(rev_info)
return ret
class GetRevisionInfos(GetPageRevisionInfos):
"""
Fetch information about specific revision.
"""
input_field = MultiParam('revids', attr='rev_id', key_prefix=False)
output_type = RevisionInfo
examples = [OperationExample(['538903663', '539916351', '531458383'])]
def prepare_params(self, *a, **kw):
ret = super(GetRevisionInfos, self).prepare_params()
ret.pop(self.field_prefix + 'limit', None)
return ret
class GetCurrentContent(QueryOperation):
"""
Fetch full content for current (top) revision.
"""
input_field = MultiParam('titles', key_prefix=False, attr='title')
field_prefix = 'rv'
fields = [StaticParam('prop', 'revisions'),
MultiParam('prop', DEFAULT_PROPS + '|content'),
SingleParam('parse', False),
SingleParam('redirects', True, key_prefix=False)]
examples = [OperationExample('This page does not exist'),
OperationExample('Coffee')]
output_type = Revision
def extract_results(self, query_resp):
ret = []
#redirect_list = query_resp.get('redirects', []) # TODO
#redirects = dict([(r['from'], r['to']) for r in redirect_list])
requested_title = self.input_param
is_parsed = self.kwargs.get('rvparse', False)
pages = query_resp.get('pages', {})
for page_id, pid_dict in pages.iteritems():
if int(page_id) < 0:
continue
rev_dict = dict(pid_dict)
rev_dict.update(pid_dict['revisions'][0])
revision = Revision.from_query(rev_dict,
source=self.source,
is_parsed=is_parsed)
revision.req_title = requested_title
ret.append(revision)
return ret
class GetRevisionContent(GetCurrentContent):
input_field = SingleParam('revids', key_prefix=False, attr='rev_id')
fields = [StaticParam('prop', 'revisions'),
MultiParam('prop', DEFAULT_PROPS + '|content'),
SingleParam('parse', False)]
examples = [OperationExample('539916351')]
class GetCurrentTalkContent(GetCurrentContent):
"""
The simple prefix behavior means this operation will only work on
namespace 0 pages. I wouldn't rely on this operation being around
for long.
"""
input_field = MultiParam('titles', val_prefix='Talk:', key_prefix=False)
examples = [OperationExample('This page does not exist'),
OperationExample('Coffee')]
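# Hedged usage sketch (not part of the original module). Judging from the
# OperationExample entries above, an operation is constructed with its input
# parameter; the execution call below is a hypothetical placeholder:
#
#     op = GetCurrentContent('Coffee')
#     revisions = op.process()   # hypothetical method name
#     print revisions[0].rev_id  # Revision fields come from models.py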
|
{
"content_hash": "395d938b9f8c36afee4e272ae38d870e",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 86,
"avg_line_length": 36.9009900990099,
"alnum_prop": 0.6026294606922458,
"repo_name": "mahmoud/wapiti",
"id": "4131258a5399acd0bad93174dd75992cbc61b46a",
"size": "3751",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wapiti/operations/revisions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "146918"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
PROJECT_ROOT = os.path.realpath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(PROJECT_ROOT, os.pardir))
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "demo.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "2f558972e44306c38a71b038f93354ac",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 65,
"avg_line_length": 28,
"alnum_prop": 0.7261904761904762,
"repo_name": "alexsilva/django-xadmin",
"id": "bc0d0959994d54f5ca5116d0fdc9850586508481",
"size": "358",
"binary": false,
"copies": "1",
"ref": "refs/heads/python3-dj32",
"path": "demo_app/manage.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "20599"
},
{
"name": "HTML",
"bytes": "130392"
},
{
"name": "JavaScript",
"bytes": "79612"
},
{
"name": "Python",
"bytes": "424173"
},
{
"name": "Shell",
"bytes": "713"
}
],
"symlink_target": ""
}
|
from runner.koan import *
import random
class DiceSet(object):
def __init__(self):
self._values = None
@property
def values(self):
return self._values
def roll(self, n):
        # Tip: random.randint(min, max) can be used to generate random numbers
self._values = [random.randint(1, 6) for i in range(n)]
class AboutDiceProject(Koan):
def test_can_create_a_dice_set(self):
dice = DiceSet()
self.assertTrue(dice)
def test_rolling_the_dice_returns_a_set_of_integers_between_1_and_6(self):
dice = DiceSet()
dice.roll(5)
self.assertTrue(isinstance(dice.values, list), "should be a list")
self.assertEqual(5, len(dice.values))
for value in dice.values:
self.assertTrue(
value >= 1 and value <= 6,
"value " + str(value) + " must be between 1 and 6")
def test_dice_values_do_not_change_unless_explicitly_rolled(self):
dice = DiceSet()
dice.roll(5)
first_time = dice.values
second_time = dice.values
self.assertEqual(first_time, second_time)
def test_dice_values_should_change_between_rolls(self):
dice = DiceSet()
dice.roll(5)
first_time = dice.values
dice.roll(5)
second_time = dice.values
self.assertNotEqual(first_time, second_time, \
"Two rolls should not be equal")
# THINK ABOUT IT:
#
# If the rolls are random, then it is possible (although not
# likely) that two consecutive rolls are equal. What would be a
# better way to test this?
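        # One illustrative approach (a sketch, not part of the koan): roll
        # many times and require at least two distinct outcomes, which makes
        # a false failure astronomically unlikely:
        #
        #     outcomes = set()
        #     for _ in range(10):
        #         dice.roll(5)
        #         outcomes.add(tuple(dice.values))
        #     self.assertTrue(len(outcomes) > 1)
        #
        # Alternatively, seed the random module to make the test deterministic.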
def test_you_can_roll_different_numbers_of_dice(self):
dice = DiceSet()
dice.roll(3)
self.assertEqual(3, len(dice.values))
dice.roll(1)
self.assertEqual(1, len(dice.values))
|
{
"content_hash": "ce7041b32c3ce51962479080ba15435a",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 78,
"avg_line_length": 27.926470588235293,
"alnum_prop": 0.5945234333859927,
"repo_name": "ruzhytskyi/Koans",
"id": "6c629882aa00112d379b6bdc690ec782e201d5b9",
"size": "1946",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python2/koans/about_dice_project.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "323153"
},
{
"name": "Shell",
"bytes": "1603"
}
],
"symlink_target": ""
}
|
class AbstractIsExaminer(object):
""" Abstract class implemented by all classes where it is natural to
need to check if a user is examiner. """
@classmethod
def q_published(cls, old=True, active=True):
"""
        Return a django.db.models.Q object which matches all items of this
        type where :attr:`Assignment.publishing_time` is in the past.
:param old: Include assignments where :attr:`Period.end_time`
is in the past?
:param active: Include assignments where :attr:`Period.end_time`
is in the future?
"""
raise NotImplementedError()
@classmethod
def q_is_examiner(cls, user_obj):
"""
        Return a django.db.models.Q object which matches items
        where the given user is examiner.
"""
raise NotImplementedError()
@classmethod
def where_is_examiner(cls, user_obj):
""" Get all items of this type where the given ``user_obj`` is
examiner on one of the assignment groups.
:param user_obj: A User object.
:rtype: QuerySet
"""
return cls.objects.filter(
cls.q_is_examiner(user_obj)
).distinct()
@classmethod
def published_where_is_examiner(cls, user_obj, old=True, active=True):
"""
Get all published items of this type
where the given ``user_obj`` is examiner on one of the assignment
groups. Combines :meth:`q_is_examiner` and :meth:`q_published`.
        :param user_obj: See :meth:`q_is_examiner`.
        :param old: See :meth:`q_published`.
        :param active: See :meth:`q_published`.
:return: A django.db.models.QuerySet with duplicate
assignments eliminated.
"""
return cls.objects.filter(
cls.q_published(old=old, active=active) &
cls.q_is_examiner(user_obj)
).distinct()
@classmethod
def active_where_is_examiner(cls, user_obj):
"""
Shortcut for :meth:`published_where_is_examiner` with
``old=False``.
"""
return cls.published_where_is_examiner(user_obj, old=False,
active=True)
@classmethod
def old_where_is_examiner(cls, user_obj):
"""
Shortcut for :meth:`published_where_is_examiner` with
``active=False``.
"""
return cls.published_where_is_examiner(user_obj, active=False)
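# Illustrative sketch (not part of the original module; the model and field
# names below are hypothetical): a concrete model mixing in AbstractIsExaminer
# supplies the two Q builders, e.g.:
#
#     from datetime import datetime
#     from django.db.models import Q
#
#     class Assignment(models.Model, AbstractIsExaminer):
#         @classmethod
#         def q_published(cls, old=True, active=True):
#             q = Q(publishing_time__lt=datetime.now())
#             if not old:
#                 q &= Q(parentnode__end_time__gte=datetime.now())
#             if not active:
#                 q &= Q(parentnode__end_time__lt=datetime.now())
#             return q
#         @classmethod
#         def q_is_examiner(cls, user_obj):
#             return Q(assignmentgroups__examiners=user_obj)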
|
{
"content_hash": "a5398bf33f4cde97734135c7cedd663e",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 76,
"avg_line_length": 34.29577464788732,
"alnum_prop": 0.5934291581108829,
"repo_name": "devilry/devilry-django",
"id": "11c795c6bcfc73c955e512b7d21282bdfc8f06a2",
"size": "2435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devilry/apps/core/models/abstract_is_examiner.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "513510"
},
{
"name": "Dockerfile",
"bytes": "211"
},
{
"name": "HTML",
"bytes": "421969"
},
{
"name": "JavaScript",
"bytes": "756713"
},
{
"name": "Less",
"bytes": "166670"
},
{
"name": "PLpgSQL",
"bytes": "397986"
},
{
"name": "Python",
"bytes": "6507968"
},
{
"name": "Shell",
"bytes": "10328"
}
],
"symlink_target": ""
}
|
def FileChangeFunction():
""" new version """
|
{
"content_hash": "c839ac5a42106f5cb07ceaaa2b0efc9b",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 25,
"avg_line_length": 25,
"alnum_prop": 0.62,
"repo_name": "lerina/livecoding",
"id": "64e41463c749b453391d73447d6c64d7318bba04",
"size": "57",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scripts2/fileChange_After.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "111265"
}
],
"symlink_target": ""
}
|
""" Machine limits for Float32 and Float64 and (long double) if available...
"""
__all__ = ['finfo','iinfo']
from machar import MachAr
import numeric
import numerictypes as ntypes
from numeric import array
def _frz(a):
"""fix rank-0 --> rank-1"""
if a.ndim == 0: a.shape = (1,)
return a
_convert_to_float = {
ntypes.csingle: ntypes.single,
ntypes.complex_: ntypes.float_,
ntypes.clongfloat: ntypes.longfloat
}
class finfo(object):
"""
finfo(dtype)
Machine limits for floating point types.
Attributes
----------
eps : float
The smallest representable positive number such that
``1.0 + eps != 1.0``. Type of `eps` is an appropriate floating
point type.
epsneg : floating point number of the appropriate type
The smallest representable positive number such that
``1.0 - epsneg != 1.0``.
iexp : int
The number of bits in the exponent portion of the floating point
representation.
machar : MachAr
The object which calculated these parameters and holds more
detailed information.
machep : int
The exponent that yields `eps`.
max : floating point number of the appropriate type
The largest representable number.
maxexp : int
The smallest positive power of the base (2) that causes overflow.
min : floating point number of the appropriate type
The smallest representable number, typically ``-max``.
minexp : int
The most negative power of the base (2) consistent with there
being no leading 0's in the mantissa.
negep : int
The exponent that yields `epsneg`.
nexp : int
The number of bits in the exponent including its sign and bias.
nmant : int
The number of bits in the mantissa.
precision : int
The approximate number of decimal digits to which this kind of
float is precise.
resolution : floating point number of the appropriate type
The approximate decimal resolution of this type, i.e.,
``10**-precision``.
tiny : float
The smallest positive usable number. Type of `tiny` is an
appropriate floating point type.
Parameters
----------
dtype : float, dtype, or instance
Kind of floating point data-type about which to get information.
See Also
--------
MachAr : The implementation of the tests that produce this information.
iinfo : The equivalent for integer data types.
Notes
-----
For developers of NumPy: do not instantiate this at the module level.
The initial calculation of these parameters is expensive and negatively
impacts import times. These objects are cached, so calling ``finfo()``
repeatedly inside your functions is not a problem.
"""
_finfo_cache = {}
def __new__(cls, dtype):
try:
dtype = numeric.dtype(dtype)
except TypeError:
# In case a float instance was given
dtype = numeric.dtype(type(dtype))
obj = cls._finfo_cache.get(dtype,None)
if obj is not None:
return obj
dtypes = [dtype]
newdtype = numeric.obj2sctype(dtype)
if newdtype is not dtype:
dtypes.append(newdtype)
dtype = newdtype
if not issubclass(dtype, numeric.inexact):
raise ValueError("data type %r not inexact" % (dtype))
obj = cls._finfo_cache.get(dtype,None)
if obj is not None:
return obj
if not issubclass(dtype, numeric.floating):
newdtype = _convert_to_float[dtype]
if newdtype is not dtype:
dtypes.append(newdtype)
dtype = newdtype
obj = cls._finfo_cache.get(dtype,None)
if obj is not None:
return obj
obj = object.__new__(cls)._init(dtype)
for dt in dtypes:
cls._finfo_cache[dt] = obj
return obj
def _init(self, dtype):
self.dtype = numeric.dtype(dtype)
if dtype is ntypes.double:
itype = ntypes.int64
fmt = '%24.16e'
precname = 'double'
elif dtype is ntypes.single:
itype = ntypes.int32
fmt = '%15.7e'
precname = 'single'
elif dtype is ntypes.longdouble:
itype = ntypes.longlong
fmt = '%s'
precname = 'long double'
elif dtype is ntypes.half:
itype = ntypes.int16
fmt = '%12.5e'
precname = 'half'
else:
raise ValueError(repr(dtype))
machar = MachAr(lambda v:array([v], dtype),
lambda v:_frz(v.astype(itype))[0],
lambda v:array(_frz(v)[0], dtype),
lambda v: fmt % array(_frz(v)[0], dtype),
'numpy %s precision floating point number' % precname)
for word in ['precision', 'iexp',
'maxexp','minexp','negep',
'machep']:
setattr(self,word,getattr(machar, word))
for word in ['tiny','resolution','epsneg']:
setattr(self,word,getattr(machar, word).flat[0])
self.max = machar.huge.flat[0]
self.min = -self.max
self.eps = machar.eps.flat[0]
self.nexp = machar.iexp
self.nmant = machar.it
self.machar = machar
self._str_tiny = machar._str_xmin.strip()
self._str_max = machar._str_xmax.strip()
self._str_epsneg = machar._str_epsneg.strip()
self._str_eps = machar._str_eps.strip()
self._str_resolution = machar._str_resolution.strip()
return self
def __str__(self):
return '''\
Machine parameters for %(dtype)s
---------------------------------------------------------------------
precision=%(precision)3s resolution= %(_str_resolution)s
machep=%(machep)6s eps= %(_str_eps)s
negep =%(negep)6s epsneg= %(_str_epsneg)s
minexp=%(minexp)6s tiny= %(_str_tiny)s
maxexp=%(maxexp)6s max= %(_str_max)s
nexp =%(nexp)6s min= -max
---------------------------------------------------------------------
''' % self.__dict__
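# Illustrative usage (a sketch; exact values are platform-dependent):
#
#     >>> f = finfo(ntypes.double)
#     >>> f.eps         # smallest x such that 1.0 + x != 1.0
#     >>> f.tiny, f.max # smallest usable and largest representable values
#     >>> f.precision   # ~15 decimal digits for IEEE-754 double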
class iinfo(object):
"""
iinfo(type)
Machine limits for integer types.
Attributes
----------
min : int
The smallest integer expressible by the type.
max : int
The largest integer expressible by the type.
Parameters
----------
type : integer type, dtype, or instance
The kind of integer data type to get information about.
See Also
--------
finfo : The equivalent for floating point data types.
Examples
--------
With types:
>>> ii16 = np.iinfo(np.int16)
>>> ii16.min
-32768
>>> ii16.max
32767
>>> ii32 = np.iinfo(np.int32)
>>> ii32.min
-2147483648
>>> ii32.max
2147483647
With instances:
>>> ii32 = np.iinfo(np.int32(10))
>>> ii32.min
-2147483648
>>> ii32.max
2147483647
"""
_min_vals = {}
_max_vals = {}
def __init__(self, int_type):
try:
self.dtype = numeric.dtype(int_type)
except TypeError:
self.dtype = numeric.dtype(type(int_type))
self.kind = self.dtype.kind
self.bits = self.dtype.itemsize * 8
self.key = "%s%d" % (self.kind, self.bits)
if not self.kind in 'iu':
raise ValueError("Invalid integer data type.")
def min(self):
"""Minimum value of given dtype."""
if self.kind == 'u':
return 0
else:
try:
val = iinfo._min_vals[self.key]
except KeyError:
val = int(-(1L << (self.bits-1)))
iinfo._min_vals[self.key] = val
return val
min = property(min)
def max(self):
"""Maximum value of given dtype."""
try:
val = iinfo._max_vals[self.key]
except KeyError:
if self.kind == 'u':
val = int((1L << self.bits) - 1)
else:
val = int((1L << (self.bits-1)) - 1)
iinfo._max_vals[self.key] = val
return val
max = property(max)
def __str__(self):
"""String representation."""
return '''\
Machine parameters for %(dtype)s
---------------------------------------------------------------------
min = %(min)s
max = %(max)s
---------------------------------------------------------------------
''' % {'dtype': self.dtype, 'min': self.min, 'max': self.max}
if __name__ == '__main__':
f = finfo(ntypes.single)
print 'single epsilon:',f.eps
print 'single tiny:',f.tiny
f = finfo(ntypes.float)
print 'float epsilon:',f.eps
print 'float tiny:',f.tiny
f = finfo(ntypes.longfloat)
print 'longfloat epsilon:',f.eps
print 'longfloat tiny:',f.tiny
|
{
"content_hash": "cc2c37860000845a72486206b8c80ad1",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 78,
"avg_line_length": 30.48122866894198,
"alnum_prop": 0.5473071324599709,
"repo_name": "matthew-brett/numpy",
"id": "0cb1fd7f40a8aa0f3a73e61f71a73f6cf2c3663c",
"size": "8931",
"binary": false,
"copies": "1",
"ref": "refs/heads/np-doctest-refactor",
"path": "numpy/core/getlimits.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5867348"
},
{
"name": "C++",
"bytes": "272452"
},
{
"name": "Fortran",
"bytes": "14157"
},
{
"name": "Objective-C",
"bytes": "135"
},
{
"name": "Perl",
"bytes": "458"
},
{
"name": "Python",
"bytes": "5193106"
},
{
"name": "Shell",
"bytes": "3545"
}
],
"symlink_target": ""
}
|
import copy
import os
from zopeskel.plone import Plone
from zopeskel.plone2_theme import theme_vars
from zopeskel.base import get_var, EXPERT
from zopeskel.plone2_theme import cleanupStylsheets
class Plone25Theme(Plone):
_template_dir = 'templates/plone2.5_theme'
summary = "A theme for Plone 2.5"
help = """
This creates a project for a theme for Plone 2.5
"""
category = "Plone Theme Development"
required_templates = ['plone']
use_cheetah = True
vars = copy.deepcopy(Plone.vars)
get_var(vars, 'namespace_package').default = 'Products'
get_var(vars, 'description').default = 'An installable theme for Plone 2.5'
get_var(vars, 'keywords').default = 'web zope plone theme'
    # add_profile should always default to True for theme packages
    get_var(vars, 'add_profile').default = True
    # add_profile need not appear as a question for theme packages
    get_var(vars, 'add_profile').modes = ()
vars = vars[:3] + theme_vars + vars[3:]
def pre(self, command, output_dir, vars):
if vars['skinname'] == '':
# A title is needed in profiles.zcml otherwise adding a
# Plone Site will throw an error when displaying the
# extension profiles.
vars['skinname'] = 'Custom Theme'
super(Plone25Theme, self).pre(command, output_dir, vars)
def post(self, command, output_dir, vars):
if str(vars['empty_styles']) == 'False':
np, p = vars['namespace_package'], vars['package']
cleanupStylsheets(os.path.join(output_dir, np, p, 'skins'))
super(Plone25Theme, self).post(command, output_dir, vars)
|
{
"content_hash": "8d557b6d7e01a055ab518b1e4baa9a94",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 79,
"avg_line_length": 40.048780487804876,
"alnum_prop": 0.6607795371498173,
"repo_name": "jean/ZopeSkel",
"id": "8a4b00a34e5d29c849bf2d7f2ac533526b02ea8a",
"size": "1642",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zopeskel/plone25_theme.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "189680"
}
],
"symlink_target": ""
}
|
from typing import Sequence, cast, List
import networkx as nx
import numpy as np
import pytest
import cirq
import cirq.contrib.acquaintance as cca
from cirq_google import Sycamore23
from recirq.qaoa.gates_and_compilation import compile_problem_unitary_to_arbitrary_zz, \
compile_driver_unitary_to_rx
from recirq.qaoa.placement import place_line_on_device, place_on_device, \
min_weight_simple_paths_brute_force, min_weight_simple_path_greedy, path_weight, \
min_weight_simple_path_anneal, RouteCQC
from recirq.qaoa.problem_circuits import get_generic_qaoa_circuit
if RouteCQC is NotImplemented:
from recirq.qaoa.placement import pytket
def permute_gate(qubits: Sequence[cirq.Qid], permutation: List[int]):
return cca.LinearPermutationGate(
num_qubits=len(qubits),
permutation={i: permutation[i] for i in range(len(permutation))}
).on(*qubits)
@pytest.mark.skipif(
RouteCQC is NotImplemented and pytket is NotImplemented,
reason='Pytket and RouteCQC are both not installed.'
)
def test_place_on_device():
problem_graph = nx.random_regular_graph(d=3, n=10)
nx.set_edge_attributes(problem_graph, values=1, name='weight')
circuit_qubits = cirq.LineQubit.range(10)
gammas = np.random.randn(2)
betas = np.random.randn(2)
circuit = get_generic_qaoa_circuit(
problem_graph=problem_graph,
qubits=circuit_qubits,
gammas=gammas,
betas=betas)
# TODO: high-level function for getting low-level qaoa circuit
circuit = compile_problem_unitary_to_arbitrary_zz(circuit)
circuit = compile_driver_unitary_to_rx(circuit)
device = Sycamore23
routed_circuit, initial_qubit_map, final_qubit_map = place_on_device(circuit, device)
# Check that constraints are not violated
for _, op in routed_circuit.findall_operations(lambda op: op._num_qubits_() == 2):
a, b = op.qubits
a = cast(cirq.GridQubit, a)
b = cast(cirq.GridQubit, b)
assert a.is_adjacent(b)
def test_min_weight_simple_paths_brute_force():
test_graph = nx.grid_2d_graph(4, 4)
test_graph.remove_node((3, 0))
test_graph.remove_node((0, 3))
for e in test_graph.edges:
test_graph[e[0]][e[1]]['weight'] = np.random.rand()
bp_brute = min_weight_simple_paths_brute_force(test_graph)
for n in range(2, 14):
assert nx.is_simple_path(test_graph, bp_brute[n])
assert 14 not in bp_brute
def test_min_weight_simple_path_greedy():
test_graph = nx.grid_2d_graph(4, 4)
test_graph.remove_node((3, 0))
test_graph.remove_node((0, 3))
for e in test_graph.edges:
test_graph[e[0]][e[1]]['weight'] = np.random.rand()
weights = [w for u, v, w in test_graph.edges.data('weight')]
# it better return the lowest weight edge for a path consisting of 2 nodes
path = min_weight_simple_path_greedy(test_graph, 2)
assert path_weight(test_graph, path) == min(weights)
# it should return simple paths
path = min_weight_simple_path_greedy(test_graph, 5)
assert nx.is_simple_path(test_graph, path)
# there should not exist a simple path of 14 nodes
assert min_weight_simple_path_greedy(test_graph, 14) is None
def test_min_weight_simple_path_anneal():
test_graph = nx.grid_2d_graph(4, 4)
test_graph.remove_node((3, 0))
test_graph.remove_node((0, 3))
for e in test_graph.edges:
test_graph[e[0]][e[1]]['weight'] = np.random.rand()
# it should return simple paths
path = min_weight_simple_path_anneal(test_graph, 5)
assert nx.is_simple_path(test_graph, path)
# there should not exist a simple path of 14 nodes
assert min_weight_simple_path_anneal(test_graph, 14) is None
def _fake_calib_data():
err_graph = Sycamore23.metadata.nx_graph
nx.set_edge_attributes(err_graph, 0.005, 'weight')
nx.set_node_attributes(err_graph, 0.05, 'weight')
return err_graph
@pytest.mark.parametrize('n', [3, 8, 13])
@pytest.mark.parametrize('method', ['brute_force', 'random', 'greedy',
'anneal', 'mst', 'mixed'])
def test_on_device(n, method):
err_graph = _fake_calib_data()
path = place_line_on_device('Sycamore23', n=n,
line_placement_strategy=method,
err_graph=err_graph)
if n == 13:
if method == 'greedy' or method == 'mst':
assert path is None
return
assert nx.is_simple_path(err_graph, path)
|
{
"content_hash": "7ee5477445cb9ddc433edf286cb2a9ec",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 89,
"avg_line_length": 35.30708661417323,
"alnum_prop": 0.6634701159678859,
"repo_name": "quantumlib/ReCirq",
"id": "50ab8cd34f0b50fbd268340bd7677ad717c045ea",
"size": "4484",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recirq/qaoa/placement_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "365"
},
{
"name": "Dockerfile",
"bytes": "300"
},
{
"name": "Jupyter Notebook",
"bytes": "22201"
},
{
"name": "Makefile",
"bytes": "670"
},
{
"name": "Python",
"bytes": "989707"
},
{
"name": "Shell",
"bytes": "2189"
}
],
"symlink_target": ""
}
|
"""Apache plugin constants."""
import pkg_resources
from letsencrypt import le_util
CLI_DEFAULTS_DEBIAN = dict(
server_root="/etc/apache2",
vhost_root="/etc/apache2/sites-available",
vhost_files="*",
version_cmd=['apache2ctl', '-v'],
define_cmd=['apache2ctl', '-t', '-D', 'DUMP_RUN_CFG'],
restart_cmd=['apache2ctl', 'graceful'],
conftest_cmd=['apache2ctl', 'configtest'],
enmod="a2enmod",
dismod="a2dismod",
le_vhost_ext="-le-ssl.conf",
handle_mods=True,
handle_sites=True,
challenge_location="/etc/apache2",
MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
"letsencrypt_apache", "options-ssl-apache.conf")
)
CLI_DEFAULTS_CENTOS = dict(
server_root="/etc/httpd",
vhost_root="/etc/httpd/conf.d",
vhost_files="*.conf",
version_cmd=['apachectl', '-v'],
define_cmd=['apachectl', '-t', '-D', 'DUMP_RUN_CFG'],
restart_cmd=['apachectl', 'graceful'],
conftest_cmd=['apachectl', 'configtest'],
enmod=None,
dismod=None,
le_vhost_ext="-le-ssl.conf",
handle_mods=False,
handle_sites=False,
challenge_location="/etc/httpd/conf.d",
MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
"letsencrypt_apache", "centos-options-ssl-apache.conf")
)
CLI_DEFAULTS_GENTOO = dict(
server_root="/etc/apache2",
vhost_root="/etc/apache2/vhosts.d",
vhost_files="*.conf",
version_cmd=['/usr/sbin/apache2', '-v'],
define_cmd=['/usr/sbin/apache2', '-t', '-D', 'DUMP_RUN_CFG'],
restart_cmd=['apache2ctl', 'graceful'],
conftest_cmd=['apache2ctl', 'configtest'],
enmod=None,
dismod=None,
le_vhost_ext="-le-ssl.conf",
handle_mods=False,
handle_sites=False,
challenge_location="/etc/apache2/vhosts.d",
MOD_SSL_CONF_SRC=pkg_resources.resource_filename(
"letsencrypt_apache", "options-ssl-apache.conf")
)
CLI_DEFAULTS = {
"debian": CLI_DEFAULTS_DEBIAN,
"ubuntu": CLI_DEFAULTS_DEBIAN,
"centos": CLI_DEFAULTS_CENTOS,
"centos linux": CLI_DEFAULTS_CENTOS,
"fedora": CLI_DEFAULTS_CENTOS,
"red hat enterprise linux server": CLI_DEFAULTS_CENTOS,
"gentoo base system": CLI_DEFAULTS_GENTOO
}
"""CLI defaults."""
MOD_SSL_CONF_DEST = "options-ssl-apache.conf"
"""Name of the mod_ssl config file as saved in `IConfig.config_dir`."""
AUGEAS_LENS_DIR = pkg_resources.resource_filename(
"letsencrypt_apache", "augeas_lens")
"""Path to the Augeas lens directory"""
REWRITE_HTTPS_ARGS = [
"^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[L,QSA,R=permanent]"]
"""Apache version<2.3.9 rewrite rule arguments used for redirections to
https vhost"""
REWRITE_HTTPS_ARGS_WITH_END = [
"^", "https://%{SERVER_NAME}%{REQUEST_URI}", "[END,QSA,R=permanent]"]
"""Apache version >= 2.3.9 rewrite rule arguments used for redirections to
https vhost"""
HSTS_ARGS = ["always", "set", "Strict-Transport-Security",
"\"max-age=31536000\""]
"""Apache header arguments for HSTS"""
UIR_ARGS = ["always", "set", "Content-Security-Policy",
"upgrade-insecure-requests"]
HEADER_ARGS = {"Strict-Transport-Security": HSTS_ARGS,
"Upgrade-Insecure-Requests": UIR_ARGS}
def os_constant(key):
"""Get a constant value for operating system
:param key: name of cli constant
:return: value of constant for active os
"""
os_info = le_util.get_os_info()
try:
constants = CLI_DEFAULTS[os_info[0].lower()]
except KeyError:
constants = CLI_DEFAULTS["debian"]
return constants[key]
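# Illustrative usage (a sketch): on a Debian-family host the lookup resolves
# against CLI_DEFAULTS_DEBIAN, so for example:
#
#     os_constant('restart_cmd')  # -> ['apache2ctl', 'graceful']
#     os_constant('server_root')  # -> '/etc/apache2'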
|
{
"content_hash": "359eae52b566137bd1d84f4a2e566023",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 74,
"avg_line_length": 33.056603773584904,
"alnum_prop": 0.6486872146118722,
"repo_name": "TheBoegl/letsencrypt",
"id": "50156444b063000a8ac2e6392c593871d35220f2",
"size": "3504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "letsencrypt-apache/letsencrypt_apache/constants.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "50413"
},
{
"name": "Augeas",
"bytes": "4997"
},
{
"name": "Batchfile",
"bytes": "35037"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37309"
},
{
"name": "Nginx",
"bytes": "4274"
},
{
"name": "Python",
"bytes": "1388093"
},
{
"name": "Shell",
"bytes": "104220"
}
],
"symlink_target": ""
}
|
import warnings
from textwrap import indent
import astropy.units as u
import numpy as np
from astropy.constants import c
from astropy.coordinates import (ICRS,
CartesianDifferential,
CartesianRepresentation, SkyCoord)
from astropy.coordinates.spectral_quantity import SpectralQuantity
from astropy.coordinates.baseframe import (BaseCoordinateFrame,
frame_transform_graph)
from astropy.utils.exceptions import AstropyUserWarning
__all__ = ['SpectralCoord']
class NoVelocityWarning(AstropyUserWarning):
pass
class NoDistanceWarning(AstropyUserWarning):
pass
KMS = u.km / u.s
ZERO_VELOCITIES = CartesianDifferential([0, 0, 0] * KMS)
# Default distance to use for target when none is provided
DEFAULT_DISTANCE = 1e6 * u.kpc
# We don't want to run doctests in the docstrings we inherit from Quantity
__doctest_skip__ = ['SpectralCoord.*']
def _apply_relativistic_doppler_shift(scoord, velocity):
"""
Given a `SpectralQuantity` and a velocity, return a new `SpectralQuantity`
that is Doppler shifted by this amount.
Note that the Doppler shift applied is the full relativistic one, so
`SpectralQuantity` currently expressed in velocity and not using the
relativistic convention will temporarily be converted to use the
relativistic convention while the shift is applied.
Positive velocities are assumed to redshift the spectral quantity,
while negative velocities blueshift the spectral quantity.
"""
# NOTE: we deliberately don't keep sub-classes of SpectralQuantity intact
# since we can't guarantee that their metadata would be correct/consistent.
squantity = scoord.view(SpectralQuantity)
beta = velocity / c
doppler_factor = np.sqrt((1 + beta) / (1 - beta))
if squantity.unit.is_equivalent(u.m): # wavelength
return squantity * doppler_factor
elif (squantity.unit.is_equivalent(u.Hz) or
squantity.unit.is_equivalent(u.eV) or
squantity.unit.is_equivalent(1 / u.m)):
return squantity / doppler_factor
elif squantity.unit.is_equivalent(KMS): # velocity
return (squantity.to(u.Hz) / doppler_factor).to(squantity.unit)
else: # pragma: no cover
raise RuntimeError(f"Unexpected units in velocity shift: {squantity.unit}. "
"This should not happen, so please report this in the "
"astropy issue tracker!")
def update_differentials_to_match(original, velocity_reference, preserve_observer_frame=False):
"""
Given an original coordinate object, update the differentials so that
the final coordinate is at the same location as the original coordinate
but co-moving with the velocity reference object.
    If preserve_observer_frame is set to True, the resulting object will be in
    the frame of the original coordinate, otherwise it will be in the frame of
    the velocity reference.
"""
if not velocity_reference.data.differentials:
raise ValueError("Reference frame has no velocities")
# If the reference has an obstime already defined, we should ignore
# it and stick with the original observer obstime.
if 'obstime' in velocity_reference.frame_attributes and hasattr(original, 'obstime'):
velocity_reference = velocity_reference.replicate(obstime=original.obstime)
# We transform both coordinates to ICRS for simplicity and because we know
# it's a simple frame that is not time-dependent (it could be that both
# the original and velocity_reference frame are time-dependent)
original_icrs = original.transform_to(ICRS())
velocity_reference_icrs = velocity_reference.transform_to(ICRS())
differentials = velocity_reference_icrs.data.represent_as(CartesianRepresentation,
CartesianDifferential).differentials
data_with_differentials = (original_icrs.data.represent_as(CartesianRepresentation)
.with_differentials(differentials))
final_icrs = original_icrs.realize_frame(data_with_differentials)
if preserve_observer_frame:
final = final_icrs.transform_to(original)
else:
final = final_icrs.transform_to(velocity_reference)
return final.replicate(representation_type=CartesianRepresentation,
differential_type=CartesianDifferential)
def attach_zero_velocities(coord):
"""
Set the differentials to be stationary on a coordinate object.
"""
new_data = coord.cartesian.with_differentials(ZERO_VELOCITIES)
return coord.realize_frame(new_data)
def _get_velocities(coord):
if 's' in coord.data.differentials:
return coord.velocity
else:
return ZERO_VELOCITIES
class SpectralCoord(SpectralQuantity):
"""
A spectral coordinate with its corresponding unit.
.. note:: The |SpectralCoord| class is new in Astropy v4.1 and should be
considered experimental at this time. Note that we do not fully
support cases where the observer and target are moving
relativistically relative to each other, so care should be taken
in those cases. It is possible that there will be API changes in
future versions of Astropy based on user feedback. If you have
specific ideas for how it might be improved, please let us know
on the `astropy-dev mailing list`_ or at
http://feedback.astropy.org.
Parameters
----------
value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`
Spectral values, which should be either wavelength, frequency,
energy, wavenumber, or velocity values.
unit : unit-like
Unit for the given spectral values.
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of observer. If no velocities
are present on this object, the observer is assumed to be stationary
relative to the frame origin.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of target. If no velocities
are present on this object, the target is assumed to be stationary
relative to the frame origin.
radial_velocity : `~astropy.units.Quantity` ['speed'], optional
The radial velocity of the target with respect to the observer. This
can only be specified if ``redshift`` is not specified.
redshift : float, optional
The relativistic redshift of the target with respect to the observer.
        This can only be specified if ``radial_velocity`` is not specified.
doppler_rest : `~astropy.units.Quantity`, optional
The rest value to use when expressing the spectral value as a velocity.
doppler_convention : str, optional
The Doppler convention to use when expressing the spectral value as a velocity.
"""
@u.quantity_input(radial_velocity=u.km/u.s)
def __new__(cls, value, unit=None,
observer=None, target=None,
radial_velocity=None, redshift=None,
**kwargs):
obj = super().__new__(cls, value, unit=unit, **kwargs)
# There are two main modes of operation in this class. Either the
# observer and target are both defined, in which case the radial
# velocity and redshift are automatically computed from these, or
# only one of the observer and target are specified, along with a
# manually specified radial velocity or redshift. So if a target and
# observer are both specified, we can't also accept a radial velocity
# or redshift.
if target is not None and observer is not None:
if radial_velocity is not None or redshift is not None:
raise ValueError("Cannot specify radial velocity or redshift if both "
"target and observer are specified")
# We only deal with redshifts here and in the redshift property.
# Otherwise internally we always deal with velocities.
if redshift is not None:
if radial_velocity is not None:
raise ValueError("Cannot set both a radial velocity and redshift")
redshift = u.Quantity(redshift)
# For now, we can't specify redshift=u.one in quantity_input above
# and have it work with plain floats, but if that is fixed, for
# example as in https://github.com/astropy/astropy/pull/10232, we
# can remove the check here and add redshift=u.one to the decorator
if not redshift.unit.is_equivalent(u.one):
raise u.UnitsError('redshift should be dimensionless')
radial_velocity = redshift.to(u.km / u.s, u.doppler_redshift())
# If we're initializing from an existing SpectralCoord, keep any
# parameters that aren't being overridden
if observer is None:
observer = getattr(value, 'observer', None)
if target is None:
target = getattr(value, 'target', None)
# As mentioned above, we should only specify the radial velocity
# manually if either or both the observer and target are not
# specified.
if observer is None or target is None:
if radial_velocity is None:
radial_velocity = getattr(value, 'radial_velocity', None)
obj._radial_velocity = radial_velocity
obj._observer = cls._validate_coordinate(observer, label='observer')
obj._target = cls._validate_coordinate(target, label='target')
return obj
def __array_finalize__(self, obj):
super().__array_finalize__(obj)
self._radial_velocity = getattr(obj, '_radial_velocity', None)
self._observer = getattr(obj, '_observer', None)
self._target = getattr(obj, '_target', None)
@staticmethod
def _validate_coordinate(coord, label=''):
"""
Checks the type of the frame and whether a velocity differential and a
distance has been defined on the frame object.
If no distance is defined, the target is assumed to be "really far
away", and the observer is assumed to be "in the solar system".
Parameters
----------
coord : `~astropy.coordinates.BaseCoordinateFrame`
The new frame to be used for target or observer.
label : str, optional
The name of the object being validated (e.g. 'target' or 'observer'),
which is then used in error messages.
"""
if coord is None:
return
if not issubclass(coord.__class__, BaseCoordinateFrame):
if isinstance(coord, SkyCoord):
coord = coord.frame
else:
raise TypeError(f"{label} must be a SkyCoord or coordinate frame instance")
# If the distance is not well-defined, ensure that it works properly
# for generating differentials
# TODO: change this to not set the distance and yield a warning once
# there's a good way to address this in astropy.coordinates
# https://github.com/astropy/astropy/issues/10247
with np.errstate(all='ignore'):
distance = getattr(coord, 'distance', None)
if distance is not None and distance.unit.physical_type == 'dimensionless':
coord = SkyCoord(coord, distance=DEFAULT_DISTANCE)
warnings.warn(
"Distance on coordinate object is dimensionless, an "
f"arbitrary distance value of {DEFAULT_DISTANCE} will be set instead.",
NoDistanceWarning)
# If the observer frame does not contain information about the
# velocity of the system, assume that the velocity is zero in the
# system.
if 's' not in coord.data.differentials:
warnings.warn(
f"No velocity defined on frame, assuming {ZERO_VELOCITIES}.",
NoVelocityWarning)
coord = attach_zero_velocities(coord)
return coord
def replicate(self, value=None, unit=None,
observer=None, target=None,
radial_velocity=None, redshift=None,
doppler_convention=None, doppler_rest=None,
copy=False):
"""
Return a replica of the `SpectralCoord`, optionally changing the
values or attributes.
Note that no conversion is carried out by this method - this keeps
all the values and attributes the same, except for the ones explicitly
passed to this method which are changed.
If ``copy`` is set to `True` then a full copy of the internal arrays
will be made. By default the replica will use a reference to the
original arrays when possible to save memory.
Parameters
----------
value : ndarray or `~astropy.units.Quantity` or `SpectralCoord`, optional
Spectral values, which should be either wavelength, frequency,
energy, wavenumber, or velocity values.
unit : unit-like
Unit for the given spectral values.
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of observer.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`, optional
The coordinate (position and velocity) of target.
radial_velocity : `~astropy.units.Quantity` ['speed'], optional
The radial velocity of the target with respect to the observer.
redshift : float, optional
The relativistic redshift of the target with respect to the observer.
doppler_rest : `~astropy.units.Quantity`, optional
The rest value to use when expressing the spectral value as a velocity.
doppler_convention : str, optional
The Doppler convention to use when expressing the spectral value as a velocity.
copy : bool, optional
If `True`, and ``value`` is not specified, the values are copied to
the new `SkyCoord` - otherwise a reference to the same values is used.
Returns
-------
sc : `SpectralCoord` object
Replica of this object
"""
if isinstance(value, u.Quantity):
if unit is not None:
raise ValueError("Cannot specify value as a Quantity and also specify unit")
else:
value, unit = value.value, value.unit
value = value if value is not None else self.value
unit = unit or self.unit
observer = self._validate_coordinate(observer) or self.observer
target = self._validate_coordinate(target) or self.target
doppler_convention = doppler_convention or self.doppler_convention
doppler_rest = doppler_rest or self.doppler_rest
        # If value is being taken from self and copy is True, make a copy
if copy:
value = value.copy()
# Only include radial_velocity if it is not auto-computed from the
# observer and target.
if (self.observer is None or self.target is None) and radial_velocity is None and redshift is None:
radial_velocity = self.radial_velocity
with warnings.catch_warnings():
warnings.simplefilter('ignore', NoVelocityWarning)
return self.__class__(value=value, unit=unit,
observer=observer, target=target,
radial_velocity=radial_velocity, redshift=redshift,
doppler_convention=doppler_convention, doppler_rest=doppler_rest, copy=False)
@property
def quantity(self):
"""
Convert the ``SpectralCoord`` to a `~astropy.units.Quantity`.
Equivalent to ``self.view(u.Quantity)``.
Returns
-------
`~astropy.units.Quantity`
This object viewed as a `~astropy.units.Quantity`.
"""
return self.view(u.Quantity)
@property
def observer(self):
"""
The coordinates of the observer.
If set, and a target is set as well, this will override any explicit
radial velocity passed in.
Returns
-------
`~astropy.coordinates.BaseCoordinateFrame`
The astropy coordinate frame representing the observation.
"""
return self._observer
@observer.setter
def observer(self, value):
if self.observer is not None:
raise ValueError("observer has already been set")
self._observer = self._validate_coordinate(value, label='observer')
# Switch to auto-computing radial velocity
if self._target is not None:
self._radial_velocity = None
@property
def target(self):
"""
The coordinates of the target being observed.
If set, and an observer is set as well, this will override any explicit
radial velocity passed in.
Returns
-------
`~astropy.coordinates.BaseCoordinateFrame`
The astropy coordinate frame representing the target.
"""
return self._target
@target.setter
def target(self, value):
if self.target is not None:
raise ValueError("target has already been set")
self._target = self._validate_coordinate(value, label='target')
# Switch to auto-computing radial velocity
if self._observer is not None:
self._radial_velocity = None
@property
def radial_velocity(self):
"""
Radial velocity of target relative to the observer.
Returns
-------
`~astropy.units.Quantity` ['speed']
Radial velocity of target.
Notes
-----
This is different from the ``.radial_velocity`` property of a
coordinate frame in that this calculates the radial velocity with
respect to the *observer*, not the origin of the frame.
"""
if self._observer is None or self._target is None:
if self._radial_velocity is None:
return 0 * KMS
else:
return self._radial_velocity
else:
return self._calculate_radial_velocity(self._observer, self._target,
as_scalar=True)
@property
def redshift(self):
"""
Redshift of target relative to observer. Calculated from the radial
velocity.
Returns
-------
`astropy.units.Quantity`
Redshift of target.
"""
return self.radial_velocity.to(u.dimensionless_unscaled, u.doppler_redshift())
@staticmethod
def _calculate_radial_velocity(observer, target, as_scalar=False):
"""
Compute the line-of-sight velocity from the observer to the target.
Parameters
----------
observer : `~astropy.coordinates.BaseCoordinateFrame`
The frame of the observer.
target : `~astropy.coordinates.BaseCoordinateFrame`
The frame of the target.
as_scalar : bool
If `True`, the magnitude of the velocity vector will be returned,
otherwise the full vector will be returned.
Returns
-------
`~astropy.units.Quantity` ['speed']
The radial velocity of the target with respect to the observer.
"""
# Convert observer and target to ICRS to avoid finite differencing
# calculations that lack numerical precision.
observer_icrs = observer.transform_to(ICRS())
target_icrs = target.transform_to(ICRS())
pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs)
d_vel = target_icrs.velocity - observer_icrs.velocity
vel_mag = pos_hat.dot(d_vel)
if as_scalar:
return vel_mag
else:
return vel_mag * pos_hat
@staticmethod
def _normalized_position_vector(observer, target):
"""
Calculate the normalized position vector between two frames.
Parameters
----------
observer : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The observation frame or coordinate.
target : `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The target frame or coordinate.
Returns
-------
pos_hat : `BaseRepresentation`
Position representation.
"""
d_pos = (target.cartesian.without_differentials() -
observer.cartesian.without_differentials())
dp_norm = d_pos.norm()
# Reset any that are 0 to 1 to avoid nans from 0/0
dp_norm[dp_norm == 0] = 1 * dp_norm.unit
pos_hat = d_pos / dp_norm
return pos_hat
@u.quantity_input(velocity=u.km/u.s)
def with_observer_stationary_relative_to(self, frame, velocity=None, preserve_observer_frame=False):
"""
A new `SpectralCoord` with the velocity of the observer altered,
but not the position.
If a coordinate frame is specified, the observer velocities will be
modified to be stationary in the specified frame. If a coordinate
instance is specified, optionally with non-zero velocities, the
observer velocities will be updated so that the observer is co-moving
with the specified coordinates.
Parameters
----------
frame : str, `~astropy.coordinates.BaseCoordinateFrame` or `~astropy.coordinates.SkyCoord`
The observation frame in which the observer will be stationary. This
can be the name of a frame (e.g. 'icrs'), a frame class, frame instance
with no data, or instance with data. This can optionally include
velocities.
velocity : `~astropy.units.Quantity` or `~astropy.coordinates.CartesianDifferential`, optional
If ``frame`` does not contain velocities, these can be specified as
a 3-element `~astropy.units.Quantity`. In the case where this is
also not specified, the velocities default to zero.
preserve_observer_frame : bool
If `True`, the final observer frame class will be the same as the
original one, and if `False` it will be the frame of the velocity
reference class.
Returns
-------
new_coord : `SpectralCoord`
The new coordinate object representing the spectral data
transformed based on the observer's new velocity frame.
"""
if self.observer is None or self.target is None:
raise ValueError("This method can only be used if both observer "
"and target are defined on the SpectralCoord.")
# Start off by extracting frame if a SkyCoord was passed in
if isinstance(frame, SkyCoord):
frame = frame.frame
if isinstance(frame, BaseCoordinateFrame):
if not frame.has_data:
frame = frame.realize_frame(CartesianRepresentation(0 * u.km, 0 * u.km, 0 * u.km))
if frame.data.differentials:
if velocity is not None:
raise ValueError('frame already has differentials, cannot also specify velocity')
# otherwise frame is ready to go
else:
if velocity is None:
differentials = ZERO_VELOCITIES
else:
differentials = CartesianDifferential(velocity)
frame = frame.realize_frame(frame.data.with_differentials(differentials))
if isinstance(frame, (type, str)):
if isinstance(frame, type):
frame_cls = frame
elif isinstance(frame, str):
frame_cls = frame_transform_graph.lookup_name(frame)
if velocity is None:
velocity = 0 * u.m / u.s, 0 * u.m / u.s, 0 * u.m / u.s
elif velocity.shape != (3,):
raise ValueError('velocity should be a Quantity vector with 3 elements')
frame = frame_cls(0 * u.m, 0 * u.m, 0 * u.m,
*velocity,
representation_type='cartesian',
differential_type='cartesian')
observer = update_differentials_to_match(self.observer, frame,
preserve_observer_frame=preserve_observer_frame)
# Calculate the initial and final los velocity
init_obs_vel = self._calculate_radial_velocity(self.observer, self.target, as_scalar=True)
fin_obs_vel = self._calculate_radial_velocity(observer, self.target, as_scalar=True)
# Apply transformation to data
new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel)
new_coord = self.replicate(value=new_data, observer=observer)
return new_coord
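    # Illustrative call (a sketch): making the observer stationary in ICRS
    # gives an approximately barycentric velocity frame:
    #
    #     sc_bary = sc.with_observer_stationary_relative_to('icrs')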
def with_radial_velocity_shift(self, target_shift=None, observer_shift=None):
"""
Apply a velocity shift to this spectral coordinate.
The shift can be provided as a redshift (float value) or radial
velocity (`~astropy.units.Quantity` with physical type of 'speed').
Parameters
----------
target_shift : float or `~astropy.units.Quantity` ['speed']
Shift value to apply to current target.
observer_shift : float or `~astropy.units.Quantity` ['speed']
Shift value to apply to current observer.
Returns
-------
`SpectralCoord`
New spectral coordinate with the target/observer velocity changed
to incorporate the shift. This is always a new object even if
``target_shift`` and ``observer_shift`` are both `None`.
"""
if observer_shift is not None and (self.target is None or
self.observer is None):
raise ValueError("Both an observer and target must be defined "
"before applying a velocity shift.")
for arg in [x for x in [target_shift, observer_shift] if x is not None]:
if isinstance(arg, u.Quantity) and not arg.unit.is_equivalent((u.one, KMS)):
raise u.UnitsError("Argument must have unit physical type "
"'speed' for radial velocty or "
"'dimensionless' for redshift.")
# The target or observer value is defined but is not a quantity object,
# assume it's a redshift float value and convert to velocity
if target_shift is None:
if self._observer is None or self._target is None:
return self.replicate()
target_shift = 0 * KMS
else:
target_shift = u.Quantity(target_shift)
if target_shift.unit.physical_type == 'dimensionless':
target_shift = target_shift.to(u.km / u.s, u.doppler_redshift())
if self._observer is None or self._target is None:
return self.replicate(value=_apply_relativistic_doppler_shift(self, target_shift),
radial_velocity=self.radial_velocity + target_shift)
if observer_shift is None:
observer_shift = 0 * KMS
else:
observer_shift = u.Quantity(observer_shift)
if observer_shift.unit.physical_type == 'dimensionless':
observer_shift = observer_shift.to(u.km / u.s, u.doppler_redshift())
target_icrs = self._target.transform_to(ICRS())
observer_icrs = self._observer.transform_to(ICRS())
pos_hat = SpectralCoord._normalized_position_vector(observer_icrs, target_icrs)
target_velocity = _get_velocities(target_icrs) + target_shift * pos_hat
observer_velocity = _get_velocities(observer_icrs) + observer_shift * pos_hat
target_velocity = CartesianDifferential(target_velocity.xyz)
observer_velocity = CartesianDifferential(observer_velocity.xyz)
new_target = (target_icrs
.realize_frame(target_icrs.cartesian.with_differentials(target_velocity))
.transform_to(self._target))
new_observer = (observer_icrs
.realize_frame(observer_icrs.cartesian.with_differentials(observer_velocity))
.transform_to(self._observer))
init_obs_vel = self._calculate_radial_velocity(observer_icrs, target_icrs, as_scalar=True)
fin_obs_vel = self._calculate_radial_velocity(new_observer, new_target, as_scalar=True)
new_data = _apply_relativistic_doppler_shift(self, fin_obs_vel - init_obs_vel)
return self.replicate(value=new_data,
observer=new_observer,
target=new_target)
def to_rest(self):
"""
Transforms the spectral axis to the rest frame.
"""
if self.observer is not None and self.target is not None:
return self.with_observer_stationary_relative_to(self.target)
result = _apply_relativistic_doppler_shift(self, -self.radial_velocity)
return self.replicate(value=result, radial_velocity=0. * KMS, redshift=None)
def __repr__(self):
prefixstr = '<' + self.__class__.__name__ + ' '
try:
radial_velocity = self.radial_velocity
redshift = self.redshift
except ValueError:
radial_velocity = redshift = 'Undefined'
repr_items = [f'{prefixstr}']
if self.observer is not None:
observer_repr = indent(repr(self.observer), 14 * ' ').lstrip()
repr_items.append(f' observer: {observer_repr}')
if self.target is not None:
target_repr = indent(repr(self.target), 12 * ' ').lstrip()
repr_items.append(f' target: {target_repr}')
if (self._observer is not None and self._target is not None) or self._radial_velocity is not None:
if self.observer is not None and self.target is not None:
repr_items.append(' observer to target (computed from above):')
else:
repr_items.append(' observer to target:')
repr_items.append(f' radial_velocity={radial_velocity}')
repr_items.append(f' redshift={redshift}')
if self.doppler_rest is not None or self.doppler_convention is not None:
repr_items.append(f' doppler_rest={self.doppler_rest}')
repr_items.append(f' doppler_convention={self.doppler_convention}')
arrstr = np.array2string(self.view(np.ndarray), separator=', ',
prefix=' ')
if len(repr_items) == 1:
repr_items[0] += f'{arrstr}{self._unitstr:s}'
else:
repr_items[1] = ' (' + repr_items[1].lstrip()
repr_items[-1] += ')'
repr_items.append(f' {arrstr}{self._unitstr:s}')
return '\n'.join(repr_items) + '>'
|
{
"content_hash": "1b5339c55ea77220ebbf789a178a2aaf",
"timestamp": "",
"source": "github",
"line_count": 754,
"max_line_length": 111,
"avg_line_length": 41.84615384615385,
"alnum_prop": 0.6232251521298174,
"repo_name": "saimn/astropy",
"id": "829698bbb8c621e836d0f8b3962dbfb231b107a3",
"size": "31552",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "astropy/coordinates/spectral_coordinate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11034753"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78631"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52457"
},
{
"name": "Python",
"bytes": "12214998"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
"""
Views which allow users to create and activate accounts.
"""
from django.shortcuts import redirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from registration.backends import get_backend
from seahub.settings import USER_PASSWORD_MIN_LENGTH, \
USER_STRONG_PASSWORD_REQUIRED, USER_PASSWORD_STRENGTH_LEVEL
def activate(request, backend,
template_name='registration/activate.html',
success_url=None, extra_context=None, **kwargs):
"""
Activate a user's account.
The actual activation of the account will be delegated to the
backend specified by the ``backend`` keyword argument (see below);
the backend's ``activate()`` method will be called, passing any
keyword arguments captured from the URL, and will be assumed to
return a ``User`` if activation was successful, or a value which
evaluates to ``False`` in boolean context if not.
Upon successful activation, the backend's
``post_activation_redirect()`` method will be called, passing the
``HttpRequest`` and the activated ``User`` to determine the URL to
redirect the user to. To override this, pass the argument
``success_url`` (see below).
On unsuccessful activation, will render the template
``registration/activate.html`` to display an error message; to
    override this, pass the argument ``template_name`` (see below).
**Arguments**
``backend``
The dotted Python import path to the backend class to
use. Required.
``extra_context``
A dictionary of variables to add to the template context. Any
callable object in this dictionary will be called to produce
the end result which appears in the context. Optional.
``success_url``
The name of a URL pattern to redirect to on successful
        activation. This is optional; if not specified, this will be
obtained by calling the backend's
``post_activation_redirect()`` method.
``template_name``
A custom template to use. This is optional; if not specified,
this will default to ``registration/activate.html``.
``\*\*kwargs``
Any keyword arguments captured from the URL, such as an
activation key, which will be passed to the backend's
``activate()`` method.
**Context:**
The context will be populated from the keyword arguments captured
in the URL, and any extra variables supplied in the
``extra_context`` argument (see above).
**Template:**
registration/activate.html or ``template_name`` keyword argument.
"""
backend = get_backend(backend)
account = backend.activate(request, **kwargs)
if account:
if success_url is None:
to, args, kwargs = backend.post_activation_redirect(request, account)
return redirect(to, *args, **kwargs)
else:
return redirect(success_url)
if extra_context is None:
extra_context = {}
context = RequestContext(request)
for key, value in extra_context.items():
        context[key] = value() if callable(value) else value
return render_to_response(template_name,
kwargs,
context_instance=context)
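# Illustrative URLconf wiring (a sketch; the backend dotted path shown is the
# conventional django-registration default and may differ per project):
#
#     url(r'^activate/(?P<activation_key>\w+)/$', activate,
#         {'backend': 'registration.backends.default.DefaultBackend'},
#         name='registration_activate'),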
def register(request, backend, success_url=None, form_class=None,
disallowed_url='registration_disallowed',
template_name='registration/registration_form.html',
extra_context=None):
"""
Allow a new user to register an account.
The actual registration of the account will be delegated to the
backend specified by the ``backend`` keyword argument (see below);
it will be used as follows:
1. The backend's ``registration_allowed()`` method will be called,
passing the ``HttpRequest``, to determine whether registration
of an account is to be allowed; if not, a redirect is issued to
the view corresponding to the named URL pattern
``registration_disallowed``. To override this, see the list of
optional arguments for this view (below).
2. The form to use for account registration will be obtained by
calling the backend's ``get_form_class()`` method, passing the
``HttpRequest``. To override this, see the list of optional
arguments for this view (below).
3. If valid, the form's ``cleaned_data`` will be passed (as
keyword arguments, and along with the ``HttpRequest``) to the
backend's ``register()`` method, which should return the new
``User`` object.
4. Upon successful registration, the backend's
``post_registration_redirect()`` method will be called, passing
the ``HttpRequest`` and the new ``User``, to determine the URL
to redirect the user to. To override this, see the list of
optional arguments for this view (below).
**Required arguments**
None.
**Optional arguments**
``backend``
The dotted Python import path to the backend class to use.
``disallowed_url``
URL to redirect to if registration is not permitted for the
current ``HttpRequest``. Must be a value which can legally be
passed to ``django.shortcuts.redirect``. If not supplied, this
will be whatever URL corresponds to the named URL pattern
``registration_disallowed``.
``form_class``
The form class to use for registration. If not supplied, this
will be retrieved from the registration backend.
``extra_context``
A dictionary of variables to add to the template context. Any
callable object in this dictionary will be called to produce
the end result which appears in the context.
``success_url``
URL to redirect to after successful registration. Must be a
value which can legally be passed to
``django.shortcuts.redirect``. If not supplied, this will be
retrieved from the registration backend.
``template_name``
A custom template to use. If not supplied, this will default
to ``registration/registration_form.html``.
**Context:**
``form``
The registration form.
Any extra variables supplied in the ``extra_context`` argument
(see above).
**Template:**
registration/registration_form.html or ``template_name`` keyword
argument.
"""
backend = get_backend(backend)
if not backend.registration_allowed(request):
return redirect(disallowed_url)
if form_class is None:
form_class = backend.get_form_class(request)
if request.method == 'POST':
form = form_class(data=request.POST, files=request.FILES)
if form.is_valid():
new_user = backend.register(request, **form.cleaned_data)
if success_url is None:
to, args, kwargs = backend.post_registration_redirect(request, new_user)
return redirect(to, *args, **kwargs)
else:
return redirect(success_url)
else:
userid = request.REQUEST.get('userid', '')
form = form_class(initial={'userid': userid })
if extra_context is None:
extra_context = {}
context = RequestContext(request)
for key, value in extra_context.items():
context[key] = callable(value) and value() or value
src = request.GET.get('src', '')
if src:
form = form_class(initial={'email': src})
return render_to_response(template_name, {
'form': form,
'min_len': USER_PASSWORD_MIN_LENGTH,
'strong_pwd_required': USER_STRONG_PASSWORD_REQUIRED,
'level': USER_PASSWORD_STRENGTH_LEVEL,
}, context_instance=context)
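# --- Usage sketch (illustrative; not part of the original module) ---
# A minimal URLconf wiring for the two views above. The backend dotted
# path and URL pattern names follow django-registration conventions and
# are assumptions; adjust them to the backend actually deployed.
#
# from django.conf.urls import url
# from registration.views import activate, register
#
# urlpatterns = [
#     url(r'^activate/(?P<activation_key>\w+)/$', activate,
#         {'backend': 'registration.backends.default.DefaultBackend'},
#         name='registration_activate'),
#     url(r'^register/$', register,
#         {'backend': 'registration.backends.default.DefaultBackend'},
#         name='registration_register'),
# ]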
|
{
"content_hash": "dfe8b6a9990f1401633ae9d0782174a1",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 88,
"avg_line_length": 36.305164319248824,
"alnum_prop": 0.6584766584766585,
"repo_name": "cloudcopy/seahub",
"id": "dc36c4f9c6a7aec44f60e7c037a6592d5461c652",
"size": "7733",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "thirdpart/registration/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "231001"
},
{
"name": "HTML",
"bytes": "756152"
},
{
"name": "JavaScript",
"bytes": "2430927"
},
{
"name": "PLSQL",
"bytes": "16796"
},
{
"name": "Python",
"bytes": "1508638"
},
{
"name": "Shell",
"bytes": "9365"
}
],
"symlink_target": ""
}
|
import android_commands
import constants
import logging
import os
import subprocess
import time
class FakeDns(object):
"""Wrapper class for the fake_dns tool."""
_FAKE_DNS_PATH = constants.TEST_EXECUTABLE_DIR + '/fake_dns'
def __init__(self, adb):
"""
Args:
adb: the AndroidCommands to use.
"""
self._adb = adb
self._fake_dns = None
self._original_dns = None
def _PushAndStartFakeDns(self):
"""Starts the fake_dns server that replies all name queries 127.0.0.1.
Returns:
subprocess instance connected to the fake_dns process on the device.
"""
self._adb.PushIfNeeded(
os.path.join(constants.GetOutDirectory(), 'fake_dns'),
FakeDns._FAKE_DNS_PATH)
return subprocess.Popen(
['adb', '-s', self._adb._adb.GetSerialNumber(),
'shell', '%s -D' % FakeDns._FAKE_DNS_PATH])
def SetUp(self):
"""Configures the system to point to a DNS server that replies 127.0.0.1.
This can be used in combination with the forwarder to forward all web
traffic to a replay server.
The TearDown() method will perform all cleanup.
"""
self._adb.RunShellCommand('ip route add 8.8.8.0/24 via 127.0.0.1 dev lo')
self._fake_dns = self._PushAndStartFakeDns()
self._original_dns = self._adb.RunShellCommand('getprop net.dns1')[0]
self._adb.RunShellCommand('setprop net.dns1 127.0.0.1')
time.sleep(2) # Time for server to start and the setprop to take effect.
def TearDown(self):
"""Shuts down the fake_dns."""
if self._fake_dns:
if not self._original_dns or self._original_dns == '127.0.0.1':
logging.warning('Bad original DNS, falling back to Google DNS.')
self._original_dns = '8.8.8.8'
self._adb.RunShellCommand('setprop net.dns1 %s' % self._original_dns)
self._fake_dns.kill()
self._adb.RunShellCommand('ip route del 8.8.8.0/24 via 127.0.0.1 dev lo')
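# --- Usage sketch (illustrative; not part of the original module) ---
# Typical lifecycle around a test run. The device serial is a placeholder,
# and the AndroidCommands constructor signature is an assumption based on
# this module's imports.
def _example_fake_dns_usage(serial):
  adb = android_commands.AndroidCommands(device=serial)  # assumed signature
  fake_dns = FakeDns(adb)
  fake_dns.SetUp()
  try:
    pass  # Run tests whose DNS lookups should now resolve to 127.0.0.1.
  finally:
    fake_dns.TearDown()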
|
{
"content_hash": "236dd2bc41e1e6dfeea40e8b5eea28ae",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 79,
"avg_line_length": 33.719298245614034,
"alnum_prop": 0.6529656607700313,
"repo_name": "mogoweb/chromium-crosswalk",
"id": "c0e12e1dd311b49eb3f546dfb75bebb0e3a1e5d4",
"size": "2089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/android/pylib/fake_dns.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "54831"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "40940503"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "182703853"
},
{
"name": "CSS",
"bytes": "799795"
},
{
"name": "DOT",
"bytes": "1873"
},
{
"name": "Java",
"bytes": "4807735"
},
{
"name": "JavaScript",
"bytes": "20714038"
},
{
"name": "Mercury",
"bytes": "10299"
},
{
"name": "Objective-C",
"bytes": "985558"
},
{
"name": "Objective-C++",
"bytes": "6205987"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "1213389"
},
{
"name": "Python",
"bytes": "9735121"
},
{
"name": "Rebol",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1305641"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "TypeScript",
"bytes": "1560024"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "14650"
}
],
"symlink_target": ""
}
|
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class NumberList(ListResource):
""" """
def __init__(self, version):
"""
Initialize the NumberList
:param Version version: Version that contains the resource
:returns: twilio.rest.pricing.v1.voice.number.NumberList
:rtype: twilio.rest.pricing.v1.voice.number.NumberList
"""
super(NumberList, self).__init__(version)
# Path Solution
self._solution = {}
def get(self, number):
"""
Constructs a NumberContext
:param number: The phone number to fetch
:returns: twilio.rest.pricing.v1.voice.number.NumberContext
:rtype: twilio.rest.pricing.v1.voice.number.NumberContext
"""
return NumberContext(self._version, number=number, )
def __call__(self, number):
"""
Constructs a NumberContext
:param number: The phone number to fetch
:returns: twilio.rest.pricing.v1.voice.number.NumberContext
:rtype: twilio.rest.pricing.v1.voice.number.NumberContext
"""
return NumberContext(self._version, number=number, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Pricing.V1.NumberList>'
class NumberPage(Page):
""" """
def __init__(self, version, response, solution):
"""
Initialize the NumberPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:returns: twilio.rest.pricing.v1.voice.number.NumberPage
:rtype: twilio.rest.pricing.v1.voice.number.NumberPage
"""
super(NumberPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of NumberInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.pricing.v1.voice.number.NumberInstance
:rtype: twilio.rest.pricing.v1.voice.number.NumberInstance
"""
return NumberInstance(self._version, payload, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Pricing.V1.NumberPage>'
class NumberContext(InstanceContext):
""" """
def __init__(self, version, number):
"""
Initialize the NumberContext
:param Version version: Version that contains the resource
:param number: The phone number to fetch
:returns: twilio.rest.pricing.v1.voice.number.NumberContext
:rtype: twilio.rest.pricing.v1.voice.number.NumberContext
"""
super(NumberContext, self).__init__(version)
# Path Solution
self._solution = {'number': number, }
self._uri = '/Voice/Numbers/{number}'.format(**self._solution)
def fetch(self):
"""
Fetch a NumberInstance
:returns: Fetched NumberInstance
:rtype: twilio.rest.pricing.v1.voice.number.NumberInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return NumberInstance(self._version, payload, number=self._solution['number'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Pricing.V1.NumberContext {}>'.format(context)
class NumberInstance(InstanceResource):
""" """
def __init__(self, version, payload, number=None):
"""
Initialize the NumberInstance
:returns: twilio.rest.pricing.v1.voice.number.NumberInstance
:rtype: twilio.rest.pricing.v1.voice.number.NumberInstance
"""
super(NumberInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'number': payload.get('number'),
'country': payload.get('country'),
'iso_country': payload.get('iso_country'),
'outbound_call_price': payload.get('outbound_call_price'),
'inbound_call_price': payload.get('inbound_call_price'),
'price_unit': payload.get('price_unit'),
'url': payload.get('url'),
}
# Context
self._context = None
self._solution = {'number': number or self._properties['number'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: NumberContext for this NumberInstance
:rtype: twilio.rest.pricing.v1.voice.number.NumberContext
"""
if self._context is None:
self._context = NumberContext(self._version, number=self._solution['number'], )
return self._context
@property
def number(self):
"""
:returns: The phone number
:rtype: unicode
"""
return self._properties['number']
@property
def country(self):
"""
:returns: The name of the country
:rtype: unicode
"""
return self._properties['country']
@property
def iso_country(self):
"""
:returns: The ISO country code
:rtype: unicode
"""
return self._properties['iso_country']
@property
def outbound_call_price(self):
"""
:returns: The OutboundCallPrice record
:rtype: unicode
"""
return self._properties['outbound_call_price']
@property
def inbound_call_price(self):
"""
:returns: The InboundCallPrice record
:rtype: unicode
"""
return self._properties['inbound_call_price']
@property
def price_unit(self):
"""
:returns: The currency in which prices are measured, in ISO 4217 format (e.g. usd, eur, jpy)
:rtype: unicode
"""
return self._properties['price_unit']
@property
def url(self):
"""
:returns: The absolute URL of the resource
:rtype: unicode
"""
return self._properties['url']
def fetch(self):
"""
Fetch a NumberInstance
:returns: Fetched NumberInstance
:rtype: twilio.rest.pricing.v1.voice.number.NumberInstance
"""
return self._proxy.fetch()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Pricing.V1.NumberInstance {}>'.format(context)
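# --- Usage sketch (illustrative; not part of the generated file) ---
# Fetching voice pricing for one number through the top-level REST client.
# The account SID and auth token below are placeholders.
#
# from twilio.rest import Client
#
# client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')
# number = client.pricing.v1.voice.numbers('+15108675310').fetch()
# print(number.iso_country, number.price_unit, number.outbound_call_price)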
|
{
"content_hash": "4bdcded5cfbca95fc73adc4e6bd8d1bb",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 100,
"avg_line_length": 28.098859315589355,
"alnum_prop": 0.5901217861975643,
"repo_name": "tysonholub/twilio-python",
"id": "992cdaeeb09fbf616c6c3aba2a56f9ea40fb23f7",
"size": "7405",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twilio/rest/pricing/v1/voice/number.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "173"
},
{
"name": "Makefile",
"bytes": "2081"
},
{
"name": "Python",
"bytes": "8063586"
}
],
"symlink_target": ""
}
|
from minio import Minio
client = Minio(
"play.min.io",
access_key="Q3AM3UQ867SPQQA43P2F",
secret_key="zuf+tfteSlswRu7BJ86wekitnifILbZam1KYY3TG",
)
if client.bucket_exists("my-bucket"):
print("my-bucket exists")
else:
print("my-bucket does not exist")
|
{
"content_hash": "3c80202ddeb7e7db3a426c900a65e377",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 58,
"avg_line_length": 22.75,
"alnum_prop": 0.706959706959707,
"repo_name": "minio/minio-py",
"id": "13a7df8876aa527cb4d03d7dcbb3c75d60b5411d",
"size": "930",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/bucket_exists.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "603"
},
{
"name": "Python",
"bytes": "452355"
},
{
"name": "Shell",
"bytes": "1978"
}
],
"symlink_target": ""
}
|
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField
from wtforms.validators import Required, Length, Email, Regexp
class EmailForm(FlaskForm):
email = StringField('Please input admin email', validators=[Required(), Length(1, 64), Email()])
info = TextAreaField('Please input information ', validators=[Required()])
submit = SubmitField('Send')
class NameForm(FlaskForm):
name = StringField('Please input username to find')
submit = SubmitField('Search')
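# --- Usage sketch (illustrative; not part of the original module) ---
# How EmailForm is typically consumed in a view; the blueprint, template
# name, and send_mail helper below are placeholders, not taken from this
# project.
#
# from flask import render_template, redirect, url_for
#
# @main.route('/contact-admin', methods=['GET', 'POST'])
# def contact_admin():
#     form = EmailForm()
#     if form.validate_on_submit():
#         send_mail(form.email.data, form.info.data)  # hypothetical helper
#         return redirect(url_for('.index'))
#     return render_template('contact_admin.html', form=form)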
|
{
"content_hash": "a0be25670ff704cbe54c9fcd3030d112",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 100,
"avg_line_length": 39.69230769230769,
"alnum_prop": 0.748062015503876,
"repo_name": "ASaiun/saiun_bysj",
"id": "798e9b33c9686b999d300106f9bc8af179214d7b",
"size": "516",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/main/forms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "18"
},
{
"name": "CSS",
"bytes": "2457"
},
{
"name": "HTML",
"bytes": "9231"
},
{
"name": "JavaScript",
"bytes": "58987"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "19598"
},
{
"name": "Shell",
"bytes": "262"
}
],
"symlink_target": ""
}
|
"""XML-RPC methods of Zinnia Pingback"""
from urllib.error import HTTPError
from urllib.error import URLError
from urllib.parse import urlsplit
from urllib.request import urlopen
from bs4 import BeautifulSoup
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.urls import Resolver404
from django.urls import resolve
from django.utils.html import strip_tags
from django.utils.translation import gettext as _
import django_comments as comments
from django_xmlrpc.decorators import xmlrpc_func
from zinnia.flags import PINGBACK
from zinnia.flags import get_user_flagger
from zinnia.models.entry import Entry
from zinnia.settings import PINGBACK_CONTENT_LENGTH
from zinnia.signals import pingback_was_posted
from zinnia.spam_checker import check_is_spam
UNDEFINED_ERROR = 0
SOURCE_DOES_NOT_EXIST = 16
SOURCE_DOES_NOT_LINK = 17
TARGET_DOES_NOT_EXIST = 32
TARGET_IS_NOT_PINGABLE = 33
PINGBACK_ALREADY_REGISTERED = 48
PINGBACK_IS_SPAM = 51
class FakeRequest(object):
META = {}
def generate_pingback_content(soup, target, max_length, trunc_char='...'):
"""
Generate a description text for the pingback.
"""
link = soup.find('a', href=target)
content = strip_tags(str(link.findParent()))
index = content.index(link.string)
if len(content) > max_length:
middle = max_length // 2
start = index - middle
end = index + middle
if start <= 0:
end -= start
extract = content[0:end]
else:
extract = '%s%s' % (trunc_char, content[start:end])
if end < len(content):
extract += trunc_char
return extract
return content
@xmlrpc_func(returns='string', args=['string', 'string'])
def pingback_ping(source, target):
"""
pingback.ping(sourceURI, targetURI) => 'Pingback message'
Notifies the server that a link has been added to sourceURI,
pointing to targetURI.
See: http://hixie.ch/specs/pingback/pingback-1.0
"""
try:
if source == target:
return UNDEFINED_ERROR
site = Site.objects.get_current()
try:
document = ''.join(map(
lambda byte_line: byte_line.decode('utf-8'),
urlopen(source).readlines()))
except (HTTPError, URLError):
return SOURCE_DOES_NOT_EXIST
if target not in document:
return SOURCE_DOES_NOT_LINK
target_splitted = urlsplit(target)
if target_splitted.netloc != site.domain:
return TARGET_DOES_NOT_EXIST
try:
view, args, kwargs = resolve(target_splitted.path)
except Resolver404:
return TARGET_DOES_NOT_EXIST
try:
entry = Entry.published.get(
slug=kwargs['slug'],
publication_date__year=kwargs['year'],
publication_date__month=kwargs['month'],
publication_date__day=kwargs['day'])
if not entry.pingbacks_are_open:
return TARGET_IS_NOT_PINGABLE
except (KeyError, Entry.DoesNotExist):
return TARGET_IS_NOT_PINGABLE
soup = BeautifulSoup(document, 'html.parser')
title = str(soup.find('title'))
title = title and strip_tags(title) or _('No title')
description = generate_pingback_content(soup, target,
PINGBACK_CONTENT_LENGTH)
pingback_klass = comments.get_model()
pingback_datas = {
'content_type': ContentType.objects.get_for_model(Entry),
'object_pk': entry.pk,
'site': site,
'user_url': source,
'user_name': title,
'comment': description
}
pingback = pingback_klass(**pingback_datas)
if check_is_spam(pingback, entry, FakeRequest()):
return PINGBACK_IS_SPAM
pingback_defaults = {'comment': pingback_datas.pop('comment'),
'user_name': pingback_datas.pop('user_name')}
pingback, created = pingback_klass.objects.get_or_create(
defaults=pingback_defaults,
**pingback_datas)
if created:
pingback.flags.create(user=get_user_flagger(), flag=PINGBACK)
pingback_was_posted.send(pingback.__class__,
pingback=pingback,
entry=entry)
return 'Pingback from %s to %s registered.' % (source, target)
return PINGBACK_ALREADY_REGISTERED
except Exception:
return UNDEFINED_ERROR
@xmlrpc_func(returns='string[]', args=['string'])
def pingback_extensions_get_pingbacks(target):
"""
pingback.extensions.getPingbacks(url) => '[url, url, ...]'
Returns an array of URLs that link to the specified url.
See: http://www.aquarionics.com/misc/archives/blogite/0198.html
"""
site = Site.objects.get_current()
target_splitted = urlsplit(target)
if target_splitted.netloc != site.domain:
return TARGET_DOES_NOT_EXIST
try:
view, args, kwargs = resolve(target_splitted.path)
except Resolver404:
return TARGET_DOES_NOT_EXIST
try:
entry = Entry.published.get(
slug=kwargs['slug'],
publication_date__year=kwargs['year'],
publication_date__month=kwargs['month'],
publication_date__day=kwargs['day'])
except (KeyError, Entry.DoesNotExist):
return TARGET_IS_NOT_PINGABLE
return [pingback.user_url for pingback in entry.pingbacks]
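# --- Client-side usage sketch (illustrative; not part of the module) ---
# A pingback client calls these methods over XML-RPC. The endpoint URL is
# a placeholder and depends on how django_xmlrpc is wired into urls.py.
#
# import xmlrpc.client
#
# server = xmlrpc.client.ServerProxy('http://example.com/xmlrpc/')
# result = server.pingback.ping(
#     'http://source.example.com/post-linking-here/',
#     'http://example.com/blog/2024/01/01/some-entry/')
# print(result)  # 'Pingback ... registered.' or one of the int fault codes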
|
{
"content_hash": "83ccc3a2bb3b670e89028971100c41f2",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 74,
"avg_line_length": 31.954545454545453,
"alnum_prop": 0.6191322901849218,
"repo_name": "Fantomas42/django-blog-zinnia",
"id": "c67a8c92463d1fea2cfed2e652903ae07407df9c",
"size": "5624",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "zinnia/xmlrpc/pingback.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "24255"
},
{
"name": "HTML",
"bytes": "78415"
},
{
"name": "JavaScript",
"bytes": "87448"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "543985"
}
],
"symlink_target": ""
}
|
"""The media player tests for the forked_daapd media player platform."""
from unittest.mock import patch
import pytest
from homeassistant.components.forked_daapd.const import (
CONF_LIBRESPOT_JAVA_PORT,
CONF_MAX_PLAYLISTS,
CONF_TTS_PAUSE_TIME,
CONF_TTS_VOLUME,
DOMAIN,
SIGNAL_UPDATE_OUTPUTS,
SIGNAL_UPDATE_PLAYER,
SIGNAL_UPDATE_QUEUE,
SOURCE_NAME_CLEAR,
SOURCE_NAME_DEFAULT,
SUPPORTED_FEATURES,
SUPPORTED_FEATURES_ZONE,
)
from homeassistant.components.media_player import (
SERVICE_CLEAR_PLAYLIST,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_MEDIA_SEEK,
SERVICE_MEDIA_STOP,
SERVICE_PLAY_MEDIA,
SERVICE_SELECT_SOURCE,
SERVICE_SHUFFLE_SET,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_SET,
)
from homeassistant.components.media_player.const import (
ATTR_INPUT_SOURCE,
ATTR_MEDIA_ALBUM_ARTIST,
ATTR_MEDIA_ALBUM_NAME,
ATTR_MEDIA_ARTIST,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_SEEK_POSITION,
ATTR_MEDIA_SHUFFLE,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_TRACK,
ATTR_MEDIA_VOLUME_LEVEL,
ATTR_MEDIA_VOLUME_MUTED,
DOMAIN as MP_DOMAIN,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_TVSHOW,
)
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
ATTR_SUPPORTED_FEATURES,
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
STATE_ON,
STATE_PAUSED,
STATE_UNAVAILABLE,
)
from tests.common import MockConfigEntry, async_mock_signal
TEST_MASTER_ENTITY_NAME = "media_player.forked_daapd_server"
TEST_ZONE_ENTITY_NAMES = [
"media_player.forked_daapd_output_" + x
for x in ["kitchen", "computer", "daapd_fifo"]
]
OPTIONS_DATA = {
CONF_LIBRESPOT_JAVA_PORT: "123",
CONF_MAX_PLAYLISTS: 8,
CONF_TTS_PAUSE_TIME: 0,
CONF_TTS_VOLUME: 0.25,
}
SAMPLE_PLAYER_PAUSED = {
"state": "pause",
"repeat": "off",
"consume": False,
"shuffle": False,
"volume": 20,
"item_id": 12322,
"item_length_ms": 50,
"item_progress_ms": 5,
}
SAMPLE_PLAYER_PLAYING = {
"state": "play",
"repeat": "off",
"consume": False,
"shuffle": False,
"volume": 50,
"item_id": 12322,
"item_length_ms": 50,
"item_progress_ms": 5,
}
SAMPLE_PLAYER_STOPPED = {
"state": "stop",
"repeat": "off",
"consume": False,
"shuffle": False,
"volume": 0,
"item_id": 12322,
"item_length_ms": 50,
"item_progress_ms": 5,
}
SAMPLE_QUEUE_TTS = {
"version": 833,
"count": 1,
"items": [
{
"id": 12322,
"position": 0,
"track_id": 1234,
"title": "Short TTS file",
"artist": "Google",
"album": "No album",
"album_artist": "The xx",
"artwork_url": "http://art",
"length_ms": 0,
"track_number": 1,
"media_kind": "music",
"data_kind": "url",
"uri": "tts_proxy_somefile.mp3",
}
],
}
SAMPLE_QUEUE_PIPE = {
"version": 833,
"count": 1,
"items": [
{
"id": 12322,
"title": "librespot-java",
"artist": "some artist",
"album": "some album",
"album_artist": "The xx",
"length_ms": 0,
"track_number": 1,
"media_kind": "music",
"data_kind": "pipe",
"uri": "pipeuri",
}
],
}
SAMPLE_CONFIG = {
"websocket_port": 3688,
"version": "25.0",
"buildoptions": [
"ffmpeg",
"iTunes XML",
"Spotify",
"LastFM",
"MPD",
"Device verification",
"Websockets",
"ALSA",
],
}
SAMPLE_CONFIG_NO_WEBSOCKET = {
"websocket_port": 0,
"version": "25.0",
"buildoptions": [
"ffmpeg",
"iTunes XML",
"Spotify",
"LastFM",
"MPD",
"Device verification",
"Websockets",
"ALSA",
],
}
SAMPLE_OUTPUTS_ON = (
{
"id": "123456789012345",
"name": "kitchen",
"type": "AirPlay",
"selected": True,
"has_password": False,
"requires_auth": False,
"needs_auth_key": False,
"volume": 50,
},
{
"id": "0",
"name": "Computer",
"type": "ALSA",
"selected": True,
"has_password": False,
"requires_auth": False,
"needs_auth_key": False,
"volume": 19,
},
{
"id": "100",
"name": "daapd-fifo",
"type": "fifo",
"selected": False,
"has_password": False,
"requires_auth": False,
"needs_auth_key": False,
"volume": 0,
},
)
SAMPLE_OUTPUTS_UNSELECTED = [
{
"id": "123456789012345",
"name": "kitchen",
"type": "AirPlay",
"selected": False,
"has_password": False,
"requires_auth": False,
"needs_auth_key": False,
"volume": 0,
},
{
"id": "0",
"name": "Computer",
"type": "ALSA",
"selected": False,
"has_password": False,
"requires_auth": False,
"needs_auth_key": False,
"volume": 19,
},
{
"id": "100",
"name": "daapd-fifo",
"type": "fifo",
"selected": False,
"has_password": False,
"requires_auth": False,
"needs_auth_key": False,
"volume": 0,
},
]
SAMPLE_PIPES = [
{
"id": 1,
"title": "librespot-java",
"media_kind": "music",
"data_kind": "pipe",
"path": "/music/srv/input.pipe",
"uri": "library:track:1",
}
]
SAMPLE_PLAYLISTS = [{"id": 7, "name": "test_playlist", "uri": "library:playlist:2"}]
@pytest.fixture(name="config_entry")
def config_entry_fixture():
"""Create hass config_entry fixture."""
data = {
CONF_HOST: "192.168.1.1",
CONF_PORT: "2345",
CONF_PASSWORD: "",
}
return MockConfigEntry(
version=1,
domain=DOMAIN,
title="",
data=data,
options={CONF_TTS_PAUSE_TIME: 0},
source=SOURCE_USER,
entry_id=1,
)
@pytest.fixture(name="get_request_return_values")
async def get_request_return_values_fixture():
"""Get request return values we can change later."""
return {
"config": SAMPLE_CONFIG,
"outputs": SAMPLE_OUTPUTS_ON,
"player": SAMPLE_PLAYER_PAUSED,
"queue": SAMPLE_QUEUE_TTS,
}
@pytest.fixture(name="mock_api_object")
async def mock_api_object_fixture(hass, config_entry, get_request_return_values):
"""Create mock api fixture."""
async def get_request_side_effect(update_type):
if update_type == "outputs":
return {"outputs": get_request_return_values["outputs"]}
return get_request_return_values[update_type]
with patch(
"homeassistant.components.forked_daapd.media_player.ForkedDaapdAPI",
autospec=True,
) as mock_api:
mock_api.return_value.get_request.side_effect = get_request_side_effect
mock_api.return_value.full_url.return_value = ""
mock_api.return_value.get_pipes.return_value = SAMPLE_PIPES
mock_api.return_value.get_playlists.return_value = SAMPLE_PLAYLISTS
config_entry.add_to_hass(hass)
await config_entry.async_setup(hass)
await hass.async_block_till_done()
mock_api.return_value.start_websocket_handler.assert_called_once()
mock_api.return_value.get_request.assert_called_once()
updater_update = mock_api.return_value.start_websocket_handler.call_args[0][2]
await updater_update(["player", "outputs", "queue"])
await hass.async_block_till_done()
async def add_to_queue_side_effect(
uris, playback=None, playback_from_position=None, clear=None
):
await updater_update(["queue", "player"])
mock_api.return_value.add_to_queue.side_effect = (
add_to_queue_side_effect # for play_media testing
)
async def pause_side_effect():
await updater_update(["player"])
mock_api.return_value.pause_playback.side_effect = pause_side_effect
return mock_api.return_value
async def test_unload_config_entry(hass, config_entry, mock_api_object):
"""Test the player is set unavailable when the config entry is unloaded."""
assert hass.states.get(TEST_MASTER_ENTITY_NAME)
assert hass.states.get(TEST_ZONE_ENTITY_NAMES[0])
await config_entry.async_unload(hass)
assert hass.states.get(TEST_MASTER_ENTITY_NAME).state == STATE_UNAVAILABLE
assert hass.states.get(TEST_ZONE_ENTITY_NAMES[0]).state == STATE_UNAVAILABLE
def test_master_state(hass, mock_api_object):
"""Test master state attributes."""
state = hass.states.get(TEST_MASTER_ENTITY_NAME)
assert state.state == STATE_PAUSED
assert state.attributes[ATTR_FRIENDLY_NAME] == "forked-daapd server"
assert state.attributes[ATTR_SUPPORTED_FEATURES] == SUPPORTED_FEATURES
assert not state.attributes[ATTR_MEDIA_VOLUME_MUTED]
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 0.2
assert state.attributes[ATTR_MEDIA_CONTENT_ID] == 12322
assert state.attributes[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert state.attributes[ATTR_MEDIA_DURATION] == 0.05
assert state.attributes[ATTR_MEDIA_POSITION] == 0.005
assert state.attributes[ATTR_MEDIA_TITLE] == "No album" # reversed for url
assert state.attributes[ATTR_MEDIA_ARTIST] == "Google"
assert state.attributes[ATTR_MEDIA_ALBUM_NAME] == "Short TTS file" # reversed
assert state.attributes[ATTR_MEDIA_ALBUM_ARTIST] == "The xx"
assert state.attributes[ATTR_MEDIA_TRACK] == 1
assert not state.attributes[ATTR_MEDIA_SHUFFLE]
async def test_no_update_when_get_request_returns_none(
hass, config_entry, mock_api_object
):
"""Test when get request returns None."""
async def get_request_side_effect(update_type):
return None
mock_api_object.get_request.side_effect = get_request_side_effect
updater_update = mock_api_object.start_websocket_handler.call_args[0][2]
signal_output_call = async_mock_signal(
hass, SIGNAL_UPDATE_OUTPUTS.format(config_entry.entry_id)
)
signal_player_call = async_mock_signal(
hass, SIGNAL_UPDATE_PLAYER.format(config_entry.entry_id)
)
signal_queue_call = async_mock_signal(
hass, SIGNAL_UPDATE_QUEUE.format(config_entry.entry_id)
)
await updater_update(["outputs", "player", "queue"])
await hass.async_block_till_done()
assert len(signal_output_call) == 0
assert len(signal_player_call) == 0
assert len(signal_queue_call) == 0
async def _service_call(
hass, entity_name, service, additional_service_data=None, blocking=True
):
if additional_service_data is None:
additional_service_data = {}
return await hass.services.async_call(
MP_DOMAIN,
service,
service_data={ATTR_ENTITY_ID: entity_name, **additional_service_data},
blocking=blocking,
)
async def test_zone(hass, mock_api_object):
"""Test zone attributes and methods."""
zone_entity_name = TEST_ZONE_ENTITY_NAMES[0]
state = hass.states.get(zone_entity_name)
assert state.attributes[ATTR_FRIENDLY_NAME] == "forked-daapd output (kitchen)"
assert state.attributes[ATTR_SUPPORTED_FEATURES] == SUPPORTED_FEATURES_ZONE
assert state.state == STATE_ON
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 0.5
assert not state.attributes[ATTR_MEDIA_VOLUME_MUTED]
await _service_call(hass, zone_entity_name, SERVICE_TURN_ON)
await _service_call(hass, zone_entity_name, SERVICE_TURN_OFF)
await _service_call(hass, zone_entity_name, SERVICE_TOGGLE)
await _service_call(
hass, zone_entity_name, SERVICE_VOLUME_SET, {ATTR_MEDIA_VOLUME_LEVEL: 0.3}
)
await _service_call(
hass, zone_entity_name, SERVICE_VOLUME_MUTE, {ATTR_MEDIA_VOLUME_MUTED: True}
)
await _service_call(
hass, zone_entity_name, SERVICE_VOLUME_MUTE, {ATTR_MEDIA_VOLUME_MUTED: False}
)
zone_entity_name = TEST_ZONE_ENTITY_NAMES[2]
await _service_call(hass, zone_entity_name, SERVICE_TOGGLE)
await _service_call(
hass, zone_entity_name, SERVICE_VOLUME_MUTE, {ATTR_MEDIA_VOLUME_MUTED: True}
)
output_id = SAMPLE_OUTPUTS_ON[0]["id"]
initial_volume = SAMPLE_OUTPUTS_ON[0]["volume"]
mock_api_object.change_output.assert_any_call(output_id, selected=True)
mock_api_object.change_output.assert_any_call(output_id, selected=False)
mock_api_object.set_volume.assert_any_call(output_id=output_id, volume=30)
mock_api_object.set_volume.assert_any_call(output_id=output_id, volume=0)
mock_api_object.set_volume.assert_any_call(
output_id=output_id, volume=initial_volume
)
output_id = SAMPLE_OUTPUTS_ON[2]["id"]
mock_api_object.change_output.assert_any_call(output_id, selected=True)
async def test_last_outputs_master(hass, mock_api_object):
"""Test restoration of _last_outputs."""
# Test turning on sends API call
await _service_call(hass, TEST_MASTER_ENTITY_NAME, SERVICE_TURN_ON)
assert mock_api_object.change_output.call_count == 0
assert mock_api_object.set_enabled_outputs.call_count == 1
await _service_call(
hass, TEST_MASTER_ENTITY_NAME, SERVICE_TURN_OFF
) # should have stored last outputs
assert mock_api_object.change_output.call_count == 0
assert mock_api_object.set_enabled_outputs.call_count == 2
await _service_call(hass, TEST_MASTER_ENTITY_NAME, SERVICE_TURN_ON)
assert mock_api_object.change_output.call_count == 3
assert mock_api_object.set_enabled_outputs.call_count == 2
async def test_bunch_of_stuff_master(hass, get_request_return_values, mock_api_object):
"""Run bunch of stuff."""
await _service_call(hass, TEST_MASTER_ENTITY_NAME, SERVICE_TURN_ON)
await _service_call(hass, TEST_MASTER_ENTITY_NAME, SERVICE_TURN_OFF)
await _service_call(hass, TEST_MASTER_ENTITY_NAME, SERVICE_TOGGLE)
await _service_call(
hass,
TEST_MASTER_ENTITY_NAME,
SERVICE_VOLUME_MUTE,
{ATTR_MEDIA_VOLUME_MUTED: True},
)
await _service_call(
hass,
TEST_MASTER_ENTITY_NAME,
SERVICE_VOLUME_MUTE,
{ATTR_MEDIA_VOLUME_MUTED: False},
)
await _service_call(
hass,
TEST_MASTER_ENTITY_NAME,
SERVICE_VOLUME_SET,
{ATTR_MEDIA_VOLUME_LEVEL: 0.5},
)
await _service_call(hass, TEST_MASTER_ENTITY_NAME, SERVICE_MEDIA_PAUSE)
await _service_call(hass, TEST_MASTER_ENTITY_NAME, SERVICE_MEDIA_PLAY)
await _service_call(hass, TEST_MASTER_ENTITY_NAME, SERVICE_MEDIA_STOP)
await _service_call(hass, TEST_MASTER_ENTITY_NAME, SERVICE_MEDIA_PREVIOUS_TRACK)
await _service_call(hass, TEST_MASTER_ENTITY_NAME, SERVICE_MEDIA_NEXT_TRACK)
await _service_call(
hass,
TEST_MASTER_ENTITY_NAME,
SERVICE_MEDIA_SEEK,
{ATTR_MEDIA_SEEK_POSITION: 35},
)
await _service_call(hass, TEST_MASTER_ENTITY_NAME, SERVICE_CLEAR_PLAYLIST)
await _service_call(
hass, TEST_MASTER_ENTITY_NAME, SERVICE_SHUFFLE_SET, {ATTR_MEDIA_SHUFFLE: False}
)
# stop player and run more stuff
state = hass.states.get(TEST_MASTER_ENTITY_NAME)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 0.2
get_request_return_values["player"] = SAMPLE_PLAYER_STOPPED
updater_update = mock_api_object.start_websocket_handler.call_args[0][2]
await updater_update(["player"])
await hass.async_block_till_done()
# mute from volume==0
state = hass.states.get(TEST_MASTER_ENTITY_NAME)
assert state.attributes[ATTR_MEDIA_VOLUME_LEVEL] == 0
await _service_call(
hass,
TEST_MASTER_ENTITY_NAME,
SERVICE_VOLUME_MUTE,
{ATTR_MEDIA_VOLUME_MUTED: True},
)
# now turn off (stopped and all outputs unselected)
get_request_return_values["outputs"] = SAMPLE_OUTPUTS_UNSELECTED
await updater_update(["outputs"])
await hass.async_block_till_done()
# toggle from off
await _service_call(hass, TEST_MASTER_ENTITY_NAME, SERVICE_TOGGLE)
for output in SAMPLE_OUTPUTS_ON:
mock_api_object.change_output.assert_any_call(
output["id"],
selected=output["selected"],
volume=output["volume"],
)
mock_api_object.set_volume.assert_any_call(volume=0)
mock_api_object.set_volume.assert_any_call(volume=SAMPLE_PLAYER_PAUSED["volume"])
mock_api_object.set_volume.assert_any_call(volume=50)
mock_api_object.set_enabled_outputs.assert_any_call(
[output["id"] for output in SAMPLE_OUTPUTS_ON]
)
mock_api_object.set_enabled_outputs.assert_any_call([])
mock_api_object.start_playback.assert_called_once()
assert mock_api_object.pause_playback.call_count == 3
mock_api_object.stop_playback.assert_called_once()
mock_api_object.previous_track.assert_called_once()
mock_api_object.next_track.assert_called_once()
mock_api_object.seek.assert_called_once()
mock_api_object.shuffle.assert_called_once()
mock_api_object.clear_queue.assert_called_once()
async def test_async_play_media_from_paused(hass, mock_api_object):
"""Test async play media from paused."""
initial_state = hass.states.get(TEST_MASTER_ENTITY_NAME)
await _service_call(
hass,
TEST_MASTER_ENTITY_NAME,
SERVICE_PLAY_MEDIA,
{
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,
ATTR_MEDIA_CONTENT_ID: "somefile.mp3",
},
)
state = hass.states.get(TEST_MASTER_ENTITY_NAME)
assert state.state == initial_state.state
assert state.last_updated > initial_state.last_updated
async def test_async_play_media_from_stopped(
hass, get_request_return_values, mock_api_object
):
"""Test async play media from stopped."""
updater_update = mock_api_object.start_websocket_handler.call_args[0][2]
get_request_return_values["player"] = SAMPLE_PLAYER_STOPPED
await updater_update(["player"])
await hass.async_block_till_done()
initial_state = hass.states.get(TEST_MASTER_ENTITY_NAME)
await _service_call(
hass,
TEST_MASTER_ENTITY_NAME,
SERVICE_PLAY_MEDIA,
{
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,
ATTR_MEDIA_CONTENT_ID: "somefile.mp3",
},
)
state = hass.states.get(TEST_MASTER_ENTITY_NAME)
assert state.state == initial_state.state
assert state.last_updated > initial_state.last_updated
async def test_async_play_media_unsupported(hass, mock_api_object):
"""Test async play media on unsupported media type."""
initial_state = hass.states.get(TEST_MASTER_ENTITY_NAME)
await _service_call(
hass,
TEST_MASTER_ENTITY_NAME,
SERVICE_PLAY_MEDIA,
{
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_TVSHOW,
ATTR_MEDIA_CONTENT_ID: "wontwork.mp4",
},
)
state = hass.states.get(TEST_MASTER_ENTITY_NAME)
assert state.last_updated == initial_state.last_updated
async def test_async_play_media_tts_timeout(hass, mock_api_object):
"""Test async play media with TTS timeout."""
mock_api_object.add_to_queue.side_effect = None
with patch("homeassistant.components.forked_daapd.media_player.TTS_TIMEOUT", 0):
initial_state = hass.states.get(TEST_MASTER_ENTITY_NAME)
await _service_call(
hass,
TEST_MASTER_ENTITY_NAME,
SERVICE_PLAY_MEDIA,
{
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,
ATTR_MEDIA_CONTENT_ID: "somefile.mp3",
},
)
state = hass.states.get(TEST_MASTER_ENTITY_NAME)
assert state.state == initial_state.state
assert state.last_updated > initial_state.last_updated
async def test_use_pipe_control_with_no_api(hass, mock_api_object):
"""Test using pipe control with no api set."""
await _service_call(
hass,
TEST_MASTER_ENTITY_NAME,
SERVICE_SELECT_SOURCE,
{ATTR_INPUT_SOURCE: "librespot-java (pipe)"},
)
await _service_call(hass, TEST_MASTER_ENTITY_NAME, SERVICE_MEDIA_PLAY)
assert mock_api_object.start_playback.call_count == 0
async def test_clear_source(hass, mock_api_object):
"""Test changing source to clear."""
await _service_call(
hass,
TEST_MASTER_ENTITY_NAME,
SERVICE_SELECT_SOURCE,
{ATTR_INPUT_SOURCE: SOURCE_NAME_CLEAR},
)
state = hass.states.get(TEST_MASTER_ENTITY_NAME)
assert state.attributes[ATTR_INPUT_SOURCE] == SOURCE_NAME_DEFAULT
@pytest.fixture(name="pipe_control_api_object")
async def pipe_control_api_object_fixture(
hass, config_entry, get_request_return_values, mock_api_object
):
"""Fixture for mock librespot_java api."""
with patch(
"homeassistant.components.forked_daapd.media_player.LibrespotJavaAPI",
autospec=True,
) as pipe_control_api:
hass.config_entries.async_update_entry(config_entry, options=OPTIONS_DATA)
await hass.async_block_till_done()
get_request_return_values["player"] = SAMPLE_PLAYER_PLAYING
updater_update = mock_api_object.start_websocket_handler.call_args[0][2]
await updater_update(["player"])
await hass.async_block_till_done()
async def pause_side_effect():
await updater_update(["player"])
pipe_control_api.return_value.player_pause.side_effect = pause_side_effect
await updater_update(["database"]) # load in sources
await _service_call(
hass,
TEST_MASTER_ENTITY_NAME,
SERVICE_SELECT_SOURCE,
{ATTR_INPUT_SOURCE: "librespot-java (pipe)"},
)
return pipe_control_api.return_value
async def test_librespot_java_stuff(
hass, get_request_return_values, mock_api_object, pipe_control_api_object
):
"""Test options update and librespot-java stuff."""
state = hass.states.get(TEST_MASTER_ENTITY_NAME)
assert state.attributes[ATTR_INPUT_SOURCE] == "librespot-java (pipe)"
# call some basic services
await _service_call(hass, TEST_MASTER_ENTITY_NAME, SERVICE_MEDIA_STOP)
await _service_call(hass, TEST_MASTER_ENTITY_NAME, SERVICE_MEDIA_PREVIOUS_TRACK)
await _service_call(hass, TEST_MASTER_ENTITY_NAME, SERVICE_MEDIA_NEXT_TRACK)
await _service_call(hass, TEST_MASTER_ENTITY_NAME, SERVICE_MEDIA_PLAY)
pipe_control_api_object.player_pause.assert_called_once()
pipe_control_api_object.player_prev.assert_called_once()
pipe_control_api_object.player_next.assert_called_once()
pipe_control_api_object.player_resume.assert_called_once()
# switch away
await _service_call(
hass,
TEST_MASTER_ENTITY_NAME,
SERVICE_SELECT_SOURCE,
{ATTR_INPUT_SOURCE: SOURCE_NAME_DEFAULT},
)
state = hass.states.get(TEST_MASTER_ENTITY_NAME)
assert state.attributes[ATTR_INPUT_SOURCE] == SOURCE_NAME_DEFAULT
# test pipe getting queued externally changes source
get_request_return_values["queue"] = SAMPLE_QUEUE_PIPE
updater_update = mock_api_object.start_websocket_handler.call_args[0][2]
await updater_update(["queue"])
await hass.async_block_till_done()
state = hass.states.get(TEST_MASTER_ENTITY_NAME)
assert state.attributes[ATTR_INPUT_SOURCE] == "librespot-java (pipe)"
# test title and album not reversed when data_kind not url
assert state.attributes[ATTR_MEDIA_TITLE] == "librespot-java"
assert state.attributes[ATTR_MEDIA_ALBUM_NAME] == "some album"
async def test_librespot_java_play_media(hass, pipe_control_api_object):
"""Test play media with librespot-java pipe."""
initial_state = hass.states.get(TEST_MASTER_ENTITY_NAME)
await _service_call(
hass,
TEST_MASTER_ENTITY_NAME,
SERVICE_PLAY_MEDIA,
{
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,
ATTR_MEDIA_CONTENT_ID: "somefile.mp3",
},
)
state = hass.states.get(TEST_MASTER_ENTITY_NAME)
assert state.state == initial_state.state
assert state.last_updated > initial_state.last_updated
async def test_librespot_java_play_media_pause_timeout(hass, pipe_control_api_object):
"""Test play media with librespot-java pipe."""
# test media play with pause timeout
pipe_control_api_object.player_pause.side_effect = None
with patch(
"homeassistant.components.forked_daapd.media_player.CALLBACK_TIMEOUT", 0
):
initial_state = hass.states.get(TEST_MASTER_ENTITY_NAME)
await _service_call(
hass,
TEST_MASTER_ENTITY_NAME,
SERVICE_PLAY_MEDIA,
{
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,
ATTR_MEDIA_CONTENT_ID: "somefile.mp3",
},
)
state = hass.states.get(TEST_MASTER_ENTITY_NAME)
assert state.state == initial_state.state
assert state.last_updated > initial_state.last_updated
async def test_unsupported_update(hass, mock_api_object):
"""Test unsupported update type."""
last_updated = hass.states.get(TEST_MASTER_ENTITY_NAME).last_updated
updater_update = mock_api_object.start_websocket_handler.call_args[0][2]
await updater_update(["config"])
await hass.async_block_till_done()
assert hass.states.get(TEST_MASTER_ENTITY_NAME).last_updated == last_updated
async def test_invalid_websocket_port(hass, config_entry):
"""Test invalid websocket port on async_init."""
with patch(
"homeassistant.components.forked_daapd.media_player.ForkedDaapdAPI",
autospec=True,
) as mock_api:
mock_api.return_value.get_request.return_value = SAMPLE_CONFIG_NO_WEBSOCKET
config_entry.add_to_hass(hass)
await config_entry.async_setup(hass)
await hass.async_block_till_done()
assert hass.states.get(TEST_MASTER_ENTITY_NAME).state == STATE_UNAVAILABLE
async def test_websocket_disconnect(hass, mock_api_object):
"""Test websocket disconnection."""
assert hass.states.get(TEST_MASTER_ENTITY_NAME).state != STATE_UNAVAILABLE
assert hass.states.get(TEST_ZONE_ENTITY_NAMES[0]).state != STATE_UNAVAILABLE
updater_disconnected = mock_api_object.start_websocket_handler.call_args[0][4]
updater_disconnected()
await hass.async_block_till_done()
assert hass.states.get(TEST_MASTER_ENTITY_NAME).state == STATE_UNAVAILABLE
assert hass.states.get(TEST_ZONE_ENTITY_NAMES[0]).state == STATE_UNAVAILABLE
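# Running these tests (illustrative): from a Home Assistant core checkout,
# an invocation along the lines of
#
#   pytest tests/components/forked_daapd/test_media_player.py
#
# should work; the `hass` fixture and the helpers in tests.common come from
# Home Assistant's own test harness, so the file is not runnable on its own.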
|
{
"content_hash": "c97dc73f74120544d6a11e292a3adbb4",
"timestamp": "",
"source": "github",
"line_count": 788,
"max_line_length": 87,
"avg_line_length": 33.90355329949239,
"alnum_prop": 0.6439212456954634,
"repo_name": "lukas-hetzenecker/home-assistant",
"id": "a2e0050c3d965e8949c71d359b584d1e35743b27",
"size": "26716",
"binary": false,
"copies": "10",
"ref": "refs/heads/dev",
"path": "tests/components/forked_daapd/test_media_player.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38023745"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
import logging
from .client import MarathonClient
from .models import MarathonResource, MarathonApp, MarathonTask, MarathonConstraint
from .exceptions import MarathonError, MarathonHttpError, NotFoundError, InvalidChoiceError
log = logging.getLogger(__name__)
logging.basicConfig()
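# --- Usage sketch (illustrative; not part of the package __init__) ---
# The names re-exported above are the public API; the server URL below is
# a placeholder.
#
# from marathon import MarathonClient, MarathonApp
#
# client = MarathonClient('http://localhost:8080')
# client.create_app('my-app', MarathonApp(cmd='sleep 3600', mem=16, cpus=0.1))
# for app in client.list_apps():
#     print(app.id)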
|
{
"content_hash": "9e6b2a502d5b890e5fc63ad6917486ba",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 91,
"avg_line_length": 35.375,
"alnum_prop": 0.8409893992932862,
"repo_name": "Carles-Figuerola/marathon-python",
"id": "ea21d0a48242a9446570de7eaa466f5415c3ad57",
"size": "283",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "marathon/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cucumber",
"bytes": "428"
},
{
"name": "Makefile",
"bytes": "235"
},
{
"name": "Python",
"bytes": "70369"
},
{
"name": "Shell",
"bytes": "795"
}
],
"symlink_target": ""
}
|
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use sendgrid to send emails
- Use MEMCACHIER on Heroku
'''
from configurations import values
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
# TODO: Fix this; this module is imported even in Dev, where S3 is unused.
pass
from .common import Common
class Production(Common):
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# INSTALLED_APPS
INSTALLED_APPS = Common.INSTALLED_APPS
# END INSTALLED_APPS
# SECRET KEY
SECRET_KEY = values.SecretValue()
# END SECRET KEY
# django-secure
INSTALLED_APPS += ("djangosecure", )
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
SECURE_FRAME_DENY = values.BooleanValue(True)
SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
SESSION_COOKIE_SECURE = values.BooleanValue(False)
SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
SECURE_SSL_REDIRECT = values.BooleanValue(True)
# end django-secure
# SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = values.SecretValue()
AWS_SECRET_ACCESS_KEY = values.SecretValue()
AWS_STORAGE_BUCKET_NAME = values.SecretValue()
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# see: https://github.com/antonagestam/collectfast
AWS_PRELOAD_METADATA = True
INSTALLED_APPS += ('collectfast', )
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIRY = 60 * 60 * 24 * 7
AWS_HEADERS = {
'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIRY, AWS_EXPIRY)
}
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# END STORAGE CONFIGURATION
# EMAIL
DEFAULT_FROM_EMAIL = values.Value('ordo_electro <noreply@ordo.club>')
EMAIL_HOST = values.Value('smtp.sendgrid.com')
EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
EMAIL_SUBJECT_PREFIX = values.Value('[ordo_electro] ', environ_name="EMAIL_SUBJECT_PREFIX")
EMAIL_USE_TLS = True
SERVER_EMAIL = EMAIL_HOST_USER
# END EMAIL
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
# END TEMPLATE CONFIGURATION
# CACHING
# Only do this here because thanks to django-pylibmc-sasl and pylibmc
# memcacheify is painful to install on windows.
try:
# See: https://github.com/rdegges/django-heroku-memcacheify
from memcacheify import memcacheify
CACHES = memcacheify()
except ImportError:
CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
# END CACHING
# Your production stuff: Below this line define 3rd party library settings
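# --- Usage sketch (illustrative) ---
# django-configurations selects this class through environment variables,
# e.g. on Heroku (the settings-module path is inferred from this file's
# location; by default values.SecretValue reads DJANGO_-prefixed variables
# such as DJANGO_SECRET_KEY and DJANGO_AWS_ACCESS_KEY_ID):
#
#   DJANGO_CONFIGURATION=Production
#   DJANGO_SETTINGS_MODULE=ordo_electro.config.production
#
# and the WSGI entry point uses configurations' own bootstrap:
#
#   from configurations.wsgi import get_wsgi_application
#   application = get_wsgi_application()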
|
{
"content_hash": "b9fee3529a2af68a0b5fc3c992ca4622",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 97,
"avg_line_length": 35.78512396694215,
"alnum_prop": 0.6946882217090069,
"repo_name": "solvire/ordo_electro",
"id": "9183c905aeff283a7b6fb0cc8e5901c049128921",
"size": "4354",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ordo_electro/config/production.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "227137"
},
{
"name": "HTML",
"bytes": "41281"
},
{
"name": "JavaScript",
"bytes": "207103"
},
{
"name": "Python",
"bytes": "95436"
}
],
"symlink_target": ""
}
|
from NodeDefender.mqtt.command import fire, topic_format
def qry(mac_address):
topic = topic_format.format(mac_address, "sys", "svc", "qry")
return fire(topic, icpe = mac_address)
def telnet(mac_address, enabled):
topic = topic_format.format(mac_address, "sys", "svc:cli", "set")
return fire(topic, payload = str(int(enabled)), icpe = mac_address)
def ssh(mac_address, enabled):
topic = topic_format.format(mac_address, "sys", "svc:ssh", "set")
return fire(topic, payload = str(int(enabled)), icpe = mac_address)
def web(mac_address, enabled):
topic = topic_format.format(mac_address, "sys", "svc:web", "st")
return fire(topic, payload = str(int(enabled)), icpe = mac_address)
def snmp(mac_address, enabled):
topic = topic_format.format(mac_address, "sys", "svc:snmp", "set")
return fire(topic, payload = str(int(enabled)), icpe = mac_address)
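# --- Usage sketch (illustrative; not part of the original module) ---
# Each helper publishes one MQTT command topic for the iCPE with the given
# MAC address; the address below is a placeholder, and the import path is
# inferred from this file's location in the repository.
#
# from NodeDefender.mqtt.command.icpe.sys import service
#
# service.qry('00:11:22:33:44:55')             # query service state
# service.ssh('00:11:22:33:44:55', True)       # enable SSH
# service.telnet('00:11:22:33:44:55', False)   # disable telnet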
|
{
"content_hash": "257f0ff8ff6f548059b4e37cd877c33d",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 71,
"avg_line_length": 42.333333333333336,
"alnum_prop": 0.6771653543307087,
"repo_name": "CTSNE/NodeDefender",
"id": "253c2a5918e54650a7a7c1ad3d6fb203a5982041",
"size": "889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NodeDefender/mqtt/command/icpe/sys/service.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5419"
},
{
"name": "HTML",
"bytes": "188223"
},
{
"name": "JavaScript",
"bytes": "2861"
},
{
"name": "Python",
"bytes": "290127"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class ApplicationGatewaySslPolicy(Model):
"""Application gateway SSL policy.
:param disabled_ssl_protocols: SSL protocols to be disabled on application
gateway. Possible values are: 'TLSv1_0', 'TLSv1_1', and 'TLSv1_2'.
:type disabled_ssl_protocols: list[str or
~azure.mgmt.network.v2016_12_01.models.ApplicationGatewaySslProtocol]
"""
_attribute_map = {
'disabled_ssl_protocols': {'key': 'disabledSslProtocols', 'type': '[str]'},
}
def __init__(self, *, disabled_ssl_protocols=None, **kwargs) -> None:
super(ApplicationGatewaySslPolicy, self).__init__(**kwargs)
self.disabled_ssl_protocols = disabled_ssl_protocols
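# --- Usage sketch (illustrative; not part of the generated file) ---
# The constructor is keyword-only (note the bare * in __init__), as in the
# other "_py3" variants of these generated models:
#
# policy = ApplicationGatewaySslPolicy(
#     disabled_ssl_protocols=['TLSv1_0', 'TLSv1_1'])
# assert policy.disabled_ssl_protocols == ['TLSv1_0', 'TLSv1_1']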
|
{
"content_hash": "64e8f32f2ceab45053cc5e0c9511389d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 83,
"avg_line_length": 37.94736842105263,
"alnum_prop": 0.6865464632454924,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "af90809ca730cc123ace7f3364113087a6be9373",
"size": "1195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "azure-mgmt-network/azure/mgmt/network/v2016_12_01/models/application_gateway_ssl_policy_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import io
import os
import sys
from collections import defaultdict
from functools import partial
from distutils.errors import DistutilsOptionError, DistutilsFileError
from setuptools.py26compat import import_module
from six import string_types
def read_configuration(
filepath, find_others=False, ignore_option_errors=False):
"""Read given configuration file and returns options from it as a dict.
:param str|unicode filepath: Path to configuration file
to get options from.
:param bool find_others: Whether to search for other configuration files
which could be in various places.
:param bool ignore_option_errors: Whether to silently ignore
options, values of which could not be resolved (e.g. due to exceptions
in directives such as file:, attr:, etc.).
If False exceptions are propagated as expected.
:rtype: dict
"""
from setuptools.dist import Distribution, _Distribution
filepath = os.path.abspath(filepath)
if not os.path.isfile(filepath):
raise DistutilsFileError(
'Configuration file %s does not exist.' % filepath)
current_directory = os.getcwd()
os.chdir(os.path.dirname(filepath))
try:
dist = Distribution()
filenames = dist.find_config_files() if find_others else []
if filepath not in filenames:
filenames.append(filepath)
_Distribution.parse_config_files(dist, filenames=filenames)
handlers = parse_configuration(
dist, dist.command_options,
ignore_option_errors=ignore_option_errors)
finally:
os.chdir(current_directory)
return configuration_to_dict(handlers)
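# --- Usage sketch (illustrative; not part of the original module) ---
# read_configuration() is the public entry point:
#
#   conf = read_configuration('setup.cfg')
#   print(conf['metadata'].get('name'))
#
# The returned dict is keyed by handler section prefix ('metadata',
# 'options'), as assembled by configuration_to_dict() below.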
def configuration_to_dict(handlers):
"""Returns configuration data gathered by given handlers as a dict.
:param list[ConfigHandler] handlers: Handlers list,
usually from parse_configuration()
:rtype: dict
"""
config_dict = defaultdict(dict)
for handler in handlers:
obj_alias = handler.section_prefix
target_obj = handler.target_obj
for option in handler.set_options:
getter = getattr(target_obj, 'get_%s' % option, None)
if getter is None:
value = getattr(target_obj, option)
else:
value = getter()
config_dict[obj_alias][option] = value
return config_dict
def parse_configuration(
distribution, command_options, ignore_option_errors=False):
"""Performs additional parsing of configuration options
for a distribution.
Returns a list of used option handlers.
:param Distribution distribution:
:param dict command_options:
:param bool ignore_option_errors: Whether to silently ignore
options, values of which could not be resolved (e.g. due to exceptions
in directives such as file:, attr:, etc.).
If False exceptions are propagated as expected.
:rtype: list
"""
meta = ConfigMetadataHandler(
distribution.metadata, command_options, ignore_option_errors)
meta.parse()
options = ConfigOptionsHandler(
distribution, command_options, ignore_option_errors)
options.parse()
return [meta, options]
class ConfigHandler(object):
"""Handles metadata supplied in configuration files."""
section_prefix = None
"""Prefix for config sections handled by this handler.
Must be provided by subclasses.
"""
aliases = {}
"""Options aliases.
For compatibility with various packages. E.g.: d2to1 and pbr.
Note: `-` in keys is replaced with `_` by config parser.
"""
def __init__(self, target_obj, options, ignore_option_errors=False):
sections = {}
section_prefix = self.section_prefix
for section_name, section_options in options.items():
if not section_name.startswith(section_prefix):
continue
section_name = section_name.replace(section_prefix, '').strip('.')
sections[section_name] = section_options
self.ignore_option_errors = ignore_option_errors
self.target_obj = target_obj
self.sections = sections
self.set_options = []
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
raise NotImplementedError(
'%s must provide .parsers property' % self.__class__.__name__)
def __setitem__(self, option_name, value):
unknown = tuple()
target_obj = self.target_obj
# Translate alias into real name.
option_name = self.aliases.get(option_name, option_name)
current_value = getattr(target_obj, option_name, unknown)
if current_value is unknown:
raise KeyError(option_name)
if current_value:
# Already set. Skipping.
return
skip_option = False
parser = self.parsers.get(option_name)
if parser:
try:
value = parser(value)
except Exception:
skip_option = True
if not self.ignore_option_errors:
raise
if skip_option:
return
setter = getattr(target_obj, 'set_%s' % option_name, None)
if setter is None:
setattr(target_obj, option_name, value)
else:
setter(value)
self.set_options.append(option_name)
@classmethod
def _parse_list(cls, value, separator=','):
"""Represents value as a list.
Value is split either by separator (defaults to comma) or by lines.
:param value:
:param separator: List items separator character.
:rtype: list
"""
if isinstance(value, list): # _get_parser_compound case
return value
if '\n' in value:
value = value.splitlines()
else:
value = value.split(separator)
return [chunk.strip() for chunk in value if chunk.strip()]
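    # Illustrative behaviour (hypothetical values): multi-line input is split
    # on lines, single-line input on the separator, and empty chunks dropped.
    #
    #     ConfigHandler._parse_list('a, b, c')    # -> ['a', 'b', 'c']
    #     ConfigHandler._parse_list('a\nb\n\nc')  # -> ['a', 'b', 'c']
    #     ConfigHandler._parse_list('a; b', ';')  # -> ['a', 'b']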
@classmethod
def _parse_dict(cls, value):
"""Represents value as a dict.
:param value:
:rtype: dict
"""
separator = '='
result = {}
for line in cls._parse_list(value):
key, sep, val = line.partition(separator)
if sep != separator:
raise DistutilsOptionError(
'Unable to parse option value to dict: %s' % value)
result[key.strip()] = val.strip()
return result
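    # Illustrative behaviour (hypothetical values): every item must contain
    # '=', otherwise DistutilsOptionError is raised.
    #
    #     ConfigHandler._parse_dict('a = 1\nb = 2')  # -> {'a': '1', 'b': '2'}
    #     ConfigHandler._parse_dict('no-equals')     # raises DistutilsOptionError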
@classmethod
def _parse_bool(cls, value):
"""Represents value as boolean.
:param value:
:rtype: bool
"""
value = value.lower()
return value in ('1', 'true', 'yes')
@classmethod
def _parse_file(cls, value):
"""Represents value as a string, allowing including text
from nearest files using `file:` directive.
Directive is sandboxed and won't reach anything outside
directory with setup.py.
Examples:
include: LICENSE
include: src/file.txt
:param str value:
:rtype: str
"""
if not isinstance(value, string_types):
return value
include_directive = 'file:'
if not value.startswith(include_directive):
return value
current_directory = os.getcwd()
filepath = value.replace(include_directive, '').strip()
filepath = os.path.abspath(filepath)
if not filepath.startswith(current_directory):
raise DistutilsOptionError(
'`file:` directive can not access %s' % filepath)
if os.path.isfile(filepath):
with io.open(filepath, encoding='utf-8') as f:
value = f.read()
return value
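    # A hypothetical setup.cfg snippet exercising the `file:` directive; the
    # referenced file must live under the directory containing setup.py:
    #
    #     [metadata]
    #     long_description = file: README.rst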
@classmethod
def _parse_attr(cls, value):
"""Represents value as a module attribute.
Examples:
attr: package.attr
attr: package.module.attr
:param str value:
:rtype: str
"""
attr_directive = 'attr:'
if not value.startswith(attr_directive):
return value
attrs_path = value.replace(attr_directive, '').strip().split('.')
attr_name = attrs_path.pop()
module_name = '.'.join(attrs_path)
module_name = module_name or '__init__'
sys.path.insert(0, os.getcwd())
try:
module = import_module(module_name)
value = getattr(module, attr_name)
finally:
sys.path = sys.path[1:]
return value
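    # A hypothetical setup.cfg snippet exercising the `attr:` directive; the
    # attribute is imported relative to the current working directory:
    #
    #     [metadata]
    #     version = attr: mypackage.__version__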
@classmethod
def _get_parser_compound(cls, *parse_methods):
"""Returns parser function to represents value as a list.
Parses a value applying given methods one after another.
:param parse_methods:
:rtype: callable
"""
def parse(value):
parsed = value
for method in parse_methods:
parsed = method(parsed)
return parsed
return parse
@classmethod
def _parse_section_to_dict(cls, section_options, values_parser=None):
"""Parses section options into a dictionary.
Optionally applies a given parser to values.
:param dict section_options:
:param callable values_parser:
:rtype: dict
"""
value = {}
values_parser = values_parser or (lambda val: val)
for key, (_, val) in section_options.items():
value[key] = values_parser(val)
return value
def parse_section(self, section_options):
"""Parses configuration file section.
:param dict section_options:
"""
for (name, (_, value)) in section_options.items():
try:
self[name] = value
except KeyError:
                pass  # Keep silent: a new option may appear at any time.
def parse(self):
"""Parses configuration file items from one
or more related sections.
"""
for section_name, section_options in self.sections.items():
method_postfix = ''
if section_name: # [section.option] variant
method_postfix = '_%s' % section_name
section_parser_method = getattr(
self,
                # Dots in section names are translated into double underscores.
('parse_section%s' % method_postfix).replace('.', '__'),
None)
if section_parser_method is None:
raise DistutilsOptionError(
'Unsupported distribution option section: [%s.%s]' % (
self.section_prefix, section_name))
section_parser_method(section_options)
class ConfigMetadataHandler(ConfigHandler):
section_prefix = 'metadata'
aliases = {
'home_page': 'url',
'summary': 'description',
'classifier': 'classifiers',
'platform': 'platforms',
}
strict_mode = False
"""We need to keep it loose, to be partially compatible with
`pbr` and `d2to1` packages which also uses `metadata` section.
"""
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
parse_list = self._parse_list
parse_file = self._parse_file
return {
'platforms': parse_list,
'keywords': parse_list,
'provides': parse_list,
'requires': parse_list,
'obsoletes': parse_list,
'classifiers': self._get_parser_compound(parse_file, parse_list),
'license': parse_file,
'description': parse_file,
'long_description': parse_file,
'version': self._parse_version,
}
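    # A hypothetical `[metadata]` section matching the parsers above:
    # list-like options take one item per line, and `file:`/`attr:` may be
    # used where the corresponding parsers are registered.
    #
    #     [metadata]
    #     version = attr: mypackage.__version__
    #     long_description = file: README.rst
    #     classifiers =
    #         Programming Language :: Python
    #         License :: OSI Approved :: MIT License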
def parse_section_classifiers(self, section_options):
"""Parses configuration file section.
:param dict section_options:
"""
classifiers = []
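        # configparser treats ':' as a key/value delimiter, so an entry such
        # as `Framework :: Django` typically arrives here as key 'framework'
        # and value ': Django'; '%s :%s' with .title() stitches it back.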
for begin, (_, rest) in section_options.items():
classifiers.append('%s :%s' % (begin.title(), rest))
self['classifiers'] = classifiers
def _parse_version(self, value):
"""Parses `version` option value.
:param value:
:rtype: str
"""
version = self._parse_attr(value)
if callable(version):
version = version()
if not isinstance(version, string_types):
if hasattr(version, '__iter__'):
version = '.'.join(map(str, version))
else:
version = '%s' % version
return version
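    # Illustrative resolution of `version` (names hypothetical): `attr:` is
    # resolved first, a callable result is invoked, a non-string iterable is
    # dot-joined, and anything else is coerced with '%s'.
    #
    #     version = attr: mypackage.get_version  # callable -> return value
    #     version = attr: mypackage.VERSION      # (1, 2, 3) -> '1.2.3'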
class ConfigOptionsHandler(ConfigHandler):
section_prefix = 'options'
@property
def parsers(self):
"""Metadata item name to parser function mapping."""
parse_list = self._parse_list
parse_list_semicolon = partial(self._parse_list, separator=';')
parse_bool = self._parse_bool
parse_dict = self._parse_dict
return {
'zip_safe': parse_bool,
'use_2to3': parse_bool,
'include_package_data': parse_bool,
'package_dir': parse_dict,
'use_2to3_fixers': parse_list,
'use_2to3_exclude_fixers': parse_list,
'convert_2to3_doctests': parse_list,
'scripts': parse_list,
'eager_resources': parse_list,
'dependency_links': parse_list,
'namespace_packages': parse_list,
'install_requires': parse_list_semicolon,
'setup_requires': parse_list_semicolon,
'tests_require': parse_list_semicolon,
'packages': self._parse_packages,
'entry_points': self._parse_file,
}
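    # A hypothetical `[options]` section matching the parsers above:
    # multi-line values are split per line, while single-line requirement
    # lists use ';' as the separator.
    #
    #     [options]
    #     zip_safe = False
    #     packages = find:
    #     install_requires =
    #         requests>=2.0
    #         six>=1.10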
def _parse_packages(self, value):
"""Parses `packages` option value.
:param value:
:rtype: list
"""
find_directive = 'find:'
if not value.startswith(find_directive):
return self._parse_list(value)
# Read function arguments from a dedicated section.
find_kwargs = self.parse_section_packages__find(
self.sections.get('packages.find', {}))
from setuptools import find_packages
return find_packages(**find_kwargs)
def parse_section_packages__find(self, section_options):
"""Parses `packages.find` configuration file section.
To be used in conjunction with _parse_packages().
:param dict section_options:
"""
section_data = self._parse_section_to_dict(
section_options, self._parse_list)
valid_keys = ['where', 'include', 'exclude']
find_kwargs = dict(
[(k, v) for k, v in section_data.items() if k in valid_keys and v])
where = find_kwargs.get('where')
if where is not None:
find_kwargs['where'] = where[0] # cast list to single val
return find_kwargs
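    # A hypothetical `find:` setup consumed by _parse_packages() together
    # with this section handler:
    #
    #     [options]
    #     packages = find:
    #
    #     [options.packages.find]
    #     where = src
    #     exclude =
    #         tests*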
def parse_section_entry_points(self, section_options):
"""Parses `entry_points` configuration file section.
:param dict section_options:
"""
parsed = self._parse_section_to_dict(section_options, self._parse_list)
self['entry_points'] = parsed
def _parse_package_data(self, section_options):
parsed = self._parse_section_to_dict(section_options, self._parse_list)
root = parsed.get('*')
if root:
parsed[''] = root
del parsed['*']
return parsed
def parse_section_package_data(self, section_options):
"""Parses `package_data` configuration file section.
:param dict section_options:
"""
self['package_data'] = self._parse_package_data(section_options)
def parse_section_exclude_package_data(self, section_options):
"""Parses `exclude_package_data` configuration file section.
:param dict section_options:
"""
self['exclude_package_data'] = self._parse_package_data(
section_options)
def parse_section_extras_require(self, section_options):
"""Parses `extras_require` configuration file section.
:param dict section_options:
"""
parse_list = partial(self._parse_list, separator=';')
self['extras_require'] = self._parse_section_to_dict(
section_options, parse_list)
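    # A hypothetical `[options.extras_require]` section: each value is a
    # per-line (or ';'-separated) requirement list keyed by extra name.
    #
    #     [options.extras_require]
    #     tests = pytest; coverage
    #     docs =
    #         sphinx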
|
{
"content_hash": "34a9562502cafe6c2c4296b063722941",
"timestamp": "",
"source": "github",
"line_count": 558,
"max_line_length": 79,
"avg_line_length": 29.381720430107528,
"alnum_prop": 0.5890820372064653,
"repo_name": "mpercich/Calendarize",
"id": "19b39629c707a6238a756cd7c79b0b7c1236bde9",
"size": "16395",
"binary": false,
"copies": "26",
"ref": "refs/heads/master",
"path": "ios/dateparser/lib/python2.7/site-packages/setuptools/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "859680"
},
{
"name": "C++",
"bytes": "1621286"
},
{
"name": "M4",
"bytes": "1690"
},
{
"name": "Objective-C",
"bytes": "17561"
},
{
"name": "Objective-C++",
"bytes": "1764"
},
{
"name": "Python",
"bytes": "2941199"
},
{
"name": "Shell",
"bytes": "3252"
},
{
"name": "Swift",
"bytes": "5812"
}
],
"symlink_target": ""
}
|
"""
Tests for Block Device utility functions.
"""
from nova import block_device
from nova import exception
from nova import objects
from nova import test
from nova.tests import fake_block_device
from nova.tests import matchers
class BlockDeviceTestCase(test.NoDBTestCase):
def setUp(self):
super(BlockDeviceTestCase, self).setUp()
BDM = block_device.BlockDeviceDict
self.new_mapping = [
BDM({'id': 1, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'volume_size': 1,
'guest_format': 'swap',
'boot_index': -1}),
BDM({'id': 2, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'volume_size': 10,
'delete_on_termination': True,
'boot_index': -1}),
BDM({'id': 3, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda1',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'connection_info': "{'fake': 'connection_info'}",
'boot_index': 0}),
BDM({'id': 4, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1}),
BDM({'id': 5, 'instance_uuid': 'fake-instance',
'no_device': True,
'device_name': '/dev/vdc'}),
]
def test_properties(self):
root_device0 = '/dev/sda'
root_device1 = '/dev/sdb'
mappings = [{'virtual': 'root',
'device': root_device0}]
properties0 = {'mappings': mappings}
properties1 = {'mappings': mappings,
'root_device_name': root_device1}
self.assertIsNone(block_device.properties_root_device_name({}))
self.assertEqual(
block_device.properties_root_device_name(properties0),
root_device0)
self.assertEqual(
block_device.properties_root_device_name(properties1),
root_device1)
def test_ephemeral(self):
self.assertFalse(block_device.is_ephemeral('ephemeral'))
self.assertTrue(block_device.is_ephemeral('ephemeral0'))
self.assertTrue(block_device.is_ephemeral('ephemeral1'))
self.assertTrue(block_device.is_ephemeral('ephemeral11'))
self.assertFalse(block_device.is_ephemeral('root'))
self.assertFalse(block_device.is_ephemeral('swap'))
self.assertFalse(block_device.is_ephemeral('/dev/sda1'))
self.assertEqual(block_device.ephemeral_num('ephemeral0'), 0)
self.assertEqual(block_device.ephemeral_num('ephemeral1'), 1)
self.assertEqual(block_device.ephemeral_num('ephemeral11'), 11)
self.assertFalse(block_device.is_swap_or_ephemeral('ephemeral'))
self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral0'))
self.assertTrue(block_device.is_swap_or_ephemeral('ephemeral1'))
self.assertTrue(block_device.is_swap_or_ephemeral('swap'))
self.assertFalse(block_device.is_swap_or_ephemeral('root'))
self.assertFalse(block_device.is_swap_or_ephemeral('/dev/sda1'))
def test_mappings_prepend_dev(self):
mapping = [
{'virtual': 'ami', 'device': '/dev/sda'},
{'virtual': 'root', 'device': 'sda'},
{'virtual': 'ephemeral0', 'device': 'sdb'},
{'virtual': 'swap', 'device': 'sdc'},
{'virtual': 'ephemeral1', 'device': 'sdd'},
{'virtual': 'ephemeral2', 'device': 'sde'}]
expected = [
{'virtual': 'ami', 'device': '/dev/sda'},
{'virtual': 'root', 'device': 'sda'},
{'virtual': 'ephemeral0', 'device': '/dev/sdb'},
{'virtual': 'swap', 'device': '/dev/sdc'},
{'virtual': 'ephemeral1', 'device': '/dev/sdd'},
{'virtual': 'ephemeral2', 'device': '/dev/sde'}]
prepended = block_device.mappings_prepend_dev(mapping)
        # list.sort() returns None, so sort in place and compare the lists.
        prepended.sort()
        expected.sort()
        self.assertEqual(prepended, expected)
def test_strip_dev(self):
self.assertEqual(block_device.strip_dev('/dev/sda'), 'sda')
self.assertEqual(block_device.strip_dev('sda'), 'sda')
def test_strip_prefix(self):
self.assertEqual(block_device.strip_prefix('/dev/sda'), 'a')
self.assertEqual(block_device.strip_prefix('a'), 'a')
self.assertEqual(block_device.strip_prefix('xvda'), 'a')
self.assertEqual(block_device.strip_prefix('vda'), 'a')
self.assertEqual(block_device.strip_prefix('hda'), 'a')
def test_get_device_letter(self):
self.assertEqual(block_device.get_device_letter(''), '')
self.assertEqual(block_device.get_device_letter('/dev/sda1'), 'a')
self.assertEqual(block_device.get_device_letter('/dev/xvdb'), 'b')
self.assertEqual(block_device.get_device_letter('/dev/d'), 'd')
self.assertEqual(block_device.get_device_letter('a'), 'a')
self.assertEqual(block_device.get_device_letter('sdb2'), 'b')
self.assertEqual(block_device.get_device_letter('vdc'), 'c')
self.assertEqual(block_device.get_device_letter('hdc'), 'c')
def test_volume_in_mapping(self):
swap = {'device_name': '/dev/sdb',
'swap_size': 1}
ephemerals = [{'num': 0,
'virtual_name': 'ephemeral0',
'device_name': '/dev/sdc1',
'size': 1},
{'num': 2,
'virtual_name': 'ephemeral2',
'device_name': '/dev/sdd',
'size': 1}]
block_device_mapping = [{'mount_device': '/dev/sde',
'device_path': 'fake_device'},
{'mount_device': '/dev/sdf',
'device_path': 'fake_device'}]
block_device_info = {
'root_device_name': '/dev/sda',
'swap': swap,
'ephemerals': ephemerals,
'block_device_mapping': block_device_mapping}
def _assert_volume_in_mapping(device_name, true_or_false):
in_mapping = block_device.volume_in_mapping(
device_name, block_device_info)
self.assertEqual(in_mapping, true_or_false)
_assert_volume_in_mapping('sda', False)
_assert_volume_in_mapping('sdb', True)
_assert_volume_in_mapping('sdc1', True)
_assert_volume_in_mapping('sdd', True)
_assert_volume_in_mapping('sde', True)
_assert_volume_in_mapping('sdf', True)
_assert_volume_in_mapping('sdg', False)
_assert_volume_in_mapping('sdh1', False)
def test_get_root_bdm(self):
root_bdm = {'device_name': 'vda', 'boot_index': 0}
bdms = [root_bdm,
{'device_name': 'vdb', 'boot_index': 1},
{'device_name': 'vdc', 'boot_index': -1},
{'device_name': 'vdd'}]
self.assertEqual(root_bdm, block_device.get_root_bdm(bdms))
self.assertEqual(root_bdm, block_device.get_root_bdm([bdms[0]]))
self.assertIsNone(block_device.get_root_bdm(bdms[1:]))
self.assertIsNone(block_device.get_root_bdm(bdms[2:]))
self.assertIsNone(block_device.get_root_bdm(bdms[3:]))
self.assertIsNone(block_device.get_root_bdm([]))
def test_get_bdm_ephemeral_disk_size(self):
size = block_device.get_bdm_ephemeral_disk_size(self.new_mapping)
self.assertEqual(10, size)
def test_get_bdm_swap_list(self):
swap_list = block_device.get_bdm_swap_list(self.new_mapping)
self.assertEqual(1, len(swap_list))
self.assertEqual(1, swap_list[0].get('id'))
def test_get_bdm_local_disk_num(self):
size = block_device.get_bdm_local_disk_num(self.new_mapping)
self.assertEqual(2, size)
def test_new_format_is_swap(self):
expected_results = [True, False, False, False, False]
for expected, bdm in zip(expected_results, self.new_mapping):
res = block_device.new_format_is_swap(bdm)
self.assertEqual(expected, res)
def test_new_format_is_ephemeral(self):
expected_results = [False, True, False, False, False]
for expected, bdm in zip(expected_results, self.new_mapping):
res = block_device.new_format_is_ephemeral(bdm)
self.assertEqual(expected, res)
def test_validate_device_name(self):
for value in [' ', 10, None, 'a' * 260]:
self.assertRaises(exception.InvalidBDMFormat,
block_device.validate_device_name,
value)
def test_validate_and_default_volume_size(self):
bdm = {}
for value in [-1, 'a', 2.5]:
bdm['volume_size'] = value
self.assertRaises(exception.InvalidBDMFormat,
block_device.validate_and_default_volume_size,
bdm)
def test_get_bdms_to_connect(self):
root_bdm = {'device_name': 'vda', 'boot_index': 0}
bdms = [root_bdm,
{'device_name': 'vdb', 'boot_index': 1},
{'device_name': 'vdc', 'boot_index': -1},
{'device_name': 'vde', 'boot_index': None},
{'device_name': 'vdd'}]
self.assertNotIn(root_bdm, block_device.get_bdms_to_connect(bdms,
exclude_root_mapping=True))
self.assertIn(root_bdm, block_device.get_bdms_to_connect(bdms))
class TestBlockDeviceDict(test.NoDBTestCase):
def setUp(self):
super(TestBlockDeviceDict, self).setUp()
BDM = block_device.BlockDeviceDict
self.api_mapping = [
{'id': 1, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'boot_index': -1},
{'id': 2, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'boot_index': -1},
{'id': 3, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda1',
'source_type': 'volume',
'destination_type': 'volume',
'uuid': 'fake-volume-id-1',
'boot_index': 0},
{'id': 4, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'uuid': 'fake-snapshot-id-1',
'boot_index': -1},
{'id': 5, 'instance_uuid': 'fake-instance',
'no_device': True,
'device_name': '/dev/vdc'},
]
self.new_mapping = [
BDM({'id': 1, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdb1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'guest_format': 'swap',
'boot_index': -1}),
BDM({'id': 2, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdc1',
'source_type': 'blank',
'destination_type': 'local',
'delete_on_termination': True,
'boot_index': -1}),
BDM({'id': 3, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda1',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'connection_info': "{'fake': 'connection_info'}",
'boot_index': 0}),
BDM({'id': 4, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'source_type': 'snapshot',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2',
'boot_index': -1}),
BDM({'id': 5, 'instance_uuid': 'fake-instance',
'no_device': True,
'device_name': '/dev/vdc'}),
]
self.legacy_mapping = [
{'id': 1, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdb1',
'delete_on_termination': True,
'virtual_name': 'swap'},
{'id': 2, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sdc1',
'delete_on_termination': True,
'virtual_name': 'ephemeral0'},
{'id': 3, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda1',
'volume_id': 'fake-volume-id-1',
'connection_info': "{'fake': 'connection_info'}"},
{'id': 4, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda2',
'connection_info': "{'fake': 'connection_info'}",
'snapshot_id': 'fake-snapshot-id-1',
'volume_id': 'fake-volume-id-2'},
{'id': 5, 'instance_uuid': 'fake-instance',
'no_device': True,
'device_name': '/dev/vdc'},
]
self.new_mapping_source_image = [
BDM({'id': 6, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda3',
'source_type': 'image',
'destination_type': 'volume',
'connection_info': "{'fake': 'connection_info'}",
'volume_id': 'fake-volume-id-3',
'boot_index': -1}),
BDM({'id': 7, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda4',
'source_type': 'image',
'destination_type': 'local',
'connection_info': "{'fake': 'connection_info'}",
'image_id': 'fake-image-id-2',
'boot_index': -1}),
]
self.legacy_mapping_source_image = [
{'id': 6, 'instance_uuid': 'fake-instance',
'device_name': '/dev/sda3',
'connection_info': "{'fake': 'connection_info'}",
'volume_id': 'fake-volume-id-3'},
]
def test_init(self):
def fake_validate(obj, dct):
pass
self.stubs.Set(block_device.BlockDeviceDict, '_fields',
set(['field1', 'field2']))
self.stubs.Set(block_device.BlockDeviceDict, '_db_only_fields',
set(['db_field1', 'db_field2']))
self.stubs.Set(block_device.BlockDeviceDict, '_validate',
fake_validate)
# Make sure db fields are not picked up if they are not
# in the original dict
dev_dict = block_device.BlockDeviceDict({'field1': 'foo',
'field2': 'bar',
'db_field1': 'baz'})
self.assertIn('field1', dev_dict)
self.assertIn('field2', dev_dict)
self.assertIn('db_field1', dev_dict)
self.assertNotIn('db_field2', dev_dict)
# Make sure all expected fields are defaulted
dev_dict = block_device.BlockDeviceDict({'field1': 'foo'})
self.assertIn('field1', dev_dict)
self.assertIn('field2', dev_dict)
self.assertIsNone(dev_dict['field2'])
self.assertNotIn('db_field1', dev_dict)
self.assertNotIn('db_field2', dev_dict)
# Unless they are not meant to be
dev_dict = block_device.BlockDeviceDict({'field1': 'foo'},
do_not_default=set(['field2']))
self.assertIn('field1', dev_dict)
self.assertNotIn('field2', dev_dict)
self.assertNotIn('db_field1', dev_dict)
self.assertNotIn('db_field2', dev_dict)
# Passing kwargs to constructor works
dev_dict = block_device.BlockDeviceDict(field1='foo')
self.assertIn('field1', dev_dict)
self.assertIn('field2', dev_dict)
self.assertIsNone(dev_dict['field2'])
dev_dict = block_device.BlockDeviceDict(
{'field1': 'foo'}, field2='bar')
self.assertEqual('foo', dev_dict['field1'])
self.assertEqual('bar', dev_dict['field2'])
def test_init_prepend_dev_to_device_name(self):
bdm = {'id': 3, 'instance_uuid': 'fake-instance',
'device_name': 'vda',
'source_type': 'volume',
'destination_type': 'volume',
'volume_id': 'fake-volume-id-1',
'boot_index': 0}
bdm_dict = block_device.BlockDeviceDict(bdm)
self.assertEqual('/dev/vda', bdm_dict['device_name'])
bdm['device_name'] = '/dev/vdb'
bdm_dict = block_device.BlockDeviceDict(bdm)
self.assertEqual('/dev/vdb', bdm_dict['device_name'])
bdm['device_name'] = None
bdm_dict = block_device.BlockDeviceDict(bdm)
self.assertIsNone(bdm_dict['device_name'])
def test_validate(self):
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
{'bogus_field': 'lame_val'})
lame_bdm = dict(self.new_mapping[2])
del lame_bdm['source_type']
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
lame_bdm)
lame_bdm['no_device'] = True
block_device.BlockDeviceDict(lame_bdm)
lame_dev_bdm = dict(self.new_mapping[2])
lame_dev_bdm['device_name'] = "not a valid name"
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
lame_dev_bdm)
lame_dev_bdm['device_name'] = ""
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
lame_dev_bdm)
cool_volume_size_bdm = dict(self.new_mapping[2])
cool_volume_size_bdm['volume_size'] = '42'
cool_volume_size_bdm = block_device.BlockDeviceDict(
cool_volume_size_bdm)
self.assertEqual(cool_volume_size_bdm['volume_size'], 42)
lame_volume_size_bdm = dict(self.new_mapping[2])
lame_volume_size_bdm['volume_size'] = 'some_non_int_string'
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
lame_volume_size_bdm)
truthy_bdm = dict(self.new_mapping[2])
truthy_bdm['delete_on_termination'] = '1'
truthy_bdm = block_device.BlockDeviceDict(truthy_bdm)
self.assertEqual(truthy_bdm['delete_on_termination'], True)
verbose_bdm = dict(self.new_mapping[2])
verbose_bdm['boot_index'] = 'first'
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict,
verbose_bdm)
def test_from_legacy(self):
for legacy, new in zip(self.legacy_mapping, self.new_mapping):
self.assertThat(
block_device.BlockDeviceDict.from_legacy(legacy),
matchers.IsSubDictOf(new))
def test_from_legacy_mapping(self):
def _get_image_bdms(bdms):
return [bdm for bdm in bdms if bdm['source_type'] == 'image']
def _get_bootable_bdms(bdms):
return [bdm for bdm in bdms if bdm['boot_index'] >= 0]
new_no_img = block_device.from_legacy_mapping(self.legacy_mapping)
self.assertEqual(len(_get_image_bdms(new_no_img)), 0)
for new, expected in zip(new_no_img, self.new_mapping):
self.assertThat(new, matchers.IsSubDictOf(expected))
new_with_img = block_device.from_legacy_mapping(
self.legacy_mapping, 'fake_image_ref')
image_bdms = _get_image_bdms(new_with_img)
boot_bdms = _get_bootable_bdms(new_with_img)
self.assertEqual(len(image_bdms), 1)
self.assertEqual(len(boot_bdms), 1)
self.assertEqual(image_bdms[0]['boot_index'], 0)
self.assertEqual(boot_bdms[0]['source_type'], 'image')
new_with_img_and_root = block_device.from_legacy_mapping(
self.legacy_mapping, 'fake_image_ref', 'sda1')
image_bdms = _get_image_bdms(new_with_img_and_root)
boot_bdms = _get_bootable_bdms(new_with_img_and_root)
self.assertEqual(len(image_bdms), 0)
self.assertEqual(len(boot_bdms), 1)
self.assertEqual(boot_bdms[0]['boot_index'], 0)
self.assertEqual(boot_bdms[0]['source_type'], 'volume')
new_no_root = block_device.from_legacy_mapping(
self.legacy_mapping, 'fake_image_ref', 'sda1', no_root=True)
self.assertEqual(len(_get_image_bdms(new_no_root)), 0)
self.assertEqual(len(_get_bootable_bdms(new_no_root)), 0)
def test_from_api(self):
for api, new in zip(self.api_mapping, self.new_mapping):
new['connection_info'] = None
if new['snapshot_id']:
new['volume_id'] = None
self.assertThat(
block_device.BlockDeviceDict.from_api(api),
matchers.IsSubDictOf(new))
def test_from_api_invalid_blank_id(self):
api_dict = {'id': 1,
'source_type': 'blank',
'destination_type': 'volume',
'uuid': 'fake-volume-id-1',
'delete_on_termination': True,
'boot_index': -1}
self.assertRaises(exception.InvalidBDMFormat,
block_device.BlockDeviceDict.from_api, api_dict)
def test_legacy(self):
for legacy, new in zip(self.legacy_mapping, self.new_mapping):
self.assertThat(
legacy,
matchers.IsSubDictOf(new.legacy()))
def test_legacy_mapping(self):
got_legacy = block_device.legacy_mapping(self.new_mapping)
for legacy, expected in zip(got_legacy, self.legacy_mapping):
self.assertThat(expected, matchers.IsSubDictOf(legacy))
def test_legacy_source_image(self):
for legacy, new in zip(self.legacy_mapping_source_image,
self.new_mapping_source_image):
if new['destination_type'] == 'volume':
self.assertThat(legacy, matchers.IsSubDictOf(new.legacy()))
else:
self.assertRaises(exception.InvalidBDMForLegacy, new.legacy)
def test_legacy_mapping_source_image(self):
got_legacy = block_device.legacy_mapping(self.new_mapping)
for legacy, expected in zip(got_legacy, self.legacy_mapping):
self.assertThat(expected, matchers.IsSubDictOf(legacy))
def test_legacy_mapping_from_object_list(self):
bdm1 = objects.BlockDeviceMapping()
bdm1 = objects.BlockDeviceMapping._from_db_object(
None, bdm1, fake_block_device.FakeDbBlockDeviceDict(
self.new_mapping[0]))
bdm2 = objects.BlockDeviceMapping()
bdm2 = objects.BlockDeviceMapping._from_db_object(
None, bdm2, fake_block_device.FakeDbBlockDeviceDict(
self.new_mapping[1]))
bdmlist = objects.BlockDeviceMappingList()
bdmlist.objects = [bdm1, bdm2]
block_device.legacy_mapping(bdmlist)
def test_image_mapping(self):
removed_fields = ['id', 'instance_uuid', 'connection_info',
'device_name', 'created_at', 'updated_at',
'deleted_at', 'deleted']
for bdm in self.new_mapping:
mapping_bdm = fake_block_device.FakeDbBlockDeviceDict(
bdm).get_image_mapping()
for fld in removed_fields:
self.assertNotIn(fld, mapping_bdm)
def _test_snapshot_from_bdm(self, template):
snapshot = block_device.snapshot_from_bdm('new-snapshot-id', template)
self.assertEqual(snapshot['snapshot_id'], 'new-snapshot-id')
self.assertEqual(snapshot['source_type'], 'snapshot')
self.assertEqual(snapshot['destination_type'], 'volume')
for key in ['disk_bus', 'device_type', 'boot_index']:
self.assertEqual(snapshot[key], template[key])
def test_snapshot_from_bdm(self):
for bdm in self.new_mapping:
self._test_snapshot_from_bdm(bdm)
def test_snapshot_from_object(self):
for bdm in self.new_mapping[:-1]:
obj = objects.BlockDeviceMapping()
obj = objects.BlockDeviceMapping._from_db_object(
None, obj, fake_block_device.FakeDbBlockDeviceDict(
bdm))
self._test_snapshot_from_bdm(obj)
|
{
"content_hash": "e2bec921f24652edeae1a36e6f957fbd",
"timestamp": "",
"source": "github",
"line_count": 591,
"max_line_length": 78,
"avg_line_length": 43.18274111675127,
"alnum_prop": 0.5432389012969712,
"repo_name": "redhat-openstack/nova",
"id": "2410039ac5e6f5c6fe597434d0023b5ffa242aee",
"size": "26151",
"binary": false,
"copies": "1",
"ref": "refs/heads/f22-patches",
"path": "nova/tests/test_block_device.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groff",
"bytes": "112"
},
{
"name": "PLpgSQL",
"bytes": "2958"
},
{
"name": "Python",
"bytes": "15424955"
},
{
"name": "Shell",
"bytes": "20796"
},
{
"name": "Smarty",
"bytes": "678196"
}
],
"symlink_target": ""
}
|
import string
from collections import namedtuple
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, cast
import pytest
from pants.engine.addresses import Address
from pants.engine.fs import GlobExpansionConjunction, GlobMatchErrorBehavior, PathGlobs, Paths
from pants.engine.target import (
AsyncFieldMixin,
BoolField,
CoarsenedTarget,
DictStringToStringField,
DictStringToStringSequenceField,
ExplicitlyProvidedDependencies,
Field,
FieldSet,
FloatField,
GeneratedTargets,
GenerateSourcesRequest,
IntField,
InvalidFieldChoiceException,
InvalidFieldException,
InvalidFieldTypeException,
InvalidGeneratedTargetException,
InvalidTargetException,
MultipleSourcesField,
NestedDictStringToStringField,
OptionalSingleSourceField,
OverridesField,
RequiredFieldMissingException,
ScalarField,
SequenceField,
SingleSourceField,
StringField,
StringSequenceField,
Target,
ValidNumbers,
generate_file_based_overrides_field_help_message,
get_shard,
parse_shard_spec,
targets_with_sources_types,
)
from pants.engine.unions import UnionMembership
from pants.option.global_options import UnmatchedBuildFileGlobs
from pants.testutil.pytest_util import no_exception
from pants.util.frozendict import FrozenDict
from pants.util.meta import FrozenInstanceError
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
# -----------------------------------------------------------------------------------------------
# Test core Field and Target abstractions
# -----------------------------------------------------------------------------------------------
class FortranExtensions(Field):
alias = "fortran_extensions"
value: Tuple[str, ...]
default = ()
@classmethod
def compute_value(cls, raw_value: Optional[Iterable[str]], address: Address) -> Tuple[str, ...]:
value_or_default = super().compute_value(raw_value, address)
# Add some arbitrary validation to test that hydration/validation works properly.
bad_extensions = [
extension for extension in value_or_default if not extension.startswith("Fortran")
]
if bad_extensions:
raise InvalidFieldException(
f"The {repr(cls.alias)} field in target {address} expects all elements to be "
f"prefixed by `Fortran`. Received {bad_extensions}.",
)
return tuple(value_or_default)
class FortranVersion(StringField):
alias = "version"
class UnrelatedField(BoolField):
alias = "unrelated"
default = False
class FortranTarget(Target):
alias = "fortran"
core_fields = (FortranExtensions, FortranVersion)
def validate(self) -> None:
if self[FortranVersion].value == "bad":
raise InvalidTargetException("Bad!")
def test_field_and_target_eq() -> None:
addr = Address("", target_name="tgt")
field = FortranVersion("dev0", addr)
assert field.value == "dev0"
other = FortranVersion("dev0", addr)
assert field == other
assert hash(field) == hash(other)
other = FortranVersion("dev1", addr)
assert field != other
assert hash(field) != hash(other)
# NB: because normal `Field`s throw away the address, these are equivalent.
other = FortranVersion("dev0", Address("", target_name="other"))
assert field == other
assert hash(field) == hash(other)
# Ensure the field is frozen.
with pytest.raises(FrozenInstanceError):
field.y = "foo" # type: ignore[attr-defined]
tgt = FortranTarget({"version": "dev0"}, addr)
assert tgt.address == addr
other_tgt = FortranTarget({"version": "dev0"}, addr)
assert tgt == other_tgt
assert hash(tgt) == hash(other_tgt)
other_tgt = FortranTarget({"version": "dev1"}, addr)
assert tgt != other_tgt
assert hash(tgt) != hash(other_tgt)
other_tgt = FortranTarget({"version": "dev0"}, Address("", target_name="other"))
assert tgt != other_tgt
assert hash(tgt) != hash(other_tgt)
# Ensure the target is frozen.
with pytest.raises(FrozenInstanceError):
tgt.y = "foo" # type: ignore[attr-defined]
# Ensure that subclasses are not equal.
class SubclassField(FortranVersion):
pass
subclass_field = SubclassField("dev0", addr)
assert field != subclass_field
assert hash(field) != hash(subclass_field)
class SubclassTarget(FortranTarget):
pass
subclass_tgt = SubclassTarget({"version": "dev0"}, addr)
assert tgt != subclass_tgt
assert hash(tgt) != hash(subclass_tgt)
def test_invalid_fields_rejected() -> None:
with pytest.raises(InvalidFieldException) as exc:
FortranTarget({"invalid_field": True}, Address("", target_name="lib"))
assert "Unrecognized field `invalid_field=True`" in str(exc)
assert "//:lib" in str(exc)
with no_exception():
FortranTarget(
{"invalid_field": True}, Address("", target_name="lib"), ignore_unrecognized_fields=True
)
def test_get_field() -> None:
extensions = ("FortranExt1",)
tgt = FortranTarget({FortranExtensions.alias: extensions}, Address("", target_name="lib"))
assert tgt[FortranExtensions].value == extensions
assert tgt.get(FortranExtensions).value == extensions
assert tgt.get(FortranExtensions, default_raw_value=["FortranExt2"]).value == extensions
# Default field value. This happens when the field is registered on the target type, but the
# user does not explicitly set the field in the BUILD file.
default_field_tgt = FortranTarget({}, Address("", target_name="default"))
assert default_field_tgt[FortranExtensions].value == ()
assert default_field_tgt.get(FortranExtensions).value == ()
assert default_field_tgt.get(FortranExtensions, default_raw_value=["FortranExt2"]).value == ()
# Example of a call site applying its own default value instead of the field's default value.
    assert (default_field_tgt[FortranExtensions].value or 123) == 123
assert (
FortranTarget.class_get_field(FortranExtensions, union_membership=UnionMembership({}))
is FortranExtensions
)
# Field is not registered on the target.
with pytest.raises(KeyError) as exc:
default_field_tgt[UnrelatedField]
assert UnrelatedField.__name__ in str(exc)
with pytest.raises(KeyError) as exc:
FortranTarget.class_get_field(UnrelatedField, union_membership=UnionMembership({}))
assert UnrelatedField.__name__ in str(exc)
assert default_field_tgt.get(UnrelatedField).value == UnrelatedField.default
assert default_field_tgt.get(
UnrelatedField, default_raw_value=not UnrelatedField.default
).value == (not UnrelatedField.default)
def test_field_hydration_is_eager() -> None:
with pytest.raises(InvalidFieldException) as exc:
FortranTarget(
{FortranExtensions.alias: ["FortranExt1", "DoesNotStartWithFortran"]},
Address("", target_name="bad_extension"),
)
assert "DoesNotStartWithFortran" in str(exc)
assert "//:bad_extension" in str(exc)
def test_has_fields() -> None:
empty_union_membership = UnionMembership({})
tgt = FortranTarget({}, Address("", target_name="lib"))
assert tgt.field_types == (FortranExtensions, FortranVersion)
assert FortranTarget.class_field_types(union_membership=empty_union_membership) == (
FortranExtensions,
FortranVersion,
)
assert tgt.has_fields([]) is True
assert FortranTarget.class_has_fields([], union_membership=empty_union_membership) is True
assert tgt.has_fields([FortranExtensions]) is True
assert tgt.has_field(FortranExtensions) is True
assert (
FortranTarget.class_has_fields([FortranExtensions], union_membership=empty_union_membership)
is True
)
assert (
FortranTarget.class_has_field(FortranExtensions, union_membership=empty_union_membership)
is True
)
assert tgt.has_fields([UnrelatedField]) is False
assert tgt.has_field(UnrelatedField) is False
assert (
FortranTarget.class_has_fields([UnrelatedField], union_membership=empty_union_membership)
is False
)
assert (
FortranTarget.class_has_field(UnrelatedField, union_membership=empty_union_membership)
is False
)
assert tgt.has_fields([FortranExtensions, UnrelatedField]) is False
assert (
FortranTarget.class_has_fields(
[FortranExtensions, UnrelatedField], union_membership=empty_union_membership
)
is False
)
def test_add_custom_fields() -> None:
class CustomField(BoolField):
alias = "custom_field"
default = False
union_membership = UnionMembership.from_rules(
[FortranTarget.register_plugin_field(CustomField)]
)
tgt_values = {CustomField.alias: True}
tgt = FortranTarget(
tgt_values, Address("", target_name="lib"), union_membership=union_membership
)
assert tgt.field_types == (FortranExtensions, FortranVersion, CustomField)
assert tgt.core_fields == (FortranExtensions, FortranVersion)
assert tgt.plugin_fields == (CustomField,)
assert tgt.has_field(CustomField) is True
assert FortranTarget.class_field_types(union_membership=union_membership) == (
FortranExtensions,
FortranVersion,
CustomField,
)
assert FortranTarget.class_has_field(CustomField, union_membership=union_membership) is True
assert (
FortranTarget.class_get_field(CustomField, union_membership=union_membership) is CustomField
)
assert tgt[CustomField].value is True
default_tgt = FortranTarget(
{}, Address("", target_name="default"), union_membership=union_membership
)
assert default_tgt[CustomField].value is False
# Ensure that the `PluginField` is not being registered on other target types.
class OtherTarget(Target):
alias = "other_target"
core_fields = ()
other_tgt = OtherTarget({}, Address("", target_name="other"))
assert other_tgt.plugin_fields == ()
assert other_tgt.has_field(CustomField) is False
def test_subclassed_target_inherits_plugin_fields() -> None:
class CustomFortranTarget(FortranTarget):
alias = "custom_fortran"
class CustomField(BoolField):
alias = "custom_field"
default = False
union_membership = UnionMembership.from_rules(
[FortranTarget.register_plugin_field(CustomField)]
)
custom_tgt = CustomFortranTarget(
{}, Address("", target_name="custom"), union_membership=union_membership
)
assert custom_tgt.has_field(CustomField) is True
def test_override_preexisting_field_via_new_target() -> None:
# To change the behavior of a pre-existing field, you must create a new target as it would not
# be safe to allow plugin authors to change the behavior of core target types.
#
# Because the Target API does not care about the actual target type and we only check that the
# target has the required fields via Target.has_fields(), it is safe to create a new target
# that still works where the original target was expected.
#
# However, this means that we must ensure `Target.get()` and `Target.has_fields()` will work
# with subclasses of the original `Field`s.
class CustomFortranExtensions(FortranExtensions):
banned_extensions = ("FortranBannedExt",)
default_extensions = ("FortranCustomExt",)
@classmethod
def compute_value(
cls, raw_value: Optional[Iterable[str]], address: Address
) -> Tuple[str, ...]:
# Ensure that we avoid certain problematic extensions and always use some defaults.
specified_extensions = super().compute_value(raw_value, address)
banned = [
extension
for extension in specified_extensions
if extension in cls.banned_extensions
]
if banned:
raise InvalidFieldException(
f"The {repr(cls.alias)} field in target {address} is using banned "
f"extensions: {banned}"
)
return (*specified_extensions, *cls.default_extensions)
class CustomFortranTarget(Target):
alias = "custom_fortran"
core_fields = tuple(
{*FortranTarget.core_fields, CustomFortranExtensions} - {FortranExtensions}
)
custom_tgt = CustomFortranTarget(
{FortranExtensions.alias: ["FortranExt1"]}, Address("", target_name="custom")
)
assert custom_tgt.has_field(FortranExtensions) is True
assert custom_tgt.has_field(CustomFortranExtensions) is True
assert custom_tgt.has_fields([FortranExtensions, CustomFortranExtensions]) is True
assert (
CustomFortranTarget.class_get_field(FortranExtensions, union_membership=UnionMembership({}))
is CustomFortranExtensions
)
# Ensure that subclasses not defined on a target are not accepted. This allows us to, for
# example, filter every target with `PythonSources` (or a subclass) and to ignore targets with
# only `SourcesField`.
normal_tgt = FortranTarget({}, Address("", target_name="normal"))
assert normal_tgt.has_field(FortranExtensions) is True
assert normal_tgt.has_field(CustomFortranExtensions) is False
assert custom_tgt[FortranExtensions] == custom_tgt[CustomFortranExtensions]
assert custom_tgt[FortranExtensions].value == (
"FortranExt1",
*CustomFortranExtensions.default_extensions,
)
# Check custom default value
assert (
CustomFortranTarget({}, Address("", target_name="default"))[FortranExtensions].value
== CustomFortranExtensions.default_extensions
)
# Custom validation
with pytest.raises(InvalidFieldException) as exc:
CustomFortranTarget(
{FortranExtensions.alias: CustomFortranExtensions.banned_extensions},
Address("", target_name="invalid"),
)
assert str(list(CustomFortranExtensions.banned_extensions)) in str(exc)
assert "//:invalid" in str(exc)
def test_required_field() -> None:
class RequiredField(StringField):
alias = "field"
required = True
class RequiredTarget(Target):
alias = "required_target"
core_fields = (RequiredField,)
address = Address("", target_name="lib")
# No errors when defined
RequiredTarget({"field": "present"}, address)
with pytest.raises(RequiredFieldMissingException) as exc:
RequiredTarget({}, address)
assert str(address) in str(exc.value)
assert "field" in str(exc.value)
def test_async_field_mixin() -> None:
class ExampleField(IntField, AsyncFieldMixin):
alias = "field"
default = 10
addr = Address("", target_name="tgt")
field = ExampleField(None, addr)
assert field.value == 10
assert field.address == addr
ExampleField.mro() # Regression test that the mro is resolvable.
# Ensure equality and __hash__ work correctly.
other = ExampleField(None, addr)
assert field == other
assert hash(field) == hash(other)
other = ExampleField(25, addr)
assert field != other
assert hash(field) != hash(other)
# Whereas normally the address is not considered, it is considered for async fields.
other = ExampleField(None, Address("", target_name="other"))
assert field != other
assert hash(field) != hash(other)
# Ensure it's still frozen.
with pytest.raises(FrozenInstanceError):
field.y = "foo" # type: ignore[attr-defined]
# Ensure that subclasses are not equal.
class Subclass(ExampleField):
pass
subclass = Subclass(None, addr)
assert field != subclass
assert hash(field) != hash(subclass)
def test_target_validate() -> None:
with pytest.raises(InvalidTargetException):
FortranTarget({FortranVersion.alias: "bad"}, Address("", target_name="t"))
def test_target_residence_dir() -> None:
assert FortranTarget({}, Address("some_dir/subdir")).residence_dir == "some_dir/subdir"
assert (
FortranTarget({}, Address("some_dir/subdir"), residence_dir="another_dir").residence_dir
== "another_dir"
)
# -----------------------------------------------------------------------------------------------
# Test CoarsenedTarget
# -----------------------------------------------------------------------------------------------
def test_coarsened_target_equality() -> None:
a, b = (FortranTarget({}, Address(name)) for name in string.ascii_lowercase[:2])
def ct(members: List[Target], dependencies: List[CoarsenedTarget] = []):
return CoarsenedTarget(members, dependencies)
assert ct([]) == ct([])
assert ct([a]) == ct([a])
assert ct([a]) != ct([b])
# Unique instances.
assert ct([], [ct([a])]) == ct([], [ct([a])])
assert ct([], [ct([a])]) != ct([], [ct([b])])
# Create two root CTs (with unique `id`s), which contain some reused instances.
def nested():
ct_a = ct([a])
return ct([], [ct_a, ct([], [ct_a])])
assert id(nested()) != id(nested())
assert nested() == nested()
# -----------------------------------------------------------------------------------------------
# Test file-level target generation
# -----------------------------------------------------------------------------------------------
def test_generated_targets_address_validation() -> None:
"""Ensure that all addresses are well formed."""
class MockTarget(Target):
alias = "tgt"
core_fields = ()
generator = MockTarget({}, Address("dir", target_name="generator"))
with pytest.raises(InvalidGeneratedTargetException):
GeneratedTargets(
generator,
[
MockTarget(
{}, Address("a_different_dir", target_name="generator", generated_name="gen")
)
],
)
with pytest.raises(InvalidGeneratedTargetException):
GeneratedTargets(
generator,
[
MockTarget(
{}, Address("dir", target_name="a_different_generator", generated_name="gen")
)
],
)
with pytest.raises(InvalidGeneratedTargetException):
GeneratedTargets(
generator,
[
MockTarget(
{},
Address(
"dir",
target_name="a_different_generator",
generated_name=None,
relative_file_path=None,
),
)
],
)
# These are fine.
GeneratedTargets(
generator,
[
MockTarget({}, Address("dir", target_name="generator", generated_name="gen")),
MockTarget({}, Address("dir", target_name="generator", relative_file_path="gen")),
],
)
# -----------------------------------------------------------------------------------------------
# Test FieldSet. Also see engine/internals/graph_test.py.
# -----------------------------------------------------------------------------------------------
def test_field_set() -> None:
class RequiredField(StringField):
alias = "required_field"
default = "default"
class OptionalField(StringField):
alias = "optional_field"
default = "default"
class OptOutField(BoolField):
alias = "opt_out_field"
default = False
class TargetWithRequired(Target):
alias = "tgt_w_required"
# It has the required field registered, but not the optional field.
core_fields = (RequiredField,)
class TargetWithoutRequired(Target):
alias = "tgt_wo_required"
# It has the optional field registered, but not the required field.
core_fields = (OptionalField,)
class NoFieldsTarget(Target):
alias = "no_fields_tgt"
core_fields = ()
class OptOutTarget(Target):
alias = "opt_out_tgt"
core_fields = (RequiredField, OptOutField)
@dataclass(frozen=True)
class RequiredFieldSet(FieldSet):
required_fields = (RequiredField,)
required: RequiredField
optional: OptionalField
@classmethod
def opt_out(cls, tgt: Target) -> bool:
return tgt.get(OptOutField).value is True
@dataclass(frozen=True)
class OptionalFieldSet(FieldSet):
required_fields = ()
optional: OptionalField
@classmethod
def opt_out(cls, tgt: Target) -> bool:
return tgt.get(OptOutField).value is True
required_addr = Address("", target_name="required")
required_tgt = TargetWithRequired({RequiredField.alias: "configured"}, required_addr)
optional_addr = Address("", target_name="unrelated")
optional_tgt = TargetWithoutRequired({OptionalField.alias: "configured"}, optional_addr)
no_fields_addr = Address("", target_name="no_fields")
no_fields_tgt = NoFieldsTarget({}, no_fields_addr)
opt_out_addr = Address("", target_name="conditional")
opt_out_tgt = OptOutTarget(
{RequiredField.alias: "configured", OptOutField.alias: True}, opt_out_addr
)
assert RequiredFieldSet.is_applicable(required_tgt) is True
for tgt in [optional_tgt, no_fields_tgt, opt_out_tgt]:
assert RequiredFieldSet.is_applicable(tgt) is False
# When no fields are required, every target is applicable _unless_ it has been opted out of.
for tgt in [required_tgt, optional_tgt, no_fields_tgt]:
assert OptionalFieldSet.is_applicable(tgt) is True
assert OptionalFieldSet.is_applicable(opt_out_tgt) is False
required_fs = RequiredFieldSet.create(required_tgt)
assert required_fs.address == required_addr
assert required_fs.required.value == "configured"
assert required_fs.optional.value == OptionalField.default
assert isinstance(required_fs.required_fields, tuple)
with pytest.raises(KeyError):
RequiredFieldSet.create(optional_tgt)
# It is possible to create a target that should be opted out of; the caller must call
# `.is_applicable()` first.
opt_out_fs = RequiredFieldSet.create(opt_out_tgt)
assert opt_out_fs.address == opt_out_addr
assert opt_out_fs.required.value == "configured"
assert opt_out_fs.optional.value == OptionalField.default
assert isinstance(required_fs.required_fields, tuple)
assert OptionalFieldSet.create(optional_tgt).optional.value == "configured"
assert OptionalFieldSet.create(no_fields_tgt).optional.value == OptionalField.default
# -----------------------------------------------------------------------------------------------
# Test Field templates
# -----------------------------------------------------------------------------------------------
def test_scalar_field() -> None:
@dataclass(frozen=True)
class CustomObject:
pass
class Example(ScalarField):
alias = "example"
expected_type = CustomObject
expected_type_description = "a `CustomObject` instance"
@classmethod
def compute_value(
cls, raw_value: Optional[CustomObject], address: Address
) -> Optional[CustomObject]:
return super().compute_value(raw_value, address)
addr = Address("", target_name="example")
with pytest.raises(InvalidFieldTypeException) as exc:
Example(1, addr)
assert Example.expected_type_description in str(exc.value)
assert Example(CustomObject(), addr).value == CustomObject()
assert Example(None, addr).value is None
def test_string_field_valid_choices() -> None:
class GivenStrings(StringField):
alias = "example"
valid_choices = ("kale", "spinach")
class LeafyGreens(Enum):
KALE = "kale"
SPINACH = "spinach"
class GivenEnum(StringField):
alias = "example"
valid_choices = LeafyGreens
default = LeafyGreens.KALE.value
addr = Address("", target_name="example")
assert GivenStrings("spinach", addr).value == "spinach"
assert GivenEnum("spinach", addr).value == "spinach"
assert GivenStrings(None, addr).value is None
assert GivenEnum(None, addr).value == "kale"
with pytest.raises(InvalidFieldChoiceException):
GivenStrings("carrot", addr)
with pytest.raises(InvalidFieldChoiceException):
GivenEnum("carrot", addr)
@pytest.mark.parametrize("field_cls", [IntField, FloatField])
def test_int_float_fields_valid_numbers(field_cls: type) -> None:
class AllNums(field_cls): # type: ignore[valid-type,misc]
alias = "all_nums"
valid_numbers = ValidNumbers.all
class PositiveAndZero(field_cls): # type: ignore[valid-type,misc]
alias = "positive_and_zero"
valid_numbers = ValidNumbers.positive_and_zero
class PositiveOnly(field_cls): # type: ignore[valid-type,misc]
alias = "positive_only"
valid_numbers = ValidNumbers.positive_only
addr = Address("nums")
neg = -1 if issubclass(field_cls, IntField) else -1.0
zero = 0 if issubclass(field_cls, IntField) else 0.0
pos = 1 if issubclass(field_cls, IntField) else 1.0
assert AllNums(neg, addr).value == neg
assert AllNums(zero, addr).value == zero
assert AllNums(pos, addr).value == pos
with pytest.raises(InvalidFieldException):
PositiveAndZero(neg, addr)
assert PositiveAndZero(zero, addr).value == zero
assert PositiveAndZero(pos, addr).value == pos
with pytest.raises(InvalidFieldException):
PositiveOnly(neg, addr)
with pytest.raises(InvalidFieldException):
PositiveOnly(zero, addr)
assert PositiveOnly(pos, addr).value == pos
def test_sequence_field() -> None:
@dataclass(frozen=True)
class CustomObject:
pass
class Example(SequenceField):
alias = "example"
expected_element_type = CustomObject
expected_type_description = "an iterable of `CustomObject` instances"
@classmethod
def compute_value(
cls, raw_value: Optional[Iterable[CustomObject]], address: Address
) -> Optional[Tuple[CustomObject, ...]]:
return super().compute_value(raw_value, address)
addr = Address("", target_name="example")
def assert_flexible_constructor(raw_value: Iterable[CustomObject]) -> None:
assert Example(raw_value, addr).value == tuple(raw_value)
assert_flexible_constructor([CustomObject(), CustomObject()])
assert_flexible_constructor((CustomObject(), CustomObject()))
assert_flexible_constructor(OrderedSet([CustomObject(), CustomObject()]))
# Must be given a sequence, not a single element.
with pytest.raises(InvalidFieldTypeException) as exc:
Example(CustomObject(), addr)
assert Example.expected_type_description in str(exc.value)
# All elements must be the expected type.
with pytest.raises(InvalidFieldTypeException):
Example([CustomObject(), 1, CustomObject()], addr)
def test_string_sequence_field() -> None:
class Example(StringSequenceField):
alias = "example"
addr = Address("", target_name="example")
assert Example(["hello", "world"], addr).value == ("hello", "world")
assert Example(None, addr).value is None
with pytest.raises(InvalidFieldTypeException):
Example("strings are technically iterable...", addr)
with pytest.raises(InvalidFieldTypeException):
Example(["hello", 0, "world"], addr)
def test_string_sequence_field_valid_choices() -> None:
class GivenStrings(StringSequenceField):
alias = "example"
valid_choices = ("arugula", "kale", "spinach")
class LeafyGreens(Enum):
ARUGULA = "arugula"
KALE = "kale"
SPINACH = "spinach"
class GivenEnum(StringSequenceField):
alias = "example"
valid_choices = LeafyGreens
default = (LeafyGreens.KALE.value,)
addr = Address("", target_name="example")
assert GivenStrings(["arugula", "spinach"], addr).value == ("arugula", "spinach")
assert GivenEnum(["arugula", "spinach"], addr).value == ("arugula", "spinach")
assert GivenStrings(None, addr).value is None
assert GivenEnum(None, addr).value == ("kale",)
with pytest.raises(InvalidFieldChoiceException):
GivenStrings(["carrot"], addr)
with pytest.raises(InvalidFieldChoiceException):
GivenEnum(["carrot"], addr)
def test_dict_string_to_string_field() -> None:
class Example(DictStringToStringField):
alias = "example"
addr = Address("", target_name="example")
assert Example(None, addr).value is None
assert Example({}, addr).value == FrozenDict()
assert Example({"hello": "world"}, addr).value == FrozenDict({"hello": "world"})
def assert_invalid_type(raw_value: Any) -> None:
with pytest.raises(InvalidFieldTypeException):
Example(raw_value, addr)
for v in [0, object(), "hello", ["hello"], {"hello": 0}, {0: "world"}]:
assert_invalid_type(v)
# Regression test that a default can be set.
class ExampleDefault(DictStringToStringField):
alias = "example"
# Note that we use `FrozenDict` so that the object can be hashable.
default = FrozenDict({"default": "val"})
assert ExampleDefault(None, addr).value == FrozenDict({"default": "val"})
def test_nested_dict_string_to_string_field() -> None:
class Example(NestedDictStringToStringField):
alias = "example"
addr = Address("", target_name="example")
assert Example(None, address=addr).value is None
assert Example({}, address=addr).value == FrozenDict()
assert Example({"greeting": {"hello": "world"}}, address=addr).value == FrozenDict(
{"greeting": FrozenDict({"hello": "world"})}
)
def assert_invalid_type(raw_value: Any) -> None:
with pytest.raises(InvalidFieldTypeException):
Example(raw_value, address=addr)
for v in [
0,
object(),
"hello",
["hello"],
["hello", "world"],
{"hello": 0},
{0: "world"},
{"hello": "world"},
]:
assert_invalid_type(v)
# Regression test that a default can be set.
class ExampleDefault(NestedDictStringToStringField):
alias = "example"
# Note that we use `FrozenDict` so that the object can be hashable.
default = FrozenDict({"nest": FrozenDict({"default": "val"})})
assert ExampleDefault(None, address=addr).value == FrozenDict(
{"nest": FrozenDict({"default": "val"})}
)
def test_dict_string_to_string_sequence_field() -> None:
class Example(DictStringToStringSequenceField):
alias = "example"
addr = Address("", target_name="example")
def assert_flexible_constructor(raw_value: Dict[str, Iterable[str]]) -> None:
assert Example(raw_value, addr).value == FrozenDict(
{k: tuple(v) for k, v in raw_value.items()}
)
for v in [("hello", "world"), ["hello", "world"], OrderedSet(["hello", "world"])]:
assert_flexible_constructor({"greeting": v})
def assert_invalid_type(raw_value: Any) -> None:
with pytest.raises(InvalidFieldTypeException):
Example(raw_value, addr)
for v in [ # type: ignore[assignment]
0,
object(),
"hello",
["hello"],
{"hello": "world"},
{0: ["world"]},
]:
assert_invalid_type(v)
# Regression test that a default can be set.
class ExampleDefault(DictStringToStringSequenceField):
alias = "example"
# Note that we use `FrozenDict` so that the object can be hashable.
default = FrozenDict({"default": ("val",)})
assert ExampleDefault(None, addr).value == FrozenDict({"default": ("val",)})
# -----------------------------------------------------------------------------------------------
# Test `SourcesField` helper functions
# -----------------------------------------------------------------------------------------------
def test_targets_with_sources_types() -> None:
class Sources1(MultipleSourcesField):
pass
class Sources2(SingleSourceField):
pass
class CodegenSources(MultipleSourcesField):
pass
class Tgt1(Target):
alias = "tgt1"
core_fields = (Sources1,)
class Tgt2(Target):
alias = "tgt2"
core_fields = (Sources2,)
class CodegenTgt(Target):
alias = "codegen_tgt"
core_fields = (CodegenSources,)
class GenSources(GenerateSourcesRequest):
input = CodegenSources
output = Sources1
tgt1 = Tgt1({}, Address("tgt1"))
tgt2 = Tgt2({SingleSourceField.alias: "foo.ext"}, Address("tgt2"))
codegen_tgt = CodegenTgt({}, Address("codegen_tgt"))
result = targets_with_sources_types(
[Sources1],
[tgt1, tgt2, codegen_tgt],
union_membership=UnionMembership({GenerateSourcesRequest: [GenSources]}),
)
assert set(result) == {tgt1, codegen_tgt}
result = targets_with_sources_types(
[Sources2],
[tgt1, tgt2, codegen_tgt],
union_membership=UnionMembership({GenerateSourcesRequest: [GenSources]}),
)
assert set(result) == {tgt2}
SKIP = object()
expected_path_globs = namedtuple(
"expected_path_globs",
["globs", "glob_match_error_behavior", "conjunction", "description_of_origin"],
defaults=(SKIP, SKIP, SKIP, SKIP),
)
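# Illustrative note (not part of the original suite): `SKIP` is a sentinel
# default, so a parametrized case asserts only on the PathGlobs attributes it
# names. For example, `expected_path_globs(globs=("test/*",))` leaves the other
# three fields as SKIP, and the comparison loops below then check `globs` alone.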
@pytest.mark.parametrize(
"default_value, field_value, expected",
[
pytest.param(
None,
None,
expected_path_globs(globs=()),
id="empty",
),
pytest.param(
["*"],
None,
expected_path_globs(
globs=("test/*",),
glob_match_error_behavior=GlobMatchErrorBehavior.ignore,
conjunction=GlobExpansionConjunction.any_match,
description_of_origin=None,
),
id="default ignores glob match error",
),
pytest.param(
["*"],
["a", "b"],
expected_path_globs(
globs=(
"test/a",
"test/b",
),
glob_match_error_behavior=GlobMatchErrorBehavior.warn,
conjunction=GlobExpansionConjunction.all_match,
description_of_origin="test:test's `sources` field",
),
id="provided value warns on glob match error",
),
],
)
def test_multiple_sources_path_globs(
default_value: Any, field_value: Any, expected: expected_path_globs
) -> None:
class TestMultipleSourcesField(MultipleSourcesField):
default = default_value
default_glob_match_error_behavior = GlobMatchErrorBehavior.ignore
sources = TestMultipleSourcesField(field_value, Address("test"))
actual = sources.path_globs(UnmatchedBuildFileGlobs.warn)
for attr, expect in zip(expected._fields, expected):
if expect is not SKIP:
assert getattr(actual, attr) == expect
@pytest.mark.parametrize(
"default_value, field_value, expected",
[
pytest.param(
None,
None,
expected_path_globs(globs=()),
id="empty",
),
pytest.param(
"file",
None,
expected_path_globs(
globs=("test/file",),
glob_match_error_behavior=GlobMatchErrorBehavior.ignore,
conjunction=GlobExpansionConjunction.any_match,
description_of_origin=None,
),
id="default ignores glob match error",
),
pytest.param(
"default_file",
"other_file",
expected_path_globs(
globs=("test/other_file",),
glob_match_error_behavior=GlobMatchErrorBehavior.warn,
conjunction=GlobExpansionConjunction.all_match,
description_of_origin="test:test's `source` field",
),
id="provided value warns on glob match error",
),
pytest.param(
"file",
"life",
expected_path_globs(
globs=("test/life",),
glob_match_error_behavior=GlobMatchErrorBehavior.warn,
conjunction=GlobExpansionConjunction.all_match,
description_of_origin="test:test's `source` field",
),
id="default glob conjunction",
),
],
)
def test_single_source_path_globs(
default_value: Any, field_value: Any, expected: expected_path_globs
) -> None:
class TestSingleSourceField(SingleSourceField):
default = default_value
default_glob_match_error_behavior = GlobMatchErrorBehavior.ignore
required = False
sources = TestSingleSourceField(field_value, Address("test"))
actual = sources.path_globs(UnmatchedBuildFileGlobs.warn)
for attr, expect in zip(expected._fields, expected):
if expect is not SKIP:
assert getattr(actual, attr) == expect
def test_single_source_file_path() -> None:
class TestSingleSourceField(OptionalSingleSourceField):
pass
assert TestSingleSourceField(None, Address("project")).file_path is None
assert TestSingleSourceField("f.ext", Address("project")).file_path == "project/f.ext"
def test_sources_fields_ban_parent_dir_pattern() -> None:
with pytest.raises(InvalidFieldException):
SingleSourceField("../f.ext", Address("project"))
with pytest.raises(InvalidFieldException):
SingleSourceField("dir/../f.ext", Address("project"))
with pytest.raises(InvalidFieldException):
MultipleSourcesField(["../f.ext", "f.ext"], Address("project"))
with pytest.raises(InvalidFieldException):
MultipleSourcesField(["dir/../f.ext", "f.ext"], Address("project"))
def test_single_source_field_bans_globs() -> None:
with pytest.raises(InvalidFieldException):
SingleSourceField("*.ext", Address("project"))
with pytest.raises(InvalidFieldException):
SingleSourceField("!f.ext", Address("project"))
def test_multiple_sources_field_ban_subdirs() -> None:
class TestSources(MultipleSourcesField):
ban_subdirectories = True
assert TestSources(["f.ext"], Address("project")).value == ("f.ext",)
with pytest.raises(InvalidFieldException):
TestSources(["**"], Address("project"))
with pytest.raises(InvalidFieldException):
TestSources(["dir/f.ext"], Address("project"))
# -----------------------------------------------------------------------------------------------
# Test `ExplicitlyProvidedDependencies` helper functions
# -----------------------------------------------------------------------------------------------
def test_explicitly_provided_dependencies_any_are_covered_by_includes() -> None:
addr = Address("", target_name="a")
generated_addr = Address("", target_name="b", generated_name="gen")
epd = ExplicitlyProvidedDependencies(
Address("", target_name="input_tgt"),
includes=FrozenOrderedSet([addr, generated_addr]),
ignores=FrozenOrderedSet(),
)
assert epd.any_are_covered_by_includes(()) is False
assert epd.any_are_covered_by_includes((addr,)) is True
assert epd.any_are_covered_by_includes((generated_addr,)) is True
assert epd.any_are_covered_by_includes((addr, generated_addr)) is True
# Generated targets are covered if their original target generator is in the includes.
assert (
epd.any_are_covered_by_includes((Address("", target_name="a", generated_name="gen"),))
is True
)
assert epd.any_are_covered_by_includes((Address("", target_name="x"),)) is False
assert (
epd.any_are_covered_by_includes((Address("", target_name="x", generated_name="gen"),))
is False
)
# Ensure we check for _any_, not _all_.
assert epd.any_are_covered_by_includes((Address("", target_name="x"), addr)) is True
def test_explicitly_provided_dependencies_remaining_after_disambiguation() -> None:
# First check disambiguation via ignores (`!` and `!!`).
addr = Address("", target_name="a")
generated_addr = Address("", target_name="b", generated_name="gen")
epd = ExplicitlyProvidedDependencies(
Address("", target_name="input_tgt"),
includes=FrozenOrderedSet(),
ignores=FrozenOrderedSet([addr, generated_addr]),
)
def assert_disambiguated_via_ignores(ambiguous: List[Address], expected: Set[Address]) -> None:
assert (
epd.remaining_after_disambiguation(tuple(ambiguous), owners_must_be_ancestors=False)
== expected
)
assert_disambiguated_via_ignores([], set())
assert_disambiguated_via_ignores([addr], set())
assert_disambiguated_via_ignores([generated_addr], set())
assert_disambiguated_via_ignores([addr, generated_addr], set())
# Generated targets are covered if their original target generator is in the ignores.
assert_disambiguated_via_ignores([Address("", target_name="a", generated_name="gen")], set())
bad_tgt = Address("", target_name="x")
bad_generated_tgt = Address("", target_name="x", generated_name="gen")
assert_disambiguated_via_ignores([bad_tgt], {bad_tgt})
assert_disambiguated_via_ignores([bad_generated_tgt], {bad_generated_tgt})
assert_disambiguated_via_ignores([bad_generated_tgt, addr, generated_addr], {bad_generated_tgt})
# Check disambiguation via `owners_must_be_ancestors`.
epd = ExplicitlyProvidedDependencies(
Address("src/lang/project"), FrozenOrderedSet(), FrozenOrderedSet()
)
valid_candidates = {
Address("src/lang/project", target_name="another_tgt"),
Address("src/lang"),
Address("src"),
Address("", target_name="root_owner"),
}
invalid_candidates = {
Address("tests/lang"),
Address("src/another_lang"),
Address("src/lang/another_project"),
Address("src/lang/project/subdir"),
}
assert (
epd.remaining_after_disambiguation(
(*valid_candidates, *invalid_candidates), owners_must_be_ancestors=True
)
== valid_candidates
)
def test_explicitly_provided_dependencies_disambiguated() -> None:
def get_disambiguated(
ambiguous: List[Address],
*,
ignores: Optional[List[Address]] = None,
includes: Optional[List[Address]] = None,
owners_must_be_ancestors: bool = False,
) -> Optional[Address]:
epd = ExplicitlyProvidedDependencies(
address=Address("dir", target_name="input_tgt"),
includes=FrozenOrderedSet(includes or []),
ignores=FrozenOrderedSet(ignores or []),
)
return epd.disambiguated(
tuple(ambiguous), owners_must_be_ancestors=owners_must_be_ancestors
)
# A mix of normal and generated addresses.
addr_a = Address("dir", target_name="a", generated_name="gen")
addr_b = Address("dir", target_name="b", generated_name="gen")
addr_c = Address("dir", target_name="c")
all_addr = [addr_a, addr_b, addr_c]
# If 1 target remains, it's disambiguated. Note that ignores can be normal or generated targets.
assert get_disambiguated(all_addr, ignores=[addr_b, addr_c]) == addr_a
assert (
get_disambiguated(all_addr, ignores=[addr_b.maybe_convert_to_target_generator(), addr_c])
== addr_a
)
assert get_disambiguated(all_addr, ignores=[addr_a]) is None
assert get_disambiguated(all_addr, ignores=[addr_a.maybe_convert_to_target_generator()]) is None
assert get_disambiguated(all_addr, ignores=all_addr) is None
assert get_disambiguated([]) is None
# If any includes would disambiguate the ambiguous target, we don't consider disambiguating
# via excludes as the user has already explicitly disambiguated the module.
assert get_disambiguated(all_addr, ignores=[addr_a, addr_b], includes=[addr_a]) is None
assert (
get_disambiguated(
ambiguous=all_addr,
ignores=[addr_a, addr_b],
includes=[addr_a.maybe_convert_to_target_generator()],
)
is None
)
# You can also disambiguate via `owners_must_be_ancestors`.
another_dir = Address("another_dir")
assert get_disambiguated([addr_a, another_dir], owners_must_be_ancestors=True) == addr_a
assert get_disambiguated([addr_a, another_dir], owners_must_be_ancestors=False) is None
assert (
get_disambiguated(
[addr_a, addr_b, another_dir], ignores=[addr_b], owners_must_be_ancestors=True
)
== addr_a
)
def test_explicitly_provided_dependencies_maybe_warn_of_ambiguous_dependency_inference(
caplog,
) -> None:
def maybe_warn(
ambiguous: List[Address],
*,
ignores: Optional[List[Address]] = None,
includes: Optional[List[Address]] = None,
owners_must_be_ancestors: bool = False,
) -> None:
caplog.clear()
epd = ExplicitlyProvidedDependencies(
Address("dir", target_name="input_tgt"),
includes=FrozenOrderedSet(includes or []),
ignores=FrozenOrderedSet(ignores or []),
)
epd.maybe_warn_of_ambiguous_dependency_inference(
tuple(ambiguous),
Address("some_dir"),
import_reference="file",
context="foo",
owners_must_be_ancestors=owners_must_be_ancestors,
)
maybe_warn([])
assert not caplog.records
# A mix of normal and generated addresses.
addr_a = Address("dir", target_name="a", generated_name="gen")
addr_b = Address("dir", target_name="b", generated_name="gen")
addr_c = Address("dir", target_name="c")
all_addr = [addr_a, addr_b, addr_c]
maybe_warn(all_addr)
assert len(caplog.records) == 1
assert f"['{addr_a}', '{addr_b}', '{addr_c}']" in caplog.text
# Ignored addresses do not show up in the list of ambiguous owners, including for ignores of
# both file and BUILD targets.
maybe_warn(all_addr, ignores=[addr_b])
assert len(caplog.records) == 1
assert f"['{addr_a}', '{addr_c}']" in caplog.text
maybe_warn(all_addr, ignores=[addr_b.maybe_convert_to_target_generator()])
assert len(caplog.records) == 1
assert f"['{addr_a}', '{addr_c}']" in caplog.text
# Disambiguating via ignores turns off the warning, including for ignores of both normal and
# generated targets.
maybe_warn(all_addr, ignores=[addr_a, addr_b])
assert not caplog.records
maybe_warn(
all_addr,
ignores=[
addr_a.maybe_convert_to_target_generator(),
addr_b.maybe_convert_to_target_generator(),
],
)
assert not caplog.records
# Including a target turns off the warning, including for includes of both normal and generated
# targets.
maybe_warn(all_addr, includes=[addr_a])
assert not caplog.records
maybe_warn(all_addr, includes=[addr_a.maybe_convert_to_target_generator()])
assert not caplog.records
# You can also disambiguate via `owners_must_be_ancestors`.
another_dir = Address("another_dir")
maybe_warn([addr_a, another_dir], owners_must_be_ancestors=True)
assert not caplog.records
maybe_warn([addr_a, another_dir], owners_must_be_ancestors=False)
assert len(caplog.records) == 1
assert f"['{another_dir}', '{addr_a}']" in caplog.text
maybe_warn([addr_a, addr_b, another_dir], ignores=[addr_b], owners_must_be_ancestors=True)
assert not caplog.records
# -----------------------------------------------------------------------------------------------
# Test `overrides` field
# -----------------------------------------------------------------------------------------------
@pytest.mark.parametrize(
"raw_value",
[
0,
object(),
"hello",
["hello"],
["hello", "world"],
{"hello": 0},
{0: "world"},
{"hello": "world"},
{("hello",): "world"},
{("hello",): ["world"]},
{(0,): {"field": "value"}},
{("hello",): {0: "value"}},
],
)
def test_overrides_field_data_validation(raw_value: Any) -> None:
with pytest.raises(InvalidFieldTypeException):
OverridesField(raw_value, Address("", target_name="example"))
def test_overrides_field_normalization() -> None:
addr = Address("", target_name="example")
assert OverridesField(None, addr).value is None
assert OverridesField({}, addr).value == {}
# Note that `list_field` is not hashable. We have to override `__hash__` for this to work.
tgt1_override = {"str_field": "value", "list_field": [0, 1, 3]}
tgt2_override = {"int_field": 0, "dict_field": {"a": 0}}
# Convert a `str` key to `tuple[str, ...]`.
field = OverridesField({"tgt1": tgt1_override, ("tgt1", "tgt2"): tgt2_override}, addr)
assert field.value == {("tgt1",): tgt1_override, ("tgt1", "tgt2"): tgt2_override}
with no_exception():
hash(field)
path_field = OverridesField(
{"foo.ext": tgt1_override, ("foo.ext", "bar*.ext"): tgt2_override}, Address("dir")
)
globs = OverridesField.to_path_globs(
Address("dir"), path_field.flatten(), UnmatchedBuildFileGlobs.error
)
assert [path_globs.globs for path_globs in globs] == [
("dir/foo.ext",),
("dir/bar*.ext",),
]
assert OverridesField.flatten_paths(
addr,
[
(paths, globs, cast(Dict[str, Any], overrides))
for (paths, overrides), globs in zip(
[
(Paths(("dir/foo.ext",), ()), tgt1_override),
(Paths(("dir/bar1.ext", "dir/bar2.ext"), ()), tgt2_override),
],
globs,
)
],
) == {
"dir/foo.ext": tgt1_override,
"dir/bar1.ext": tgt2_override,
"dir/bar2.ext": tgt2_override,
}
assert path_field.flatten() == {
"foo.ext": {**tgt2_override, **tgt1_override},
"bar*.ext": tgt2_override,
}
with pytest.raises(InvalidFieldException):
# Same field is overridden for the same file multiple times, which is an error.
OverridesField.flatten_paths(
addr,
[
(Paths(("dir/foo.ext",), ()), PathGlobs([]), tgt1_override),
(Paths(("dir/foo.ext", "dir/bar.ext"), ()), PathGlobs([]), tgt1_override),
],
)
# -----------------------------------------------------------------------------------------------
# Test utility functions
# -----------------------------------------------------------------------------------------------
@pytest.mark.parametrize(
"shard_spec,expected",
(
("0/4", (0, 4)),
("1/4", (1, 4)),
("2/4", (2, 4)),
("3/4", (3, 4)),
("0/2", (0, 2)),
("1/2", (1, 2)),
("0/1", (0, 1)),
),
)
def test_parse_shard_spec_good(shard_spec, expected) -> None:
assert parse_shard_spec(shard_spec) == expected
@pytest.mark.parametrize("shard_spec", ("0/0", "1/1", "4/4", "5/4", "-1/4", "foo/4"))
def test_parse_shard_spec_bad(shard_spec) -> None:
with pytest.raises(ValueError):
parse_shard_spec(shard_spec)
def test_get_shard() -> None:
assert get_shard("foo/bar/1", 2) == 0
assert get_shard("foo/bar/4", 2) == 1
def test_generate_file_based_overrides_field_help_message() -> None:
    # Just test that the Example: part looks right
message = generate_file_based_overrides_field_help_message(
"alias",
"""
overrides={
"bar.proto": {"description": "our user model"]},
("foo.proto", "bar.proto"): {"tags": ["overridden"]},
}
""",
)
assert "example:\n overrides={\n" in message
assert '\n "bar.proto"' in message
assert "\n }\n\nFile" in message
|
{
"content_hash": "a29ef86535d7920ebf8c0ff92a604690",
"timestamp": "",
"source": "github",
"line_count": 1478,
"max_line_length": 100,
"avg_line_length": 35.38362652232747,
"alnum_prop": 0.6151213262711054,
"repo_name": "benjyw/pants",
"id": "395b20bbc53f8cda9acd3df7b1d84cf7caba089a",
"size": "52429",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/python/pants/engine/target_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "688"
},
{
"name": "Go",
"bytes": "67315"
},
{
"name": "Java",
"bytes": "10690"
},
{
"name": "Kotlin",
"bytes": "6433"
},
{
"name": "Mustache",
"bytes": "3595"
},
{
"name": "Python",
"bytes": "7135320"
},
{
"name": "Rust",
"bytes": "1601736"
},
{
"name": "Scala",
"bytes": "21950"
},
{
"name": "Shell",
"bytes": "31723"
},
{
"name": "Starlark",
"bytes": "72809"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from logging import warning
from traceback import print_exc
from traceback import print_stack
from traceback import extract_stack
from dramatis.actor.name.interface import Interface as _Interface
class PropertyProxy(object):
def __init__(self,attr,actor,options):
super(PropertyProxy,self).__setattr__("_attr",attr)
super(PropertyProxy,self).__setattr__("_actor",actor)
super(PropertyProxy,self).__setattr__("_options",options)
def __get__(self,obj,type):
actor = super(PropertyProxy,self).__getattribute__("_actor")
attr = super(PropertyProxy,self).__getattribute__("_attr")
options = super(PropertyProxy,self).__getattribute__("_options")
return actor.object_send( "__getattribute__", (attr,), None, options )
def __call__(self,*args,**kwds):
actor = super(PropertyProxy,self).__getattribute__("_actor")
attr = super(PropertyProxy,self).__getattribute__("_attr")
options = super(PropertyProxy,self).__getattribute__("_options")
return actor.object_send( attr, args, kwds, options )
class FunctionProxy(object):
def __init__(self,attr,actor,options):
super(FunctionProxy,self).__setattr__("_attr",attr)
super(FunctionProxy,self).__setattr__("_actor",actor)
super(FunctionProxy,self).__setattr__("_options",options)
def __call__(self,*args,**kwds):
actor = super(FunctionProxy,self).__getattribute__("_actor")
attr = super(FunctionProxy,self).__getattribute__("_attr")
options = super(FunctionProxy,self).__getattribute__("_options")
return actor.object_send( attr, args, kwds, options )
_instmeth = type( FunctionProxy.__call__ )
def _func(): pass
_func = type(_func)
class Name(object):
"""Proxy objects for actors
dramatis.Actor.Names are proxy objects for actors. When a method
is called on an actor name, the dramatis runtime creates and
    schedules an actor task to be run on the actor's (virtual) thread.
    dramatis.Actor.Name has no user-callable methods (except for the
    implicitly forwarded methods). Other actor name operations are available
through the dramatis.Actor.Name.Interface object, accessible via
dramatis.interface."""
def __init__(self,actor):
super(Name,self).__setattr__("_actor",actor)
super(Name,self).__setattr__("_options",{"continuation":"rpc"})
def __call__(self,*args,**kwds):
return self.__getattribute__("__call__")(*args,**kwds)
def __lshift__(self,*args,**kwds):
return self.__getattribute__("__lshift__")(*args,**kwds)
def __getattribute__(self,attr):
# logging.warning(FunctionProxy)
a = super(Name,self).__getattribute__("_actor")
o = super(Name,self).__getattribute__("_options")
t = _func
        if "continuation_send" in o:
            return FunctionProxy(attr,a,o)
        if a._behavior is None:
            return FunctionProxy(attr,a,o)
# warning( "a: " + str(a) )
# warning( "list: " + str( ( a._behavior, ) + a._behavior.__class__.__mro__ ) )
for out in ( a._behavior, ) + a._behavior.__class__.__mro__:
# print
# print repr(out), out.__dict__
d = None
try:
d = out.__dict__
except AttributeError: pass
# raise AttributeError( "'" + str(out.__class__.__name__) + "' object has no attribute '" + attr + "'" )
desc = None
if ( d ):
desc = d.get( attr )
if ( desc ):
# print repr(self), "x", repr(attr), type(desc)
if ( type(desc) == property ):
return PropertyProxy(attr,a,o).__get__(o,type(o))
elif ( type(desc) == _func ) or \
( type(desc) == _instmeth ):
return FunctionProxy(attr,a,o)
elif ( str(type(desc)) == "<type 'wrapper_descriptor'>" ):
return FunctionProxy(attr,a,o)
                # Honestly, I have no idea ...
elif ( str(type(desc)) == "<type 'getset_descriptor'>" ):
return FunctionProxy(attr,a,o)
else:
print_stack()
warning( "hell: type? " + str( type(desc) ) )
raise Exception( "hell: type? " + str( type(desc) ) )
# The attribute is not defined (at this time)
# The only choice seems to be to assume it's a function
return FunctionProxy(attr,a,o)
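# Usage sketch (names hypothetical): given `name`, a Name wrapping an actor
# whose behavior defines `greet`, attribute access builds a proxy and calling
# it schedules an actor task instead of running the method inline:
#
#   proxy = name.greet        # Name.__getattribute__ returns a FunctionProxy
#   result = proxy("world")   # actor.object_send("greet", ("world",), {}, options)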
Interface = _Interface
|
{
"content_hash": "c33865514bb17e292e9feb2b8a418552",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 116,
"avg_line_length": 41.732142857142854,
"alnum_prop": 0.5770218228498074,
"repo_name": "dramatis/dramatis",
"id": "aee804963e19dc2a24a8f4ac19a0ac4af48827a3",
"size": "4674",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/dramatis/actor/name/name.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Erlang",
"bytes": "23101"
},
{
"name": "Python",
"bytes": "160347"
},
{
"name": "Ruby",
"bytes": "195991"
}
],
"symlink_target": ""
}
|
__author__ = 'kbroughton'
from torpedo.dialects import registry
registry.register("vertica", "torpedo_vertica.pyodbc", "Vertica_pyodbc")
registry.register("access.pyodbc", "torpedo_vertica.pyodbc", "Vertica_pyodbc")
from sqlalchemy.testing.plugin.pytestplugin import *
|
{
"content_hash": "2f5a16655ca08ffb9d67745b5698f8d8",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 78,
"avg_line_length": 33.875,
"alnum_prop": 0.7785977859778598,
"repo_name": "darKoram/torpedo",
"id": "b88b72ebb8411bc1f5af7398df492076d92d98ef",
"size": "271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "torpedo/dialects/torpedo-vertica/tests/conftest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25840"
},
{
"name": "Shell",
"bytes": "256"
}
],
"symlink_target": ""
}
|
import copy
from optparse import make_option
import sys
from django.core.management.base import LabelCommand
from django.conf import settings
from auditcare.models import AuditEvent
from corehq.apps.users.models import CommCareUser
from corehq.pillows.user import UserPillow
from couchforms.models import XFormInstance
BASE_QUERY = "is_superuser: true"
QUERY_DICT = {
"query": {
"filtered": {
"query": {
"query_string": {
"query": ""
}
}
}
}
}
def get_query(query_string, start=0, size=25):
"""
Running a query_string based ES query.
Use Lucene query syntax:
See: http://www.elasticsearch.org/guide/reference/query-dsl/query-string-query/
and http://www.lucenetutorial.com/lucene-query-syntax.html
See the UserPillow mapping definition specifically for not_analyzed querying and dual indexed fields.
"""
ret = copy.deepcopy(QUERY_DICT)
ret['query']['filtered']['query']['query_string']['query'] = query_string
ret['from'] = start
ret['size'] = size
return ret
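# Example (hypothetical field names) of the request body this builds:
#
#   get_query("is_superuser: true AND is_active: true", start=0, size=10)
#   # => {'query': {'filtered': {'query': {'query_string':
#   #        {'query': 'is_superuser: true AND is_active: true'}}}},
#   #     'from': 0, 'size': 10}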
def get_query_results(es, query_string, start=0, size=100):
def get_results(query_string, st, sz):
return es.post("_search", data=get_query(query_string, start=st, size=sz))
search_results = get_results(query_string, start, size)
print "\n\tTotal users in query: %s" % search_results['hits']['total']
yielded = 0
while yielded < search_results['hits']['total']:
for res in search_results['hits']['hits']:
if '_source' in res:
yield res['_source']
yielded += 1
new_start = yielded
search_results = get_results(query_string, new_start, size)
class Command(LabelCommand):
help = "Run a continuous replication of administrative users for your staging environment"
args = "cancel"
label = ""
option_list = LabelCommand.option_list + \
(
make_option('--query',
action='store',
dest='query_string',
default=None,
help="Lucene query to hit the Users ES Index, bound with quotes"),
make_option('--makeitso',
action='store_true',
dest='makeitso',
default=False,
help="Actually start replication"),
)
def handle(self, *args, **options):
source_uri = getattr(settings, 'PRODUCTION_COUCHDB_URI', None)
target_uri = XFormInstance.get_db().uri
if source_uri is None:
print "\n\tNo production URI to replicate from, we're done here.\n"
print "\n\tNo settings.PRODUCTION_COUCHDB_URI has been set\n"
sys.exit()
input_query = options['query_string']
if not input_query:
print "\tRunning default query for user.is_superuser"
query_string = BASE_QUERY
else:
query_string = input_query
print "\n\tRunning user query: %s" % query_string
user_pillow = UserPillow()
user_es = user_pillow.get_es()
doc_ids = [res['_id'] for res in get_query_results(user_es, query_string)]
do_replicate = options['makeitso']
repl_params = {
'doc_ids': doc_ids
}
if 'cancel' in args:
repl_params['cancel'] = True
print "\n\tSending a replication cancel notification to server"
else:
print "\n\tStarting staging replication from prod"
if do_replicate:
server = CommCareUser.get_db().server
server.replicate(source_uri, target_uri, **repl_params)
AuditEvent.audit_command()
else:
print "\n\tReplication dry run with params: %s" % repl_params
|
{
"content_hash": "570c53aeba51dd9e0201229680cdac82",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 105,
"avg_line_length": 33.06611570247934,
"alnum_prop": 0.5656085978505374,
"repo_name": "puttarajubr/commcare-hq",
"id": "7833bc88319d17925a242cdef345abf21a674101",
"size": "4001",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "corehq/apps/hqadmin/management/commands/staging_replicate_admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "581878"
},
{
"name": "HTML",
"bytes": "2790361"
},
{
"name": "JavaScript",
"bytes": "2572023"
},
{
"name": "Makefile",
"bytes": "3999"
},
{
"name": "Python",
"bytes": "11275678"
},
{
"name": "Shell",
"bytes": "23890"
}
],
"symlink_target": ""
}
|
from __future__ import annotations
import math
import sys
from iotbx.reflection_file_reader import any_reflection_file
from scitbx.array_family import flex
import xia2.Handlers.Streams
from xia2.Toolkit.NPP import npp_ify
def npp(hklin):
reader = any_reflection_file(hklin)
intensities = [
ma
for ma in reader.as_miller_arrays(merge_equivalents=False)
if ma.info().labels == ["I", "SIGI"]
][0]
indices = intensities.indices()
# merging: use external variance i.e. variances derived from SIGI column
merger = intensities.merge_equivalents(use_internal_variance=False)
mult = merger.redundancies().data()
imean = merger.array()
unique = imean.indices()
iobs = imean.data()
# scale up variance to account for sqrt(multiplicity) effective scaling
variobs = (imean.sigmas() ** 2) * mult.as_double()
all = flex.double()
cen = flex.double()
for hkl, i, v, m in zip(unique, iobs, variobs, mult):
# only consider if meaningful number of observations
if m < 3:
continue
sel = indices == hkl
data = intensities.select(sel).data()
assert m == len(data)
_x, _y = npp_ify(data, input_mean_variance=(i, v))
# perform linreg on (i) all data and (ii) subset between +/- 2 sigma
sel = flex.abs(_x) < 2
_x_ = _x.select(sel)
_y_ = _y.select(sel)
fit_all = flex.linear_regression(_x, _y)
fit_cen = flex.linear_regression(_x_, _y_)
all.append(fit_all.slope())
cen.append(fit_cen.slope())
print(
"%3d %3d %3d" % hkl,
f"{i:.2f} {v:.2f} {i / math.sqrt(v):.2f}",
f"{fit_all.slope():.2f} {fit_cen.slope():.2f}",
"%d" % m,
)
sys.stderr.write(
"Mean gradients: %.2f %.2f\n"
% (flex.sum(all) / all.size(), flex.sum(cen) / cen.size())
)
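# A normal probability plot of z = (I - <I>) / sigma(I) should have unit slope
# when the sigmas are well estimated; slopes far from 1 suggest over- or
# under-estimated errors. Minimal sketch of the per-reflection transform used
# above, with toy observations:
#
#   from scitbx.array_family import flex
#   data = flex.double([10.0, 12.0, 11.0, 9.0])
#   _x, _y = npp_ify(data, input_mean_variance=(10.5, 1.2))
#   slope = flex.linear_regression(_x, _y).slope()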
def run():
xia2.Handlers.Streams.setup_logging(
logfile="xia2.npp.txt", debugfile="xia2.npp-debug.txt"
)
npp(sys.argv[1])
|
{
"content_hash": "3657a437c6aeb368b3f0310d80da3ef7",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 76,
"avg_line_length": 27.157894736842106,
"alnum_prop": 0.5833333333333334,
"repo_name": "xia2/xia2",
"id": "3e2bd7c6a3b553681aa9a7783c4466544f818f3d",
"size": "2064",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/xia2/cli/npp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3958"
},
{
"name": "HTML",
"bytes": "35830"
},
{
"name": "Python",
"bytes": "1857098"
}
],
"symlink_target": ""
}
|
"""HTML5 Push Messaging notification service."""
from datetime import datetime, timedelta
from functools import partial
from urllib.parse import urlparse
import json
import logging
import time
import uuid
from aiohttp.hdrs import AUTHORIZATION
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.components import websocket_api
from homeassistant.components.frontend import add_manifest_json_key
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import (
HTTP_BAD_REQUEST,
HTTP_INTERNAL_SERVER_ERROR,
HTTP_UNAUTHORIZED,
URL_ROOT,
)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.util import ensure_unique_string
from homeassistant.util.json import load_json, save_json
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
DOMAIN,
PLATFORM_SCHEMA,
BaseNotificationService,
)
_LOGGER = logging.getLogger(__name__)
REGISTRATIONS_FILE = "html5_push_registrations.conf"
SERVICE_DISMISS = "html5_dismiss"
ATTR_GCM_SENDER_ID = "gcm_sender_id"
ATTR_GCM_API_KEY = "gcm_api_key"
ATTR_VAPID_PUB_KEY = "vapid_pub_key"
ATTR_VAPID_PRV_KEY = "vapid_prv_key"
ATTR_VAPID_EMAIL = "vapid_email"
def gcm_api_deprecated(value):
"""Warn user that GCM API config is deprecated."""
if value:
_LOGGER.warning(
"Configuring html5_push_notifications via the GCM api"
" has been deprecated and will stop working after April 11,"
" 2019. Use the VAPID configuration instead. For instructions,"
" see https://www.home-assistant.io/components/notify.html5/"
)
return value
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(ATTR_GCM_SENDER_ID): vol.All(cv.string, gcm_api_deprecated),
vol.Optional(ATTR_GCM_API_KEY): cv.string,
vol.Optional(ATTR_VAPID_PUB_KEY): cv.string,
vol.Optional(ATTR_VAPID_PRV_KEY): cv.string,
vol.Optional(ATTR_VAPID_EMAIL): cv.string,
}
)
ATTR_SUBSCRIPTION = "subscription"
ATTR_BROWSER = "browser"
ATTR_NAME = "name"
ATTR_ENDPOINT = "endpoint"
ATTR_KEYS = "keys"
ATTR_AUTH = "auth"
ATTR_P256DH = "p256dh"
ATTR_EXPIRATIONTIME = "expirationTime"
ATTR_TAG = "tag"
ATTR_ACTION = "action"
ATTR_ACTIONS = "actions"
ATTR_TYPE = "type"
ATTR_URL = "url"
ATTR_DISMISS = "dismiss"
ATTR_PRIORITY = "priority"
DEFAULT_PRIORITY = "normal"
ATTR_TTL = "ttl"
DEFAULT_TTL = 86400
ATTR_JWT = "jwt"
WS_TYPE_APPKEY = "notify/html5/appkey"
SCHEMA_WS_APPKEY = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_APPKEY}
)
# The number of days after the moment a notification is sent that a JWT
# is valid.
JWT_VALID_DAYS = 7
KEYS_SCHEMA = vol.All(
dict,
vol.Schema(
{vol.Required(ATTR_AUTH): cv.string, vol.Required(ATTR_P256DH): cv.string}
),
)
SUBSCRIPTION_SCHEMA = vol.All(
dict,
vol.Schema(
{
# pylint: disable=no-value-for-parameter
vol.Required(ATTR_ENDPOINT): vol.Url(),
vol.Required(ATTR_KEYS): KEYS_SCHEMA,
vol.Optional(ATTR_EXPIRATIONTIME): vol.Any(None, cv.positive_int),
}
),
)
DISMISS_SERVICE_SCHEMA = vol.Schema(
{
vol.Optional(ATTR_TARGET): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ATTR_DATA): dict,
}
)
REGISTER_SCHEMA = vol.Schema(
{
vol.Required(ATTR_SUBSCRIPTION): SUBSCRIPTION_SCHEMA,
vol.Required(ATTR_BROWSER): vol.In(["chrome", "firefox"]),
vol.Optional(ATTR_NAME): cv.string,
}
)
CALLBACK_EVENT_PAYLOAD_SCHEMA = vol.Schema(
{
vol.Required(ATTR_TAG): cv.string,
vol.Required(ATTR_TYPE): vol.In(["received", "clicked", "closed"]),
vol.Required(ATTR_TARGET): cv.string,
vol.Optional(ATTR_ACTION): cv.string,
vol.Optional(ATTR_DATA): dict,
}
)
NOTIFY_CALLBACK_EVENT = "html5_notification"
# Badge and timestamp are Chrome specific (not in official spec)
HTML5_SHOWNOTIFICATION_PARAMETERS = (
"actions",
"badge",
"body",
"dir",
"icon",
"image",
"lang",
"renotify",
"requireInteraction",
"tag",
"timestamp",
"vibrate",
)
def get_service(hass, config, discovery_info=None):
"""Get the HTML5 push notification service."""
json_path = hass.config.path(REGISTRATIONS_FILE)
registrations = _load_config(json_path)
if registrations is None:
return None
vapid_pub_key = config.get(ATTR_VAPID_PUB_KEY)
vapid_prv_key = config.get(ATTR_VAPID_PRV_KEY)
vapid_email = config.get(ATTR_VAPID_EMAIL)
def websocket_appkey(hass, connection, msg):
connection.send_message(websocket_api.result_message(msg["id"], vapid_pub_key))
hass.components.websocket_api.async_register_command(
WS_TYPE_APPKEY, websocket_appkey, SCHEMA_WS_APPKEY
)
hass.http.register_view(HTML5PushRegistrationView(registrations, json_path))
hass.http.register_view(HTML5PushCallbackView(registrations))
gcm_api_key = config.get(ATTR_GCM_API_KEY)
gcm_sender_id = config.get(ATTR_GCM_SENDER_ID)
if gcm_sender_id is not None:
add_manifest_json_key(ATTR_GCM_SENDER_ID, config.get(ATTR_GCM_SENDER_ID))
return HTML5NotificationService(
hass, gcm_api_key, vapid_prv_key, vapid_email, registrations, json_path
)
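# Configuration sketch (YAML; keys mirror PLATFORM_SCHEMA above, values are
# placeholders):
#
#   notify:
#     - platform: html5
#       vapid_pub_key: <application server public key>
#       vapid_prv_key: <application server private key>
#       vapid_email: admin@example.com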
def _load_config(filename):
"""Load configuration."""
try:
return load_json(filename)
except HomeAssistantError:
pass
return {}
class HTML5PushRegistrationView(HomeAssistantView):
"""Accepts push registrations from a browser."""
url = "/api/notify.html5"
name = "api:notify.html5"
def __init__(self, registrations, json_path):
"""Init HTML5PushRegistrationView."""
self.registrations = registrations
self.json_path = json_path
async def post(self, request):
"""Accept the POST request for push registrations from a browser."""
try:
data = await request.json()
except ValueError:
return self.json_message("Invalid JSON", HTTP_BAD_REQUEST)
try:
data = REGISTER_SCHEMA(data)
except vol.Invalid as ex:
return self.json_message(humanize_error(data, ex), HTTP_BAD_REQUEST)
devname = data.get(ATTR_NAME)
data.pop(ATTR_NAME, None)
name = self.find_registration_name(data, devname)
previous_registration = self.registrations.get(name)
self.registrations[name] = data
try:
hass = request.app["hass"]
await hass.async_add_job(save_json, self.json_path, self.registrations)
return self.json_message("Push notification subscriber registered.")
except HomeAssistantError:
if previous_registration is not None:
self.registrations[name] = previous_registration
else:
self.registrations.pop(name)
return self.json_message(
"Error saving registration.", HTTP_INTERNAL_SERVER_ERROR
)
def find_registration_name(self, data, suggested=None):
"""Find a registration name matching data or generate a unique one."""
endpoint = data.get(ATTR_SUBSCRIPTION).get(ATTR_ENDPOINT)
for key, registration in self.registrations.items():
subscription = registration.get(ATTR_SUBSCRIPTION)
if subscription.get(ATTR_ENDPOINT) == endpoint:
return key
return ensure_unique_string(suggested or "unnamed device", self.registrations)
async def delete(self, request):
"""Delete a registration."""
try:
data = await request.json()
except ValueError:
return self.json_message("Invalid JSON", HTTP_BAD_REQUEST)
subscription = data.get(ATTR_SUBSCRIPTION)
found = None
for key, registration in self.registrations.items():
if registration.get(ATTR_SUBSCRIPTION) == subscription:
found = key
break
if not found:
# If not found, unregistering was already done. Return 200
return self.json_message("Registration not found.")
reg = self.registrations.pop(found)
try:
hass = request.app["hass"]
await hass.async_add_job(save_json, self.json_path, self.registrations)
except HomeAssistantError:
self.registrations[found] = reg
return self.json_message(
"Error saving registration.", HTTP_INTERNAL_SERVER_ERROR
)
return self.json_message("Push notification subscriber unregistered.")
class HTML5PushCallbackView(HomeAssistantView):
"""Accepts push registrations from a browser."""
requires_auth = False
url = "/api/notify.html5/callback"
name = "api:notify.html5/callback"
def __init__(self, registrations):
"""Init HTML5PushCallbackView."""
self.registrations = registrations
def decode_jwt(self, token):
"""Find the registration that signed this JWT and return it."""
import jwt
# 1. Check claims w/o verifying to see if a target is in there.
# 2. If target in claims, attempt to verify against the given name.
# 2a. If decode is successful, return the payload.
# 2b. If decode is unsuccessful, return a 401.
target_check = jwt.decode(token, verify=False)
if target_check.get(ATTR_TARGET) in self.registrations:
possible_target = self.registrations[target_check[ATTR_TARGET]]
key = possible_target[ATTR_SUBSCRIPTION][ATTR_KEYS][ATTR_AUTH]
try:
return jwt.decode(token, key, algorithms=["ES256", "HS256"])
except jwt.exceptions.DecodeError:
pass
return self.json_message(
"No target found in JWT", status_code=HTTP_UNAUTHORIZED
)
# The following is based on code from Auth0
# https://auth0.com/docs/quickstart/backend/python
def check_authorization_header(self, request):
"""Check the authorization header."""
import jwt
auth = request.headers.get(AUTHORIZATION, None)
if not auth:
return self.json_message(
"Authorization header is expected", status_code=HTTP_UNAUTHORIZED
)
parts = auth.split()
if parts[0].lower() != "bearer":
return self.json_message(
"Authorization header must " "start with Bearer",
status_code=HTTP_UNAUTHORIZED,
)
if len(parts) != 2:
return self.json_message(
"Authorization header must " "be Bearer token",
status_code=HTTP_UNAUTHORIZED,
)
token = parts[1]
try:
payload = self.decode_jwt(token)
except jwt.exceptions.InvalidTokenError:
return self.json_message("token is invalid", status_code=HTTP_UNAUTHORIZED)
return payload
async def post(self, request):
"""Accept the POST request for push registrations event callback."""
auth_check = self.check_authorization_header(request)
if not isinstance(auth_check, dict):
return auth_check
try:
data = await request.json()
except ValueError:
return self.json_message("Invalid JSON", HTTP_BAD_REQUEST)
event_payload = {
ATTR_TAG: data.get(ATTR_TAG),
ATTR_TYPE: data[ATTR_TYPE],
ATTR_TARGET: auth_check[ATTR_TARGET],
}
if data.get(ATTR_ACTION) is not None:
event_payload[ATTR_ACTION] = data.get(ATTR_ACTION)
if data.get(ATTR_DATA) is not None:
event_payload[ATTR_DATA] = data.get(ATTR_DATA)
try:
event_payload = CALLBACK_EVENT_PAYLOAD_SCHEMA(event_payload)
except vol.Invalid as ex:
_LOGGER.warning(
"Callback event payload is not valid: %s",
humanize_error(event_payload, ex),
)
event_name = "{}.{}".format(NOTIFY_CALLBACK_EVENT, event_payload[ATTR_TYPE])
request.app["hass"].bus.fire(event_name, event_payload)
return self.json({"status": "ok", "event": event_payload[ATTR_TYPE]})
class HTML5NotificationService(BaseNotificationService):
"""Implement the notification service for HTML5."""
def __init__(self, hass, gcm_key, vapid_prv, vapid_email, registrations, json_path):
"""Initialize the service."""
self._gcm_key = gcm_key
self._vapid_prv = vapid_prv
self._vapid_email = vapid_email
self.registrations = registrations
self.registrations_json_path = json_path
async def async_dismiss_message(service):
"""Handle dismissing notification message service calls."""
kwargs = {}
if self.targets is not None:
kwargs[ATTR_TARGET] = self.targets
elif service.data.get(ATTR_TARGET) is not None:
kwargs[ATTR_TARGET] = service.data.get(ATTR_TARGET)
kwargs[ATTR_DATA] = service.data.get(ATTR_DATA)
await self.async_dismiss(**kwargs)
hass.services.async_register(
DOMAIN,
SERVICE_DISMISS,
async_dismiss_message,
schema=DISMISS_SERVICE_SCHEMA,
)
@property
def targets(self):
"""Return a dictionary of registered targets."""
targets = {}
for registration in self.registrations:
targets[registration] = registration
return targets
def dismiss(self, **kwargs):
"""Dismisses a notification."""
data = kwargs.get(ATTR_DATA)
tag = data.get(ATTR_TAG) if data else ""
payload = {ATTR_TAG: tag, ATTR_DISMISS: True, ATTR_DATA: {}}
self._push_message(payload, **kwargs)
async def async_dismiss(self, **kwargs):
"""Dismisses a notification.
This method must be run in the event loop.
"""
await self.hass.async_add_executor_job(partial(self.dismiss, **kwargs))
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
tag = str(uuid.uuid4())
payload = {
"badge": "/static/images/notification-badge.png",
"body": message,
ATTR_DATA: {},
"icon": "/static/icons/favicon-192x192.png",
ATTR_TAG: tag,
ATTR_TITLE: kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT),
}
data = kwargs.get(ATTR_DATA)
if data:
# Pick out fields that should go into the notification directly vs
# into the notification data dictionary.
data_tmp = {}
for key, val in data.items():
if key in HTML5_SHOWNOTIFICATION_PARAMETERS:
payload[key] = val
else:
data_tmp[key] = val
payload[ATTR_DATA] = data_tmp
if (
payload[ATTR_DATA].get(ATTR_URL) is None
and payload.get(ATTR_ACTIONS) is None
):
payload[ATTR_DATA][ATTR_URL] = URL_ROOT
self._push_message(payload, **kwargs)
def _push_message(self, payload, **kwargs):
"""Send the message."""
from pywebpush import WebPusher
timestamp = int(time.time())
ttl = int(kwargs.get(ATTR_TTL, DEFAULT_TTL))
priority = kwargs.get(ATTR_PRIORITY, DEFAULT_PRIORITY)
if priority not in ["normal", "high"]:
priority = DEFAULT_PRIORITY
payload["timestamp"] = timestamp * 1000 # Javascript ms since epoch
targets = kwargs.get(ATTR_TARGET)
if not targets:
targets = self.registrations.keys()
for target in list(targets):
info = self.registrations.get(target)
try:
info = REGISTER_SCHEMA(info)
except vol.Invalid:
_LOGGER.error(
"%s is not a valid HTML5 push notification" " target", target
)
continue
payload[ATTR_DATA][ATTR_JWT] = add_jwt(
timestamp,
target,
payload[ATTR_TAG],
info[ATTR_SUBSCRIPTION][ATTR_KEYS][ATTR_AUTH],
)
webpusher = WebPusher(info[ATTR_SUBSCRIPTION])
if self._vapid_prv and self._vapid_email:
vapid_headers = create_vapid_headers(
self._vapid_email, info[ATTR_SUBSCRIPTION], self._vapid_prv
)
vapid_headers.update({"urgency": priority, "priority": priority})
response = webpusher.send(
data=json.dumps(payload), headers=vapid_headers, ttl=ttl
)
else:
# Only pass the gcm key if we're actually using GCM
                # If we don't, notifications break on Firefox
gcm_key = (
self._gcm_key
if "googleapis.com" in info[ATTR_SUBSCRIPTION][ATTR_ENDPOINT]
else None
)
response = webpusher.send(json.dumps(payload), gcm_key=gcm_key, ttl=ttl)
if response.status_code == 410:
_LOGGER.info("Notification channel has expired")
reg = self.registrations.pop(target)
if not save_json(self.registrations_json_path, self.registrations):
self.registrations[target] = reg
_LOGGER.error("Error saving registration")
else:
_LOGGER.info("Configuration saved")
def add_jwt(timestamp, target, tag, jwt_secret):
"""Create JWT json to put into payload."""
import jwt
jwt_exp = datetime.fromtimestamp(timestamp) + timedelta(days=JWT_VALID_DAYS)
jwt_claims = {
"exp": jwt_exp,
"nbf": timestamp,
"iat": timestamp,
ATTR_TARGET: target,
ATTR_TAG: tag,
}
return jwt.encode(jwt_claims, jwt_secret).decode("utf-8")
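# Verification sketch (assuming the pre-2.0 PyJWT API used in this module, and
# hypothetical values): a callback can check the token against the same secret.
#
#   token = add_jwt(int(time.time()), "my_device", "tag-1", "auth-secret")
#   claims = jwt.decode(token, "auth-secret", algorithms=["HS256"])
#   assert claims[ATTR_TARGET] == "my_device"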
def create_vapid_headers(vapid_email, subscription_info, vapid_private_key):
"""Create encrypted headers to send to WebPusher."""
from py_vapid import Vapid
if vapid_email and vapid_private_key and ATTR_ENDPOINT in subscription_info:
url = urlparse(subscription_info.get(ATTR_ENDPOINT))
vapid_claims = {
"sub": f"mailto:{vapid_email}",
"aud": f"{url.scheme}://{url.netloc}",
}
vapid = Vapid.from_string(private_key=vapid_private_key)
return vapid.sign(vapid_claims)
return None
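# Usage sketch (hypothetical values): py_vapid signs claims derived from the
# subscription endpoint's origin and returns headers for WebPusher.send().
#
#   headers = create_vapid_headers(
#       "admin@example.com",
#       {ATTR_ENDPOINT: "https://updates.push.services.mozilla.com/wpush/v2/abc"},
#       "<base64-encoded private key>",
#   )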
|
{
"content_hash": "ccd87599a96052a0c0892f837bf29bfd",
"timestamp": "",
"source": "github",
"line_count": 578,
"max_line_length": 88,
"avg_line_length": 32.6038062283737,
"alnum_prop": 0.6118333775537278,
"repo_name": "Cinntax/home-assistant",
"id": "ac76911b9f63ea9a20546b715c285eead92d3190",
"size": "18845",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/html5/notify.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17374056"
},
{
"name": "Shell",
"bytes": "6792"
}
],
"symlink_target": ""
}
|
import cv2
import numpy as np
from keypoint_preprocess import get_affine_transform
from PIL import Image
def decode_image(im_file, im_info):
"""read rgb image
Args:
im_file (str|np.ndarray): input can be image path or np.ndarray
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
if isinstance(im_file, str):
with open(im_file, 'rb') as f:
im_read = f.read()
data = np.frombuffer(im_read, dtype='uint8')
im = cv2.imdecode(data, 1) # BGR mode, but need RGB mode
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
else:
im = im_file
im_info['im_shape'] = np.array(im.shape[:2], dtype=np.float32)
im_info['scale_factor'] = np.array([1., 1.], dtype=np.float32)
return im, im_info
class Resize_Mult32(object):
"""resize image by target_size and max_size
Args:
target_size (int): the target size of image
keep_ratio (bool): whether keep_ratio or not, default true
interp (int): method of resize
"""
def __init__(self, limit_side_len, limit_type, interp=cv2.INTER_LINEAR):
self.limit_side_len = limit_side_len
self.limit_type = limit_type
self.interp = interp
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
im_channel = im.shape[2]
im_scale_y, im_scale_x = self.generate_scale(im)
im = cv2.resize(
im,
None,
None,
fx=im_scale_x,
fy=im_scale_y,
interpolation=self.interp)
im_info['im_shape'] = np.array(im.shape[:2]).astype('float32')
im_info['scale_factor'] = np.array(
[im_scale_y, im_scale_x]).astype('float32')
return im, im_info
def generate_scale(self, img):
"""
Args:
img (np.ndarray): image (np.ndarray)
Returns:
im_scale_x: the resize ratio of X
im_scale_y: the resize ratio of Y
"""
limit_side_len = self.limit_side_len
h, w, c = img.shape
# limit the max side
if self.limit_type == 'max':
if h > w:
ratio = float(limit_side_len) / h
else:
ratio = float(limit_side_len) / w
elif self.limit_type == 'min':
if h < w:
ratio = float(limit_side_len) / h
else:
ratio = float(limit_side_len) / w
elif self.limit_type == 'resize_long':
ratio = float(limit_side_len) / max(h, w)
else:
            raise ValueError('unsupported limit_type: {}'.format(self.limit_type))
resize_h = int(h * ratio)
resize_w = int(w * ratio)
resize_h = max(int(round(resize_h / 32) * 32), 32)
resize_w = max(int(round(resize_w / 32) * 32), 32)
im_scale_y = resize_h / float(h)
im_scale_x = resize_w / float(w)
return im_scale_y, im_scale_x
class Resize(object):
"""resize image by target_size and max_size
Args:
target_size (int): the target size of image
keep_ratio (bool): whether keep_ratio or not, default true
interp (int): method of resize
"""
def __init__(self, target_size, keep_ratio=True, interp=cv2.INTER_LINEAR):
if isinstance(target_size, int):
target_size = [target_size, target_size]
self.target_size = target_size
self.keep_ratio = keep_ratio
self.interp = interp
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
assert len(self.target_size) == 2
assert self.target_size[0] > 0 and self.target_size[1] > 0
im_channel = im.shape[2]
im_scale_y, im_scale_x = self.generate_scale(im)
im = cv2.resize(
im,
None,
None,
fx=im_scale_x,
fy=im_scale_y,
interpolation=self.interp)
im_info['im_shape'] = np.array(im.shape[:2]).astype('float32')
im_info['scale_factor'] = np.array(
[im_scale_y, im_scale_x]).astype('float32')
return im, im_info
def generate_scale(self, im):
"""
Args:
im (np.ndarray): image (np.ndarray)
Returns:
im_scale_x: the resize ratio of X
im_scale_y: the resize ratio of Y
"""
origin_shape = im.shape[:2]
im_c = im.shape[2]
if self.keep_ratio:
im_size_min = np.min(origin_shape)
im_size_max = np.max(origin_shape)
target_size_min = np.min(self.target_size)
target_size_max = np.max(self.target_size)
im_scale = float(target_size_min) / float(im_size_min)
if np.round(im_scale * im_size_max) > target_size_max:
im_scale = float(target_size_max) / float(im_size_max)
im_scale_x = im_scale
im_scale_y = im_scale
else:
resize_h, resize_w = self.target_size
im_scale_y = resize_h / float(origin_shape[0])
im_scale_x = resize_w / float(origin_shape[1])
return im_scale_y, im_scale_x
class ShortSizeScale(object):
"""
Scale images by short size.
Args:
short_size(float | int): Short size of an image will be scaled to the short_size.
fixed_ratio(bool): Set whether to zoom according to a fixed ratio. default: True
do_round(bool): Whether to round up when calculating the zoom ratio. default: False
backend(str): Choose pillow or cv2 as the graphics processing backend. default: 'pillow'
"""
def __init__(self,
short_size,
fixed_ratio=True,
keep_ratio=None,
do_round=False,
backend='pillow'):
self.short_size = short_size
assert (fixed_ratio and not keep_ratio) or (
not fixed_ratio
), "fixed_ratio and keep_ratio cannot be true at the same time"
self.fixed_ratio = fixed_ratio
self.keep_ratio = keep_ratio
self.do_round = do_round
        assert backend in [
            'pillow', 'cv2'
        ], f"Scale's backend must be pillow or cv2, but got {backend}"
self.backend = backend
def __call__(self, img):
"""
Performs resize operations.
Args:
img (PIL.Image): a PIL.Image.
return:
resized_img: a PIL.Image after scaling.
"""
result_img = None
if isinstance(img, np.ndarray):
h, w, _ = img.shape
elif isinstance(img, Image.Image):
w, h = img.size
else:
raise NotImplementedError
if w <= h:
ow = self.short_size
if self.fixed_ratio: # default is True
oh = int(self.short_size * 4.0 / 3.0)
elif not self.keep_ratio: # no
oh = self.short_size
else:
                scale_factor = self.short_size / w
                # short side is w, so both dims scale by short_size / w
                oh = int(h * float(scale_factor) +
                         0.5) if self.do_round else int(h * self.short_size / w)
                ow = int(w * float(scale_factor) +
                         0.5) if self.do_round else int(w * self.short_size / w)
else:
oh = self.short_size
if self.fixed_ratio:
ow = int(self.short_size * 4.0 / 3.0)
elif not self.keep_ratio: # no
ow = self.short_size
else:
                scale_factor = self.short_size / h
                # short side is h, so both dims scale by short_size / h
                oh = int(h * float(scale_factor) +
                         0.5) if self.do_round else int(h * self.short_size / h)
                ow = int(w * float(scale_factor) +
                         0.5) if self.do_round else int(w * self.short_size / h)
if type(img) == np.ndarray:
img = Image.fromarray(img, mode='RGB')
if self.backend == 'pillow':
result_img = img.resize((ow, oh), Image.BILINEAR)
        elif self.backend == 'cv2' and (self.keep_ratio is not None):
            # img was converted to a PIL Image above; cv2 needs an ndarray
            result_img = cv2.resize(
                np.asarray(img), (ow, oh), interpolation=cv2.INTER_LINEAR)
else:
result_img = Image.fromarray(
cv2.resize(
np.asarray(img), (ow, oh), interpolation=cv2.INTER_LINEAR))
return result_img
class NormalizeImage(object):
"""normalize image
Args:
mean (list): im - mean
std (list): im / std
        is_scale (bool): whether to scale the image by 1 / 255 first
norm_type (str): type in ['mean_std', 'none']
"""
def __init__(self, mean, std, is_scale=True, norm_type='mean_std'):
self.mean = mean
self.std = std
self.is_scale = is_scale
self.norm_type = norm_type
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
im = im.astype(np.float32, copy=False)
if self.is_scale:
scale = 1.0 / 255.0
im *= scale
if self.norm_type == 'mean_std':
mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
std = np.array(self.std)[np.newaxis, np.newaxis, :]
im -= mean
im /= std
return im, im_info
class Permute(object):
"""permute image
Args:
to_bgr (bool): whether convert RGB to BGR
channel_first (bool): whether convert HWC to CHW
"""
def __init__(self, ):
super(Permute, self).__init__()
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
im = im.transpose((2, 0, 1)).copy()
return im, im_info
class PadStride(object):
""" padding image for model with FPN, instead PadBatch(pad_to_stride) in original config
Args:
stride (bool): model with FPN need image shape % stride == 0
"""
def __init__(self, stride=0):
self.coarsest_stride = stride
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
coarsest_stride = self.coarsest_stride
if coarsest_stride <= 0:
return im, im_info
im_c, im_h, im_w = im.shape
pad_h = int(np.ceil(float(im_h) / coarsest_stride) * coarsest_stride)
pad_w = int(np.ceil(float(im_w) / coarsest_stride) * coarsest_stride)
padding_im = np.zeros((im_c, pad_h, pad_w), dtype=np.float32)
padding_im[:, :im_h, :im_w] = im
return padding_im, im_info
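# Worked example for PadStride: with stride 32 and a CHW image of 3 x 500 x 375,
# pad_h = ceil(500 / 32) * 32 = 512 and pad_w = ceil(375 / 32) * 32 = 384, so the
# image is zero-padded to 3 x 512 x 384 with the content in the top-left corner.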
class LetterBoxResize(object):
def __init__(self, target_size):
"""
Resize image to target size, convert normalized xywh to pixel xyxy
format ([x_center, y_center, width, height] -> [x0, y0, x1, y1]).
Args:
target_size (int|list): image target size.
"""
super(LetterBoxResize, self).__init__()
if isinstance(target_size, int):
target_size = [target_size, target_size]
self.target_size = target_size
def letterbox(self, img, height, width, color=(127.5, 127.5, 127.5)):
# letterbox: resize a rectangular image to a padded rectangular
shape = img.shape[:2] # [height, width]
ratio_h = float(height) / shape[0]
ratio_w = float(width) / shape[1]
ratio = min(ratio_h, ratio_w)
new_shape = (round(shape[1] * ratio),
round(shape[0] * ratio)) # [width, height]
padw = (width - new_shape[0]) / 2
padh = (height - new_shape[1]) / 2
top, bottom = round(padh - 0.1), round(padh + 0.1)
left, right = round(padw - 0.1), round(padw + 0.1)
img = cv2.resize(
img, new_shape, interpolation=cv2.INTER_AREA) # resized, no border
img = cv2.copyMakeBorder(
img, top, bottom, left, right, cv2.BORDER_CONSTANT,
value=color) # padded rectangular
return img, ratio, padw, padh
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
assert len(self.target_size) == 2
assert self.target_size[0] > 0 and self.target_size[1] > 0
height, width = self.target_size
h, w = im.shape[:2]
im, ratio, padw, padh = self.letterbox(im, height=height, width=width)
new_shape = [round(h * ratio), round(w * ratio)]
im_info['im_shape'] = np.array(new_shape, dtype=np.float32)
im_info['scale_factor'] = np.array([ratio, ratio], dtype=np.float32)
return im, im_info
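# Worked example for LetterBoxResize with target_size=[416, 416] on a 360 x 640
# (h x w) frame: ratio = min(416/360, 416/640) = 0.65, the frame is resized to
# 416 x 234 (w x h), and 91 gray rows are padded above and below, giving
# scale_factor [0.65, 0.65] in im_info.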
class Pad(object):
def __init__(self, size, fill_value=[114.0, 114.0, 114.0]):
"""
Pad image to a specified size.
Args:
size (list[int]): image target size
fill_value (list[float]): rgb value of pad area, default (114.0, 114.0, 114.0)
"""
super(Pad, self).__init__()
if isinstance(size, int):
size = [size, size]
self.size = size
self.fill_value = fill_value
def __call__(self, im, im_info):
im_h, im_w = im.shape[:2]
h, w = self.size
if h == im_h and w == im_w:
im = im.astype(np.float32)
return im, im_info
canvas = np.ones((h, w, 3), dtype=np.float32)
canvas *= np.array(self.fill_value, dtype=np.float32)
canvas[0:im_h, 0:im_w, :] = im.astype(np.float32)
im = canvas
return im, im_info
class WarpAffine(object):
"""Warp affine the image
"""
def __init__(self,
keep_res=False,
pad=31,
input_h=512,
input_w=512,
scale=0.4,
shift=0.1):
self.keep_res = keep_res
self.pad = pad
self.input_h = input_h
self.input_w = input_w
self.scale = scale
self.shift = shift
def __call__(self, im, im_info):
"""
Args:
im (np.ndarray): image (np.ndarray)
im_info (dict): info of image
Returns:
im (np.ndarray): processed image (np.ndarray)
im_info (dict): info of processed image
"""
img = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
h, w = img.shape[:2]
if self.keep_res:
input_h = (h | self.pad) + 1
input_w = (w | self.pad) + 1
s = np.array([input_w, input_h], dtype=np.float32)
c = np.array([w // 2, h // 2], dtype=np.float32)
else:
s = max(h, w) * 1.0
input_h, input_w = self.input_h, self.input_w
c = np.array([w / 2., h / 2.], dtype=np.float32)
trans_input = get_affine_transform(c, s, 0, [input_w, input_h])
img = cv2.resize(img, (w, h))
inp = cv2.warpAffine(
img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)
return inp, im_info
def preprocess(im, preprocess_ops):
# process image by preprocess_ops
im_info = {
'scale_factor': np.array(
[1., 1.], dtype=np.float32),
'im_shape': None,
}
im, im_info = decode_image(im, im_info)
for operator in preprocess_ops:
im, im_info = operator(im, im_info)
return im, im_info
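# Illustrative usage sketch (not part of the original file): composing the ops
# defined above into a pipeline. `decode_image` (referenced above) is assumed
# to accept an image path or an already-decoded array.
#
#   ops = [LetterBoxResize(640), Pad(640)]
#   im, im_info = preprocess('demo.jpg', ops)
#   # im_info carries 'im_shape' and 'scale_factor' for mapping predictions
#   # back to the original image.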
|
{
"content_hash": "03c44710cd66d3d5acf242bd5026fbaa",
"timestamp": "",
"source": "github",
"line_count": 492,
"max_line_length": 96,
"avg_line_length": 33.91463414634146,
"alnum_prop": 0.5174997003475967,
"repo_name": "PaddlePaddle/models",
"id": "e0373b92a5064c2a71a4dcebc67aeb72b27dd0eb",
"size": "17297",
"binary": false,
"copies": "2",
"ref": "refs/heads/release/2.3",
"path": "modelcenter/PP-Vehicle/APP/python/preprocess.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "46835"
},
{
"name": "CMake",
"bytes": "8248"
},
{
"name": "Jupyter Notebook",
"bytes": "1720166"
},
{
"name": "Makefile",
"bytes": "2920"
},
{
"name": "Python",
"bytes": "3099697"
},
{
"name": "Shell",
"bytes": "70177"
}
],
"symlink_target": ""
}
|
"""
******** Models for test_data.py ***********
The following classes are for testing basic data marshalling, including
NULL values, where allowed.
The basic idea is to have a model for each Django data type.
"""
import uuid
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from .base import BaseModel
class BinaryData(models.Model):
data = models.BinaryField(null=True)
class BooleanData(models.Model):
data = models.BooleanField(default=False, null=True)
class CharData(models.Model):
data = models.CharField(max_length=30, null=True)
class DateData(models.Model):
data = models.DateField(null=True)
class DateTimeData(models.Model):
data = models.DateTimeField(null=True)
class DecimalData(models.Model):
data = models.DecimalField(null=True, decimal_places=3, max_digits=5)
class EmailData(models.Model):
data = models.EmailField(null=True)
class FileData(models.Model):
data = models.FileField(null=True)
class FilePathData(models.Model):
data = models.FilePathField(null=True)
class FloatData(models.Model):
data = models.FloatField(null=True)
class IntegerData(models.Model):
data = models.IntegerField(null=True)
class BigIntegerData(models.Model):
data = models.BigIntegerField(null=True)
# class ImageData(models.Model):
# data = models.ImageField(null=True)
class GenericIPAddressData(models.Model):
data = models.GenericIPAddressField(null=True)
class PositiveBigIntegerData(models.Model):
data = models.PositiveBigIntegerField(null=True)
class PositiveIntegerData(models.Model):
data = models.PositiveIntegerField(null=True)
class PositiveSmallIntegerData(models.Model):
data = models.PositiveSmallIntegerField(null=True)
class SlugData(models.Model):
data = models.SlugField(null=True)
class SmallData(models.Model):
data = models.SmallIntegerField(null=True)
class TextData(models.Model):
data = models.TextField(null=True)
class TimeData(models.Model):
data = models.TimeField(null=True)
class Tag(models.Model):
"""A tag on an item."""
data = models.SlugField()
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Meta:
ordering = ["data"]
class GenericData(models.Model):
data = models.CharField(max_length=30)
tags = GenericRelation(Tag)
# The following test classes are all for validation
# of related objects; in particular, forward, backward,
# and self references.
class Anchor(models.Model):
"""This is a model that can be used as
something for other models to point at"""
data = models.CharField(max_length=30)
class Meta:
ordering = ('id',)
class UniqueAnchor(models.Model):
"""This is a model that can be used as
something for other models to point at"""
data = models.CharField(unique=True, max_length=30)
class FKData(models.Model):
data = models.ForeignKey(Anchor, models.SET_NULL, null=True)
class M2MData(models.Model):
data = models.ManyToManyField(Anchor)
class O2OData(models.Model):
# One to one field can't be null here, since it is a PK.
data = models.OneToOneField(Anchor, models.CASCADE, primary_key=True)
class FKSelfData(models.Model):
data = models.ForeignKey('self', models.CASCADE, null=True)
class M2MSelfData(models.Model):
data = models.ManyToManyField('self', symmetrical=False)
class FKDataToField(models.Model):
data = models.ForeignKey(UniqueAnchor, models.SET_NULL, null=True, to_field='data')
class FKDataToO2O(models.Model):
data = models.ForeignKey(O2OData, models.SET_NULL, null=True)
class M2MIntermediateData(models.Model):
data = models.ManyToManyField(Anchor, through='Intermediate')
class Intermediate(models.Model):
left = models.ForeignKey(M2MIntermediateData, models.CASCADE)
right = models.ForeignKey(Anchor, models.CASCADE)
extra = models.CharField(max_length=30, blank=True, default="doesn't matter")
# The following test classes are for validating the
# deserialization of objects that use a user-defined
# field as the primary key.
# Some of these data types have been commented out
# because they can't be used as a primary key on one
# or all database backends.
class BooleanPKData(models.Model):
data = models.BooleanField(primary_key=True, default=False)
class CharPKData(models.Model):
data = models.CharField(max_length=30, primary_key=True)
class DatePKData(models.Model):
data = models.DateField(primary_key=True)
class DateTimePKData(models.Model):
data = models.DateTimeField(primary_key=True)
class DecimalPKData(models.Model):
data = models.DecimalField(primary_key=True, decimal_places=3, max_digits=5)
class EmailPKData(models.Model):
data = models.EmailField(primary_key=True)
# class FilePKData(models.Model):
# data = models.FileField(primary_key=True)
class FilePathPKData(models.Model):
data = models.FilePathField(primary_key=True)
class FloatPKData(models.Model):
data = models.FloatField(primary_key=True)
class IntegerPKData(models.Model):
data = models.IntegerField(primary_key=True)
# class ImagePKData(models.Model):
# data = models.ImageField(primary_key=True)
class GenericIPAddressPKData(models.Model):
data = models.GenericIPAddressField(primary_key=True)
class PositiveIntegerPKData(models.Model):
data = models.PositiveIntegerField(primary_key=True)
class PositiveSmallIntegerPKData(models.Model):
data = models.PositiveSmallIntegerField(primary_key=True)
class SlugPKData(models.Model):
data = models.SlugField(primary_key=True)
class SmallPKData(models.Model):
data = models.SmallIntegerField(primary_key=True)
# class TextPKData(models.Model):
# data = models.TextField(primary_key=True)
# class TimePKData(models.Model):
# data = models.TimeField(primary_key=True)
class UUIDData(models.Model):
data = models.UUIDField(primary_key=True)
class UUIDDefaultData(models.Model):
data = models.UUIDField(primary_key=True, default=uuid.uuid4)
class FKToUUID(models.Model):
data = models.ForeignKey(UUIDData, models.CASCADE)
# Tests for handling fields with pre_save functions, or
# models with save functions that modify data
class AutoNowDateTimeData(models.Model):
data = models.DateTimeField(null=True, auto_now=True)
class ModifyingSaveData(models.Model):
data = models.IntegerField(null=True)
def save(self, *args, **kwargs):
"""
A save method that modifies the data in the object.
A user-defined save() method isn't called when objects are deserialized
(#4459).
"""
self.data = 666
super().save(*args, **kwargs)
# Tests for serialization of models using inheritance.
# Regression for #7202, #7350
class AbstractBaseModel(models.Model):
parent_data = models.IntegerField()
class Meta:
abstract = True
class InheritAbstractModel(AbstractBaseModel):
child_data = models.IntegerField()
class InheritBaseModel(BaseModel):
child_data = models.IntegerField()
class ExplicitInheritBaseModel(BaseModel):
parent = models.OneToOneField(BaseModel, models.CASCADE, parent_link=True)
child_data = models.IntegerField()
class LengthModel(models.Model):
data = models.IntegerField()
def __len__(self):
return self.data
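# Illustrative sketch (not part of the original module): these models let the
# serializer tests round-trip every field type, e.g., with Django configured
# and migrations applied:
#
#   from django.core import serializers
#   payload = serializers.serialize("json", [CharData(data="hello")])
#   [restored] = serializers.deserialize("json", payload)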
|
{
"content_hash": "618127a2fea42b2002e08e55f3c1ae49",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 87,
"avg_line_length": 24.11783439490446,
"alnum_prop": 0.7329988115674105,
"repo_name": "freakboy3742/django",
"id": "94c4a215fc2ab8afdc3aefd78cf2d0907705af2f",
"size": "7573",
"binary": false,
"copies": "11",
"ref": "refs/heads/main",
"path": "tests/serializers/models/data.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "52958"
},
{
"name": "JavaScript",
"bytes": "102668"
},
{
"name": "Python",
"bytes": "9564866"
},
{
"name": "Shell",
"bytes": "12137"
}
],
"symlink_target": ""
}
|
import cgi
#import cgitb
#cgitb.enable()
#cgitb.enable(display=0, logdir='/tmp')
import socket
class OpenUnixSocketClient(object):
def __init__(self, host='localhost', portno=10888,
socketfile='/tmp/.record_roku_socket'):
self.sock = None
self.socketfile = None
self.host = host
self.portno = portno
if socketfile:
self.socketfile = socketfile
def __enter__(self):
stm_type = socket.SOCK_STREAM
if self.socketfile:
net_type = socket.AF_UNIX
addr_obj = self.socketfile
else:
net_type = socket.AF_INET
addr_obj = (self.host, self.portno)
self.sock = socket.socket(net_type, stm_type)
        try:
            # connect() raises socket.error on failure rather than returning
            # an error status, so there is no return value to check here
            self.sock.connect(addr_obj)
        except socket.error:
            return None
        return self.sock
def __exit__(self, exc_type, exc_value, traceback):
self.sock.close()
if exc_type or exc_value or traceback:
return False
else:
return True
def send_command(ostr, host='localhost', portno=10888,
socketfile='/tmp/.record_roku_socket'):
''' send string to specified socket '''
with OpenUnixSocketClient(host, portno, socketfile) as sock:
if not sock:
return 'Failed to open socket'
sock.send(b'%s\n' % ostr)
return sock.recv(1024)
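# Illustrative usage (the command vocabulary is defined by whatever daemon
# listens on the socket, so 'status' here is a hypothetical command):
#
#   reply = send_command('status', socketfile='/tmp/.record_roku_socket')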
def get_output(val, host='localhost', portno=10888,
socketfile='/tmp/.record_roku_socket'):
print "Content-Type: text/html\n\n\n"
ostr = send_command(val, host, portno, socketfile)
if ostr:
print ostr.replace('command w', '').replace('command', '')
if __name__ == '__main__':
form = cgi.FieldStorage()
if "cmd" not in form:
print "<H1>Error</H1>"
print "Please fill in the cmd field."
else:
get_output(form['cmd'].value)
|
{
"content_hash": "b5ebfa2cd6b57817c9b5f5577d95bb15",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 66,
"avg_line_length": 27.64788732394366,
"alnum_prop": 0.5700458481915436,
"repo_name": "ddboline/roku_app",
"id": "71e3140bff133c163ac1dc2e1900cd9622229603",
"size": "2005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roku_app/cgibin/control_roku.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "36"
},
{
"name": "HTML",
"bytes": "8222"
},
{
"name": "Python",
"bytes": "74843"
},
{
"name": "Shell",
"bytes": "1829"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('blog', '0009_auto_20180607_1434'),
]
operations = [
migrations.AlterField(
model_name='blogindexpage',
name='page_ptr',
field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page'),
),
migrations.AlterField(
model_name='blogpage',
name='page_ptr',
field=models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page'),
),
migrations.AlterField(
model_name='blogpagetag',
name='tag',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_blogpagetag_items', to='taggit.Tag'),
),
]
|
{
"content_hash": "1e5aedb1febebd4286824bf86ab974ec",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 179,
"avg_line_length": 38.25925925925926,
"alnum_prop": 0.6379477250726041,
"repo_name": "ilendl2/wagtail-cookiecutter-foundation",
"id": "df08730af35954e2e7a6dcbdb5be0242a9c5f40d",
"size": "1080",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "{{cookiecutter.project_slug}}/blog/migrations/0010_auto_20180607_1804.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5146"
},
{
"name": "CSS",
"bytes": "102069"
},
{
"name": "HTML",
"bytes": "105186"
},
{
"name": "JavaScript",
"bytes": "4205"
},
{
"name": "Makefile",
"bytes": "10743"
},
{
"name": "Python",
"bytes": "216524"
}
],
"symlink_target": ""
}
|
import numpy as np
import tensorflow as tf
from classification import metadata
from . import vessel_characterization, fishing_detection
class ModelsTest(tf.test.TestCase):
num_feature_dimensions = 11
model_classes = [vessel_characterization.Model, fishing_detection.Model]
def _build_estimator(self, model_class):
vmd = metadata.VesselMetadata({}, {})
model = model_class(self.num_feature_dimensions, vmd, metrics='all')
return model.make_estimator("dummy_directory")
    def test_estimator_construction(self):
for i, model_class in enumerate(self.model_classes):
with self.test_session():
                # This protects against multiple models using the same variable names
with tf.variable_scope("training-test-{}".format(i)):
est = self._build_estimator(model_class)
# TODO: test input_fn
if __name__ == '__main__':
tf.test.main()
|
{
"content_hash": "c511a2304231349167af2d38a4d4f944",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 80,
"avg_line_length": 33.5,
"alnum_prop": 0.662046908315565,
"repo_name": "GlobalFishingWatch/vessel-classification",
"id": "c860c5619297217d292455c1217daf94559df389",
"size": "1533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "classification/models/models_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1054"
},
{
"name": "HTML",
"bytes": "94680"
},
{
"name": "Python",
"bytes": "325699"
},
{
"name": "Shell",
"bytes": "235"
}
],
"symlink_target": ""
}
|
"""
Checks that iterable metaclasses are recognized by pylint.
"""
# pylint: disable=missing-docstring,too-few-public-methods,no-init,no-self-use,unused-argument,bad-mcs-method-argument
# pylint: disable=wrong-import-position
# metaclasses as iterables
class Meta(type):
def __iter__(self):
return iter((1, 2, 3))
class SomeClass(metaclass=Meta):
pass
for i in SomeClass:
print(i)
for i in SomeClass(): # [not-an-iterable]
print(i)
import asyncio
@asyncio.coroutine
def coroutine_function_return_none():
return
@asyncio.coroutine
def coroutine_function_return_object():
return 12
@asyncio.coroutine
def coroutine_function_return_future():
return asyncio.Future()
@asyncio.coroutine
def coroutine_function_pass():
pass
@asyncio.coroutine
def coroutine_generator():
yield
@asyncio.coroutine
def main():
yield from coroutine_function_return_none()
yield from coroutine_function_return_object()
yield from coroutine_function_return_future()
yield from coroutine_function_pass()
yield from coroutine_generator()
|
{
"content_hash": "c5ce6cc9c09966fafeb3ba1db56f5b7a",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 118,
"avg_line_length": 19.8,
"alnum_prop": 0.7254361799816346,
"repo_name": "ruchee/vimrc",
"id": "34047eda0dfc971f7544a165ffd99470547c1789",
"size": "1089",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vimfiles/bundle/vim-python/submodules/pylint/tests/functional/i/iterable_context_py3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "22028"
},
{
"name": "Blade",
"bytes": "3314"
},
{
"name": "C#",
"bytes": "1734"
},
{
"name": "CSS",
"bytes": "31547"
},
{
"name": "Clojure",
"bytes": "47036"
},
{
"name": "CoffeeScript",
"bytes": "9274"
},
{
"name": "Common Lisp",
"bytes": "54314"
},
{
"name": "D",
"bytes": "11562"
},
{
"name": "Dockerfile",
"bytes": "7620"
},
{
"name": "Elixir",
"bytes": "41696"
},
{
"name": "Emacs Lisp",
"bytes": "10489"
},
{
"name": "Erlang",
"bytes": "137788"
},
{
"name": "F#",
"bytes": "2230"
},
{
"name": "Go",
"bytes": "54655"
},
{
"name": "HTML",
"bytes": "178954"
},
{
"name": "Haml",
"bytes": "39"
},
{
"name": "Haskell",
"bytes": "2031"
},
{
"name": "JavaScript",
"bytes": "9086"
},
{
"name": "Julia",
"bytes": "9540"
},
{
"name": "Kotlin",
"bytes": "8669"
},
{
"name": "Less",
"bytes": "327"
},
{
"name": "Makefile",
"bytes": "87500"
},
{
"name": "Mustache",
"bytes": "3375"
},
{
"name": "Nix",
"bytes": "1860"
},
{
"name": "PHP",
"bytes": "9238"
},
{
"name": "PLpgSQL",
"bytes": "33747"
},
{
"name": "Perl",
"bytes": "84200"
},
{
"name": "PostScript",
"bytes": "3891"
},
{
"name": "Python",
"bytes": "7366233"
},
{
"name": "Racket",
"bytes": "1150"
},
{
"name": "Raku",
"bytes": "21146"
},
{
"name": "Ruby",
"bytes": "133344"
},
{
"name": "SCSS",
"bytes": "327"
},
{
"name": "Sass",
"bytes": "308"
},
{
"name": "Scala",
"bytes": "13125"
},
{
"name": "Shell",
"bytes": "52916"
},
{
"name": "Smarty",
"bytes": "300"
},
{
"name": "Swift",
"bytes": "11436"
},
{
"name": "TypeScript",
"bytes": "4663"
},
{
"name": "Vim Script",
"bytes": "10545492"
},
{
"name": "Vim Snippet",
"bytes": "559139"
}
],
"symlink_target": ""
}
|
import yamlpal
from yamlpal.yaml_parser import YamlParser
from yamlpal import exceptions, dumper
import sys
import click
import re
import os
FORMATTING_EPILOG = """
\b
Format Strings:
Format strings determine how yamlpal will output values.
Available keys:
%{key} : Key of the match (or index if matching an item in a list)
%{value} : Value of the match
%{linenr} : Line number where the match occured
%{file} : Name of the file in which the match occured
%{literal} : Literal match in the file (original formatting)
\\n : New line
\\t : Tab
Default Format:
Yamlpal is smart about the format it uses by default
"%{key}: %{value}\\n" for atomic types (like string, int, float)
"%{value}\\n" for compound types like lists and maps
Examples:
$ yamlpal find "bill-to/address/city" --format "%{file} %{linenr}: %{value}"
/abs/path/to/examples/examples/sample1.yml 11: Royal Oak
$ yamlpal find "bill-to/address/city" --format "%{linenr} %{literal}"
11: city : Royal Oak
"""
@click.group(epilog="Run 'yamlpal <command> --help' for command specific help.")
@click.version_option(version=yamlpal.__version__)
def cli():
""" Modify and search yaml files while keeping the original formatting.
"""
def get_files(passed_files):
""" Determines which files are part of the list that will be manipulated by yamlpal by combining
the files that are passed as commandline arguments with the files that are passed via the stdin.
:param passed_files: list of files that is passed via cli flags (-f).
:return: list of files that will be manipulated by yamlpal
"""
all_files = []
all_files.extend(passed_files)
if not sys.stdin.isatty():
input_paths = sys.stdin.read().split("\n")
for input_path in input_paths:
if input_path.strip() != "":
all_files.append(os.path.abspath(input_path))
# TODO(jroovers): check if valid file paths
return all_files
def get_str_content(str_value):
""" Returns the string content of a passed yaml content value (for an insert/replace yamlpal operation).
If the passed str_value starts with an '@' then we attempt to treat the passed string as a filename and
read the contents of the file."""
if str_value.startswith("@"):
file_path = str_value[1:]
if not os.path.isfile(file_path):
click.echo("ERROR: Invalid file content path '%s'." % file_path, err=True)
exit(1)
with open(file_path, 'r') as f:
content = f.read()
# strip off newline at the end if it's there: insert/replace takes care of this
if content.endswith("\n"):
content = content[0:-1]
else:
# If we directly pass the string, strip whitespace and allow newline and tab chars
content = str_value.strip().replace("\\n", "\n").replace("\\t", "\t")
return content
@cli.command("insert")
@click.argument('yamlpath')
@click.argument('newcontent')
@click.option('-f', '--file', type=click.Path(exists=True, dir_okay=False, readable=True, resolve_path=True),
multiple=True, help="File to insert new content in. Can by specified multiple times to modify "
"multiple files. Files are not modified inline by default. "
"You can also provide (additional) file paths via stdin.")
@click.option('-i', '--inline', help="Edit file inline instead of dumping it to std out.", is_flag=True)
def insert(yamlpath, newcontent, file, inline):
""" Insert new content into a yaml file. """
newcontent = get_str_content(newcontent)
files = get_files(file)
for file in files:
insert_in_file(yamlpath, newcontent, file, inline)
@cli.command("find", epilog=FORMATTING_EPILOG)
@click.argument('yamlpath')
@click.option('-f', '--file', type=click.Path(exists=True, dir_okay=False, readable=True, resolve_path=True),
help="File to find content in.")
@click.option('-F', '--format', help="Format string in which matched content should be returned. "
"See the section 'Format Strings' below for details on format strings. "
"(default format depends on what is printed)",
default=dumper.AUTODETERMINE_FORMAT)
def find(yamlpath, file, format):
""" Find content in a yaml file. """
result = find_in_file(yamlpath, file, format)
click.echo(result, nl=False)
def find_in_file(needle, file, format):
# read yaml file
fp = open(file)
filecontents = fp.read()
fp.close()
# parse the file
data = YamlParser.load_yaml(filecontents)
try:
element = find_element(data, needle)
except exceptions.InvalidSearchStringException:
# TODO (jroovers): we should deduplicate this code. Best done by moving the core business logic
        # (like find_element) out of this module into its own module and then creating a wrapper function
# here that deals with exception handling
click.echo("ERROR: Invalid search string '%s' for file '%s'" % (needle, file), err=True)
exit(1)
return dumper.dump(file, filecontents, element, format)
@cli.command("check")
@click.argument('yamlpath')
@click.argument('check')
@click.option('-f', '--file', type=click.Path(exists=True, dir_okay=False, readable=True, resolve_path=True),
help="File to check content from.")
@click.pass_context
def check(ctx, yamlpath, check, file):
""" Check content of a yaml file """
result = find_in_file(yamlpath, file, "%{value}")
pattern = re.compile(check)
if not re.match(pattern, result):
msg = "Found value '{}' for yamlpath '{}' does not match expected value '{}'.".format(result, yamlpath, check)
click.echo(msg, err=True)
ctx.exit(1)
def insert_in_file(needle, newcontent, file, inline):
# read yaml file
fp = open(file)
filecontents = fp.read()
fp.close()
# parse yaml, find target line, inject new line
data = YamlParser.load_yaml(filecontents)
try:
element = find_element(data, needle)
except exceptions.InvalidSearchStringException:
click.echo("ERROR: Invalid search string '%s' for file '%s'" % (needle, file), err=True)
exit(1)
updated_filecontents = insert_line(element.line_end, newcontent, filecontents)
# write new content to file or stdout
if inline:
fp = open(file, "w")
fp.write(updated_filecontents)
fp.close()
else:
click.echo(updated_filecontents, nl=False)
def find_element(yaml_dict, search_str):
""" Given a dictionary representing a yaml document and a yaml path string, find the specified element in the
dictionary."""
# First split on / to determine which yaml dict we are searching in
dict_parts = search_str.split("/")
parsed_parts = []
for dict_part in dict_parts:
matches = re.match(r"(.*)(\[([0-9]+)\])", dict_part)
if matches:
list_name = matches.groups()[0]
list_index = int(matches.groups()[2])
parsed_parts.append(list_name)
parsed_parts.append(list_index)
else:
parsed_parts.append(dict_part)
# traverse the yaml path
node = yaml_dict
try:
for key in parsed_parts:
node = node[key]
except (KeyError, IndexError, TypeError):
raise exceptions.InvalidSearchStringException(search_str)
# Try accessing the line of the path we are currently on. If we can't access it,
# it means that the user has specified a path to a dict or list, without indicating an item within the
# dictionary or list.
try:
node.line
node.key = parsed_parts[-1] # add the last parsed key as the node's key
except AttributeError:
click.echo("ERROR: Path exists but not specific enough (%s)." % search_str, err=True)
exit(1)
return node
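# Illustrative search strings for find_element (the paths are hypothetical;
# any yaml document with matching keys works):
#
#   find_element(data, "bill-to/address/city")  # nested mapping lookup
#   find_element(data, "items[0]/price")        # list element via [n] index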
def insert_line(line_nr, new_content, filecontents):
lines = filecontents.split("\n")
# determine the size of indentation of the line we searched for so that we can use the same indentation
indentation_size = len(lines[line_nr]) - len(lines[line_nr].lstrip())
# copy indentation so we use the same whitespace characters (tab, space, mix of tab and space)
indentation_chars = lines[line_nr][0:indentation_size]
new_content = indentation_chars + new_content
lines.insert(line_nr + 1, new_content)
newfile = "\n".join(lines)
return newfile
if __name__ == "__main__":
cli()
|
{
"content_hash": "9ba449a3a25cdae8626cf3c7c71e8476",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 118,
"avg_line_length": 38.242290748898675,
"alnum_prop": 0.6432438659140652,
"repo_name": "jorisroovers/yamlpal",
"id": "ddaf9cf23176e9cad205b7a4e4fbcd10f80ee9dc",
"size": "8681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yamlpal/cli.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40898"
},
{
"name": "Shell",
"bytes": "2385"
}
],
"symlink_target": ""
}
|
import hashlib
from typing import List
def simple_split_tokenizer(value: str) -> List[str]:
"""Tokenize a string using a split on spaces."""
return value.split(" ")
def simple_hash(value: object, num_features: int) -> int:
"""Deterministically hash a value into the integer space."""
encoded_value = str(value).encode()
hashed_value = hashlib.sha1(encoded_value)
hashed_value_int = int(hashed_value.hexdigest(), 16)
return hashed_value_int % num_features
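# Illustrative usage sketch (not part of the original module): unlike the
# builtin hash() under hash randomization, simple_hash is stable across runs.
#
#   simple_split_tokenizer("feature hashing demo")  # -> ['feature', 'hashing', 'demo']
#   simple_hash("feature", 16)                      # same bucket in [0, 16) every run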
|
{
"content_hash": "b966260a16c5794e875820b77e9b7957",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 64,
"avg_line_length": 32.46666666666667,
"alnum_prop": 0.6940451745379876,
"repo_name": "ray-project/ray",
"id": "981f05ff1aedc35693e8ccd78a435f3e5cd5ead7",
"size": "487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/data/preprocessors/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
}
|
from hashlib import sha1
def grade(autogen, key):
secretkey = "my_key_here"
n = autogen.instance
flag = sha1((str(n) + secretkey).encode('utf-8')).hexdigest()
if flag.lower() in key.lower().strip():
return True, "Correct!"
else:
return False, "Try Again."
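# Illustrative note (not part of the original grader): for autogen instance n,
# the expected flag is sha1(str(n) + secretkey), e.g. for n = 42:
#
#   sha1(('42' + 'my_key_here').encode('utf-8')).hexdigest()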
|
{
"content_hash": "715d9adbde639f97c267b407a59b8468",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 65,
"avg_line_length": 26.727272727272727,
"alnum_prop": 0.608843537414966,
"repo_name": "stuyCTF/stuyCTF-Platform",
"id": "d75ea1f8fbf440b3007762e6cfa615a52b4d8ae9",
"size": "294",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "example_problems/web/hidden-message/grader/grader.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7294"
},
{
"name": "CoffeeScript",
"bytes": "51286"
},
{
"name": "HTML",
"bytes": "57602"
},
{
"name": "Python",
"bytes": "184241"
},
{
"name": "Shell",
"bytes": "4218"
}
],
"symlink_target": ""
}
|
import re
def read_line_from_file(file_name):
with open(file_name) as f:
for line in f:
yield line.lower()
def replace_pattern(generator, regex, s):
for string in generator:
Id, Class, source, tweet = string.split(',', 3)
tweet = re.sub(regex, s, tweet)
yield ','.join((Id, Class, source, tweet))
def replace(generator, pattern, s):
for string in generator:
yield string.replace(pattern, s)
def remove_punctuation(generator, punc_list):
for string in generator:
for punct in punc_list:
string = string.replace(punct, "")
yield string
def remove_comma(generator):
    # removes the sentiment source column too
for string in generator:
Id, Class, source, tweet = string.split(',', 3)
tweet = tweet.replace(',', '')
yield ','.join((Id, Class, tweet))
if __name__ == '__main__':
file_generator = read_line_from_file('sentiment_analysis_dataset.csv')
replace_urls = replace_pattern(file_generator, r"http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+", "")
replace_mentions = replace_pattern(replace_urls, r"\S*@(?:\[[^\]]+\]|\S+)", "")
remove_hash = replace(replace_mentions, "#", "")
remove_punct = remove_punctuation(remove_hash, ['!', '.', '?', ':', ';', '=', '(', ')', '[', ']', '`', '|', '-', '*', '"', '/', '\\', '_'])
generator = remove_comma(remove_punct)
with open('normalized_sentiment_analisys_dataset.csv', 'w') as f:
for line in generator:
f.write(line)
print "Done!"
|
{
"content_hash": "fbd0ea358c10248889a32f0235368d2b",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 143,
"avg_line_length": 36.7906976744186,
"alnum_prop": 0.5625790139064475,
"repo_name": "weslleymberg/Learning-Data-Mining",
"id": "867af25e90567584d36d6e7d33393bd935992bc8",
"size": "1582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sentiment_analysis/data_normalization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17226"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from example.users.models import User
admin.site.register(User, UserAdmin)
|
{
"content_hash": "3b444ae8e86872a97a3d5f4da6cfa9e5",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 47,
"avg_line_length": 26.333333333333332,
"alnum_prop": 0.8291139240506329,
"repo_name": "jezdez/django-authority",
"id": "3891187a14c2bc2f1aac1da4f97e5ccf4a9c618e",
"size": "158",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "example/users/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9459"
},
{
"name": "PLpgSQL",
"bytes": "1218"
},
{
"name": "Python",
"bytes": "91790"
}
],
"symlink_target": ""
}
|
'''Unit tests for Rake.py'''
__author__ = 'Vishwas B Sharma'
__author_email__ = 'sharma.vishwas88@gmail.com'
__version__ = '1.0.0'
import unittest
from collections import defaultdict
import nltk
from rake_nltk import Rake
###########################################################
class RakeUnitTest(unittest.TestCase):
def test_build_frequency_dist(self):
r = Rake()
phrase_list = [['red', 'apples'], ['good'], ['red'], ['flavour']]
freq = defaultdict(lambda: 0)
freq['apples'] = 1
freq['good'] = 1
freq['flavour'] = 1
freq['red'] = 2
r._build_frequency_dist(phrase_list)
self.assertEqual(r.get_word_frequency_distribution(), freq)
def test_build_word_co_occurance_graph(self):
r = Rake()
phrase_list = [['red', 'apples'], ['good'], ['red'], ['flavour']]
degree = defaultdict(lambda: 0)
degree['apples'] = 2
degree['good'] = 1
degree['flavour'] = 1
degree['red'] = 3
r._build_word_co_occurance_graph(phrase_list)
self.assertEqual(r.get_word_degrees(), degree)
def test_generate_phrases(self):
r = Rake()
sentences = [
'Red apples, are good in flavour.',
'Keywords, which we define as a sequence of one or more words, ' +
'provide a compact representation of a document\'s content'
]
phrase_list = {('red', 'apples'), ('good',), ('flavour',), ('keywords',),
('define',), ('sequence',), ('one',), ('words',),
('provide',), ('compact', 'representation'), ('document',),
('content',)}
self.assertEqual(r._generate_phrases(sentences), phrase_list)
def test_get_phrase_list_from_words(self):
r = Rake()
word_list = ['red', 'apples', ",", 'are', 'good', 'in', 'flavour']
phrase_list = [('red', 'apples'), ('good',), ('flavour',)]
self.assertEqual(r._get_phrase_list_from_words(word_list), phrase_list)
word_list = ['keywords', ",", 'which', 'we', 'define', 'as', 'a',
'sequence', 'of', 'one', 'or', 'more', 'words', ",",
'provide', 'a', 'compact', 'representation', 'of', 'a',
'document', '\'', 's', 'content']
phrase_list = [('keywords',), ('define',), ('sequence',), ('one',),
('words',), ('provide',), ('compact', 'representation'),
('document',), ('content',)]
self.assertEqual(r._get_phrase_list_from_words(word_list), phrase_list)
def test_extract_keywords_from_text(self):
r = Rake()
text = '''Compatibility of systems of linear constraints over the set of
natural numbers. Criteria of compatibility of a system of linear
Diophantine equations, strict inequations, and nonstrict inequations are
considered. Upper bounds for components of a minimal set of solutions
and algorithms of construction of minimal generating sets of solutions
for all types of systems are given. These criteria and the corresponding
algorithms for constructing a minimal supporting set of solutions can be
used in solving all the considered types of systems and systems of mixed
types.'''
r.extract_keywords_from_text(text)
ranked_phrases = [
'minimal generating sets', 'linear diophantine equations',
'minimal supporting set', 'minimal set', 'linear constraints',
'upper bounds', 'strict inequations', 'nonstrict inequations',
'natural numbers', 'mixed types', 'corresponding algorithms',
'considered types', 'set', 'types', 'considered', 'algorithms',
'used', 'systems', 'system', 'solving', 'solutions', 'given',
'criteria', 'construction', 'constructing', 'components',
'compatibility'
]
self.assertEqual(r.get_ranked_phrases(), ranked_phrases)
self.assertEqual([phrase for _, phrase in r.get_ranked_phrases_with_scores()], ranked_phrases)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "69d58244c0c5848ee472fb4c7ed84a34",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 102,
"avg_line_length": 40.98039215686274,
"alnum_prop": 0.5633971291866029,
"repo_name": "cgratie/rake-nltk",
"id": "69d965bf8ce8f7f9190c91845e7894a1fce87136",
"size": "4226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/rake_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13595"
}
],
"symlink_target": ""
}
|
"""A module to manipulate symbolic objects with indices including tensors
"""
from .indexed import IndexedBase, Idx, Indexed
from .index_methods import get_contraction_structure, get_indices
from .array import (MutableDenseNDimArray, ImmutableDenseNDimArray,
MutableSparseNDimArray, ImmutableSparseNDimArray, NDimArray, tensorproduct,
tensorcontraction, derive_by_array, permutedims, Array, DenseNDimArray,
SparseNDimArray,)
|
{
"content_hash": "dd4072f320413cdffe67b4f878ebfbd1",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 79,
"avg_line_length": 48.666666666666664,
"alnum_prop": 0.8127853881278538,
"repo_name": "madan96/sympy",
"id": "f0599ae5ffd5cbede94ba13cb5a49553200e12d5",
"size": "438",
"binary": false,
"copies": "26",
"ref": "refs/heads/master",
"path": "sympy/tensor/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "GCC Machine Description",
"bytes": "101"
},
{
"name": "Python",
"bytes": "15157811"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "3087"
},
{
"name": "XSLT",
"bytes": "366200"
}
],
"symlink_target": ""
}
|
import colander
import json
import logging
import os
import re
from cornice.service import Service
from pyramid.httpexceptions import HTTPBadRequest
from subprocess import CalledProcessError
from ..config import path
from ..subprocess_run import run
logger = logging.getLogger(__name__)
class JoinWifiSchema(colander.MappingSchema):
ssid = colander.SchemaNode(colander.String())
password = colander.SchemaNode(colander.String())
wifi_setup = Service(
name='wifi_setup',
path=path('setup/wifi'),
renderer='json',
accept='application/json')
@wifi_setup.get()
def scan_wifi_networks(request):
networks_path = request.registry.settings['wifi_networks_path']
if os.path.exists(networks_path):
with open(networks_path, "r") as networks_file:
return json.load(networks_file)
else:
return []
wifi_connection = Service(
name='wifi_connection',
path=path('setup/wifi/connection'),
renderer='json',
accept='application/json')
@wifi_connection.post(schema=JoinWifiSchema)
def join_network(request):
ssid = request.validated['ssid']
password = request.validated['password']
logger.debug("Trying to connect to network '%s'", ssid)
try:
run([
'sudo',
os.path.join(request.registry.settings['bin_path'], 'wifi_setup'),
'-c', request.registry.settings['config_ini_path'],
'join',
ssid,
password
], check=True)
logger.info("Joining network '%s' succeeded", ssid)
except CalledProcessError as e:
logger.error("Failed to join network '%s'", ssid)
raise HTTPBadRequest()
@wifi_connection.get()
def get_wifi_connection(request):
status = run([
'sudo',
os.path.join(request.registry.settings['bin_path'], 'wifi_setup'),
'-c', request.registry.settings['config_ini_path'],
'status'
]).stdout.decode('utf8')
ssid_match = re.search("^infra_ssid=(.+)$", status, flags=re.MULTILINE)
ssid = ssid_match.group(1) if ssid_match else None
if 'infra_status=connecting' in status:
status = 'connecting'
elif 'infra_status=connected' in status:
status = 'connected'
else:
status = 'unavailable'
return dict(
ssid=ssid,
status=status
)
wifi_adhoc = Service(
name='wifi_adhoc',
path=path('setup/wifi/adhoc'),
renderer='json',
accept='application/json')
@wifi_adhoc.get()
def get_wifi_adhoc(request):
return dict(
ssid=request.registry.settings['wifi_adhoc_ssid'],
status=os.path.exists(request.registry.settings['wifi_setup_flag_path']) and 'available' or 'unavailable')
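# Illustrative client sketch (hypothetical host; the endpoints are the ones
# registered above under path('setup/wifi...')):
#
#   import requests
#   requests.post('http://hub.local/setup/wifi/connection',
#                 json={'ssid': 'MyNetwork', 'password': 'secret'})
#   requests.get('http://hub.local/setup/wifi/connection').json()
#   # -> {'ssid': ..., 'status': 'connecting' | 'connected' | 'unavailable'}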
|
{
"content_hash": "a877d0ee8bc073b0411878e01279ea8b",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 114,
"avg_line_length": 27.09,
"alnum_prop": 0.6500553709856035,
"repo_name": "grunskis/nuimo-hub-backend",
"id": "c278cc7495dea1c72e42489cb84eaf941fcada8e",
"size": "2709",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "senic_hub/backend/views/setup_wifi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5842"
},
{
"name": "HTML",
"bytes": "1137"
},
{
"name": "JavaScript",
"bytes": "14980"
},
{
"name": "Makefile",
"bytes": "2237"
},
{
"name": "Python",
"bytes": "108024"
},
{
"name": "Ruby",
"bytes": "1574"
},
{
"name": "Shell",
"bytes": "745"
},
{
"name": "Vim script",
"bytes": "7381"
}
],
"symlink_target": ""
}
|
"""
The MIT License (MIT)
Copyright (c) 2013 Fatih Karatana
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@package
@date 31/10/15
@author fatih
@version 1.0.0
"""
__author__ = 'fatih'
__date__ = '31/10/15'
__version__ = ''
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='basic-todo',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='1.0.0',
description='A sample Python project',
long_description=long_description,
# The project's main homepage.
url='https://github.com/pypa/sampleproject',
# Author details
author='Fatih Karatana',
author_email='fatih@karatana.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
# What does your project relate to?
keywords='sample flask restful web application development',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
# py_modules=["my_module"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['Flask>=0.10'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={},
    # Although 'package_data' is the preferred approach, in some cases you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
    data_files=[('tasks', ['db/tasks.json']), ('users', ['db/users.json'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'basic-todo=app:main',
],
},
)
|
{
"content_hash": "e423d57ee6f731bb999d0c3787368cc8",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 94,
"avg_line_length": 36.971014492753625,
"alnum_prop": 0.6854174833398667,
"repo_name": "fatihzkaratana/basic-todo",
"id": "c776a70e4034db1b040c18f871b720d5213c1efc",
"size": "5118",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5069"
},
{
"name": "HTML",
"bytes": "6846"
},
{
"name": "JavaScript",
"bytes": "7389"
},
{
"name": "Python",
"bytes": "29126"
}
],
"symlink_target": ""
}
|
r"""OS routines for Mac, NT, or Posix depending on what system we're on.
This exports:
- all functions from posix, nt, os2, or ce, e.g. unlink, stat, etc.
- os.path is one of the modules posixpath, or ntpath
- os.name is 'posix', 'nt', 'os2', 'ce' or 'riscos'
- os.curdir is a string representing the current directory ('.' or ':')
- os.pardir is a string representing the parent directory ('..' or '::')
- os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
- os.extsep is the extension separator ('.' or '/')
- os.altsep is the alternate pathname separator (None or '/')
- os.pathsep is the component separator used in $PATH etc
- os.linesep is the line separator in text files ('\r' or '\n' or '\r\n')
- os.defpath is the default search path for executables
- os.devnull is the file path of the null device ('/dev/null', etc.)
Programs that import and use 'os' stand a better chance of being
portable between different platforms. Of course, they must then
only use functions that are defined by all platforms (e.g., unlink
and opendir), and leave all pathname manipulation to os.path
(e.g., split and join).
"""
#'
import sys, errno
_names = sys.builtin_module_names
# Note: more names are added to __all__ later.
__all__ = ["altsep", "curdir", "pardir", "sep", "extsep", "pathsep", "linesep",
"defpath", "name", "path", "devnull",
"SEEK_SET", "SEEK_CUR", "SEEK_END"]
def _get_exports_list(module):
try:
return list(module.__all__)
except AttributeError:
return [n for n in dir(module) if n[0] != '_']
name = 'java'
if 'posix' in _names:
_name = 'posix'
linesep = '\n'
from posix import *
try:
from posix import _exit
except ImportError:
pass
import posixpath as path
import posix
__all__.extend(_get_exports_list(posix))
del posix
elif 'nt' in _names:
_name = 'nt'
linesep = '\r\n'
from nt import *
try:
from nt import _exit
except ImportError:
pass
import ntpath as path
import nt
__all__.extend(_get_exports_list(nt))
del nt
elif 'os2' in _names:
_name = 'os2'
linesep = '\r\n'
from os2 import *
try:
from os2 import _exit
except ImportError:
pass
if sys.version.find('EMX GCC') == -1:
import ntpath as path
else:
import os2emxpath as path
from _emx_link import link
import os2
__all__.extend(_get_exports_list(os2))
del os2
elif 'ce' in _names:
_name = 'ce'
linesep = '\r\n'
from ce import *
try:
from ce import _exit
except ImportError:
pass
# We can use the standard Windows path.
import ntpath as path
import ce
__all__.extend(_get_exports_list(ce))
del ce
elif 'riscos' in _names:
_name = 'riscos'
linesep = '\n'
from riscos import *
try:
from riscos import _exit
except ImportError:
pass
import riscospath as path
import riscos
__all__.extend(_get_exports_list(riscos))
del riscos
elif 'ibmi' in _names:
_name = 'ibmi'
linesep = '\n'
from ibmi import *
try:
from ibmi import _exit
except ImportError:
pass
import posixpath as path
import ibmi
__all__.extend(_get_exports_list(ibmi))
del ibmi
else:
raise ImportError, 'no os specific module found'
sys.modules['os.path'] = path
from os.path import (curdir, pardir, sep, pathsep, defpath, extsep, altsep,
devnull)
del _names
# Python uses fixed values for the SEEK_ constants; they are mapped
# to native constants if necessary in posixmodule.c
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
#'
# Super directory utilities.
# (Inspired by Eric Raymond; the doc strings are mostly his)
def makedirs(name, mode=0777):
"""makedirs(path [, mode=0777])
Super-mkdir; create a leaf directory and all intermediate ones.
Works like mkdir, except that any intermediate path segment (not
just the rightmost) will be created if it does not exist. This is
recursive.
"""
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
if head and tail and not path.exists(head):
try:
makedirs(head, mode)
except OSError, e:
# be happy if someone already created the path
if e.errno != errno.EEXIST:
raise
if tail == curdir: # xxx/newdir/. exists if xxx/newdir exists
return
mkdir(name, mode)
def removedirs(name):
"""removedirs(path)
Super-rmdir; remove a leaf directory and all empty intermediate
ones. Works like rmdir except that, if the leaf directory is
successfully removed, directories corresponding to rightmost path
segments will be pruned away until either the whole path is
consumed or an error occurs. Errors during this latter phase are
ignored -- they generally mean that a directory was not empty.
"""
rmdir(name)
head, tail = path.split(name)
if not tail:
head, tail = path.split(head)
while head and tail:
try:
rmdir(head)
except error:
break
head, tail = path.split(head)
def renames(old, new):
"""renames(old, new)
Super-rename; create directories as necessary and delete any left
empty. Works like rename, except creation of any intermediate
directories needed to make the new pathname good is attempted
first. After the rename, directories corresponding to rightmost
    path segments of the old name will be pruned away until either the
whole path is consumed or a nonempty directory is found.
Note: this function can fail with the new directory structure made
if you lack permissions needed to unlink the leaf directory or
file.
"""
head, tail = path.split(new)
if head and tail and not path.exists(head):
makedirs(head)
rename(old, new)
head, tail = path.split(old)
if head and tail:
try:
removedirs(head)
except error:
pass
__all__.extend(["makedirs", "removedirs", "renames"])
def walk(top, topdown=True, onerror=None, followlinks=False):
"""Directory tree generator.
For each directory in the directory tree rooted at top (including top
itself, but excluding '.' and '..'), yields a 3-tuple
dirpath, dirnames, filenames
dirpath is a string, the path to the directory. dirnames is a list of
the names of the subdirectories in dirpath (excluding '.' and '..').
filenames is a list of the names of the non-directory files in dirpath.
Note that the names in the lists are just names, with no path components.
To get a full path (which begins with top) to a file or directory in
dirpath, do os.path.join(dirpath, name).
If optional arg 'topdown' is true or not specified, the triple for a
directory is generated before the triples for any of its subdirectories
(directories are generated top down). If topdown is false, the triple
for a directory is generated after the triples for all of its
subdirectories (directories are generated bottom up).
When topdown is true, the caller can modify the dirnames list in-place
(e.g., via del or slice assignment), and walk will only recurse into the
subdirectories whose names remain in dirnames; this can be used to prune
the search, or to impose a specific order of visiting. Modifying
dirnames when topdown is false is ineffective, since the directories in
dirnames have already been generated by the time dirnames itself is
generated.
By default errors from the os.listdir() call are ignored. If
optional arg 'onerror' is specified, it should be a function; it
will be called with one argument, an os.error instance. It can
report the error to continue with the walk, or raise the exception
to abort the walk. Note that the filename is available as the
filename attribute of the exception object.
By default, os.walk does not follow symbolic links to subdirectories on
systems that support them. In order to get this functionality, set the
optional argument 'followlinks' to true.
Caution: if you pass a relative pathname for top, don't change the
current working directory between resumptions of walk. walk never
changes the current directory, and assumes that the client doesn't
either.
Example:
import os
from os.path import join, getsize
for root, dirs, files in os.walk('python/Lib/email'):
print root, "consumes",
print sum([getsize(join(root, name)) for name in files]),
print "bytes in", len(files), "non-directory files"
if 'CVS' in dirs:
dirs.remove('CVS') # don't visit CVS directories
"""
from os.path import join, isdir, islink
# We may not have read permission for top, in which case we can't
# get a list of the files the directory contains. os.path.walk
# always suppressed the exception then, rather than blow up for a
# minor reason when (say) a thousand readable directories are still
# left to visit. That logic is copied here.
try:
# Note that listdir and error are globals in this module due
# to earlier import-*.
names = listdir(top)
except error, err:
if onerror is not None:
onerror(err)
return
dirs, nondirs = [], []
for name in names:
if isdir(join(top, name)):
dirs.append(name)
else:
nondirs.append(name)
if topdown:
yield top, dirs, nondirs
for name in dirs:
path = join(top, name)
if followlinks or not islink(path):
for x in walk(path, topdown, onerror, followlinks):
yield x
if not topdown:
yield top, dirs, nondirs
__all__.append("walk")
# Make sure os.environ exists, at least
try:
environ
except NameError:
environ = {}
def _exists(name):
# CPython eval's the name, whereas looking in __all__ works for
# Jython and is much faster
return name in __all__
if _exists('execv'):
def execl(file, *args):
"""execl(file, *args)
Execute the executable file with argument list args, replacing the
current process. """
execv(file, args)
def execle(file, *args):
"""execle(file, *args, env)
Execute the executable file with argument list args and
environment env, replacing the current process. """
env = args[-1]
execve(file, args[:-1], env)
def execlp(file, *args):
"""execlp(file, *args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process. """
execvp(file, args)
def execlpe(file, *args):
"""execlpe(file, *args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env, replacing the current
process. """
env = args[-1]
execvpe(file, args[:-1], env)
def execvp(file, args):
"""execp(file, args)
Execute the executable file (which is searched for along $PATH)
with argument list args, replacing the current process.
args may be a list or tuple of strings. """
_execvpe(file, args)
def execvpe(file, args, env):
"""execvpe(file, args, env)
Execute the executable file (which is searched for along $PATH)
with argument list args and environment env , replacing the
current process.
args may be a list or tuple of strings. """
_execvpe(file, args, env)
__all__.extend(["execl","execle","execlp","execlpe","execvp","execvpe"])
def _execvpe(file, args, env=None):
if env is not None:
func = execve
argrest = (args, env)
else:
func = execv
argrest = (args,)
env = environ
head, tail = path.split(file)
if head:
func(file, *argrest)
return
if 'PATH' in env:
envpath = env['PATH']
else:
envpath = defpath
PATH = envpath.split(pathsep)
saved_exc = None
saved_tb = None
for dir in PATH:
fullname = path.join(dir, file)
try:
func(fullname, *argrest)
except error, e:
tb = sys.exc_info()[2]
if (e.errno != errno.ENOENT and e.errno != errno.ENOTDIR
and saved_exc is None):
saved_exc = e
saved_tb = tb
if saved_exc:
raise error, saved_exc, saved_tb
raise error, e, tb
# Change environ to automatically call putenv() if it exists
try:
# This will fail if there's no putenv
putenv
except NameError:
pass
else:
# Fake unsetenv() for Windows
# not sure about os2 here but
# I'm guessing they are the same.
if name in ('os2', 'nt'):
def unsetenv(key):
putenv(key, "")
if _name == "riscos":
# On RISC OS, all env access goes through getenv and putenv
from riscosenviron import _Environ
elif _name in ('os2', 'nt'): # Where Env Var Names Must Be UPPERCASE
import UserDict
# But we store them as upper case
class _Environ(UserDict.IterableUserDict):
def __init__(self, environ):
UserDict.UserDict.__init__(self)
data = self.data
for k, v in environ.items():
data[k.upper()] = v
def __setitem__(self, key, item):
self.data[key.upper()] = item
def __getitem__(self, key):
return self.data[key.upper()]
def __delitem__(self, key):
del self.data[key.upper()]
def has_key(self, key):
return key.upper() in self.data
def __contains__(self, key):
return key.upper() in self.data
def get(self, key, failobj=None):
return self.data.get(key.upper(), failobj)
def update(self, dict=None, **kwargs):
if dict:
try:
keys = dict.keys()
except AttributeError:
# List of (key, value)
for k, v in dict:
self[k] = v
else:
# got keys
# cannot use items(), since mappings
# may not have them.
for k in keys:
self[k] = dict[k]
if kwargs:
self.update(kwargs)
def copy(self):
return dict(self)
environ = _Environ(environ)
def getenv(key, default=None):
"""Get an environment variable, return None if it doesn't exist.
The optional second argument can specify an alternate default."""
return environ.get(key, default)
__all__.append("getenv")
# Supply spawn*() (probably only for Unix)
if _exists("fork") and not _exists("spawnv") and _exists("execv"):
P_WAIT = 0
P_NOWAIT = P_NOWAITO = 1
# XXX Should we support P_DETACH? I suppose it could fork()**2
# and close the std I/O streams. Also, P_OVERLAY is the same
# as execv*()?
def _spawnvef(mode, file, args, env, func):
# Internal helper; func is the exec*() function to use
pid = fork()
if not pid:
# Child
try:
if env is None:
func(file, args)
else:
func(file, args, env)
except:
_exit(127)
else:
# Parent
if mode == P_NOWAIT:
return pid # Caller is responsible for waiting!
while 1:
wpid, sts = waitpid(pid, 0)
if WIFSTOPPED(sts):
continue
elif WIFSIGNALED(sts):
return -WTERMSIG(sts)
elif WIFEXITED(sts):
return WEXITSTATUS(sts)
else:
raise error, "Not stopped, signaled or exited???"
def spawnv(mode, file, args):
"""spawnv(mode, file, args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execv)
def spawnve(mode, file, args, env):
"""spawnve(mode, file, args, env) -> integer
Execute file with arguments from args in a subprocess with the
specified environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execve)
# Note: spawnvp[e] isn't currently supported on Windows
def spawnvp(mode, file, args):
"""spawnvp(mode, file, args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, None, execvp)
def spawnvpe(mode, file, args, env):
"""spawnvpe(mode, file, args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return _spawnvef(mode, file, args, env, execvpe)
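# Illustrative sketch, not part of the original module: typical spawn*()
# usage. With P_WAIT the call blocks and returns the child's exit status;
# with P_NOWAIT it returns the pid, which the caller must reap itself
# (e.g. via waitpid). '/bin/true' is a hypothetical path.
def _demo_spawn_modes():
    status = spawnv(P_WAIT, '/bin/true', ('true',))
    pid = spawnv(P_NOWAIT, '/bin/true', ('true',))
    return status, pid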
if _exists("spawnv"):
# These aren't supplied by the basic Windows code
# but can be easily implemented in Python
def spawnl(mode, file, *args):
"""spawnl(mode, file, *args) -> integer
Execute file with arguments from args in a subprocess.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnv(mode, file, args)
def spawnle(mode, file, *args):
"""spawnle(mode, file, *args, env) -> integer
Execute file with arguments from args in a subprocess with the
supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnve(mode, file, args[:-1], env)
__all__.extend(["spawnv", "spawnve", "spawnl", "spawnle",])
if _exists("spawnvp"):
# At the moment, Windows doesn't implement spawnvp[e],
# so it won't have spawnlp[e] either.
def spawnlp(mode, file, *args):
"""spawnlp(mode, file, *args) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
return spawnvp(mode, file, args)
def spawnlpe(mode, file, *args):
"""spawnlpe(mode, file, *args, env) -> integer
Execute file (which is looked for along $PATH) with arguments from
args in a subprocess with the supplied environment.
If mode == P_NOWAIT return the pid of the process.
If mode == P_WAIT return the process's exit code if it exits normally;
otherwise return -SIG, where SIG is the signal that killed it. """
env = args[-1]
return spawnvpe(mode, file, args[:-1], env)
__all__.extend(["spawnvp", "spawnvpe", "spawnlp", "spawnlpe",])
# Supply popen2 etc. (for Unix)
if sys.platform.startswith('java') or _exists("fork"):
if not _exists("popen2"):
def popen2(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout) are returned."""
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
close_fds=True)
return p.stdin, p.stdout
__all__.append("popen2")
if not _exists("popen3"):
def popen3(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout, child_stderr) are returned."""
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=PIPE, close_fds=True)
return p.stdin, p.stdout, p.stderr
__all__.append("popen3")
if not _exists("popen4"):
def popen4(cmd, mode="t", bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. On UNIX, 'cmd'
may be a sequence, in which case arguments will be passed directly to
the program without shell intervention (as with os.spawnv()). If 'cmd'
is a string it will be passed to the shell (as with os.system()). If
'bufsize' is specified, it sets the buffer size for the I/O pipes. The
file objects (child_stdin, child_stdout_stderr) are returned."""
import subprocess
PIPE = subprocess.PIPE
p = subprocess.Popen(cmd, shell=isinstance(cmd, basestring),
bufsize=bufsize, stdin=PIPE, stdout=PIPE,
stderr=subprocess.STDOUT, close_fds=True)
return p.stdin, p.stdout
__all__.append("popen4")
if not _exists("urandom"):
def urandom(n):
"""urandom(n) -> str
Return a string of n random bytes suitable for cryptographic use.
"""
try:
_urandomfd = open("/dev/urandom", O_RDONLY)
except (OSError, IOError):
raise NotImplementedError("/dev/urandom (or equivalent) not found")
bytes = ""
while len(bytes) < n:
bytes += read(_urandomfd, n - len(bytes))
close(_urandomfd)
return bytes
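# Illustrative sketch, not part of the original module: urandom() loops
# over read() until exactly n bytes have been collected, so the result
# length is always n.
def _demo_urandom():
    token = urandom(16)
    assert len(token) == 16
    return token.encode('hex')  # hex-encode for display (Python 2 idiom)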
# Supply os.popen()
def popen(cmd, mode='r', bufsize=-1):
"""popen(command [, mode='r' [, bufsize]]) -> pipe
Open a pipe to/from a command returning a file object.
"""
if not isinstance(cmd, (str, unicode)):
raise TypeError('invalid cmd type (%s, expected string)' % type(cmd))
if mode not in ('r', 'w'):
raise ValueError("invalid mode %r" % mode)
import subprocess
if mode == 'r':
proc = subprocess.Popen(cmd, bufsize=bufsize, shell=True,
stdout=subprocess.PIPE)
fp = proc.stdout
elif mode == 'w':
proc = subprocess.Popen(cmd, bufsize=bufsize, shell=True,
stdin=subprocess.PIPE)
fp = proc.stdin
# files from subprocess are in binary mode but popen needs text mode
fp = fdopen(fp.fileno(), mode, bufsize)
return _wrap_close(fp, proc)
# Helper for popen() -- a proxy for a file whose close waits for the process
class _wrap_close(object):
def __init__(self, stream, proc):
self._stream = stream
self._proc = proc
def close(self):
self._stream.close()
returncode = self._proc.wait()
if returncode == 0:
return None
        if _name == 'nt':
            return returncode
        else:
            # Shift left to match the old wait()-style status encoding
            # that the original popen implementations returned on POSIX.
            return returncode << 8
def __getattr__(self, name):
return getattr(self._stream, name)
def __iter__(self):
return iter(self._stream)
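# Illustrative sketch, not part of the original module: popen() usage.
# close() returns None when the command exits with status 0, and the
# encoded exit status otherwise.
def _demo_popen():
    pipe = popen('echo hello', 'r')
    output = pipe.read()
    status = pipe.close()  # None on success
    return output, status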
|
{
"content_hash": "10ae1b2b6b67ceb0f404b943bb05cb21",
"timestamp": "",
"source": "github",
"line_count": 721,
"max_line_length": 83,
"avg_line_length": 34.83495145631068,
"alnum_prop": 0.6051919095397357,
"repo_name": "nvoron23/socialite",
"id": "f2561e88d798fa6d5ea133052c3b949e47819956",
"size": "25116",
"binary": false,
"copies": "19",
"ref": "refs/heads/master",
"path": "jython/Lib/os.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "GAP",
"bytes": "35416"
},
{
"name": "Groff",
"bytes": "21"
},
{
"name": "HTML",
"bytes": "111577"
},
{
"name": "Java",
"bytes": "2253475"
},
{
"name": "Python",
"bytes": "10833034"
},
{
"name": "R",
"bytes": "752"
},
{
"name": "Shell",
"bytes": "29299"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
}
|
import os
from lxml import etree
from oslo.config import cfg
from nova import exception
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import images
libvirt_opts = [
cfg.BoolOpt('libvirt_snapshot_compression',
default=False,
help='Compress snapshot images when possible. This '
'currently applies exclusively to qcow2 images'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_opts)
CONF.import_opt('instances_path', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
def execute(*args, **kwargs):
return utils.execute(*args, **kwargs)
def get_iscsi_initiator():
"""Get iscsi initiator name for this machine."""
# NOTE(vish) openiscsi stores initiator name in a file that
# needs root permission to read.
contents = utils.read_file_as_root('/etc/iscsi/initiatorname.iscsi')
for l in contents.split('\n'):
if l.startswith('InitiatorName='):
return l[l.index('=') + 1:].strip()
def get_fc_hbas():
"""Get the Fibre Channel HBA information."""
try:
out, err = execute('systool', '-c', 'fc_host', '-v',
run_as_root=True)
    except exception.ProcessExecutionError as exc:
        # rootwrap exit code 96 means the command could not be found
        if exc.exit_code == 96:
            LOG.warn(_("systool is not installed"))
        return []
if out is None:
raise RuntimeError(_("Cannot find any Fibre Channel HBAs"))
lines = out.split('\n')
# ignore the first 2 lines
lines = lines[2:]
hbas = []
hba = {}
lastline = None
for line in lines:
line = line.strip()
# 2 newlines denotes a new hba port
if line == '' and lastline == '':
if len(hba) > 0:
hbas.append(hba)
hba = {}
else:
val = line.split('=')
if len(val) == 2:
key = val[0].strip().replace(" ", "")
value = val[1].strip()
hba[key] = value.replace('"', '')
lastline = line
return hbas
def get_fc_hbas_info():
"""Get Fibre Channel WWNs and device paths from the system, if any."""
    # Note: modern Linux kernels contain the FC HBAs in /sys
    # and they are obtainable via the systool app
hbas = get_fc_hbas()
hbas_info = []
for hba in hbas:
wwpn = hba['port_name'].replace('0x', '')
wwnn = hba['node_name'].replace('0x', '')
device_path = hba['ClassDevicepath']
device = hba['ClassDevice']
hbas_info.append({'port_name': wwpn,
'node_name': wwnn,
'host_device': device,
'device_path': device_path})
return hbas_info
def get_fc_wwpns():
"""Get Fibre Channel WWPNs from the system, if any."""
    # Note: modern Linux kernels contain the FC HBAs in /sys
    # and they are obtainable via the systool app
hbas = get_fc_hbas()
wwpns = []
if hbas:
for hba in hbas:
if hba['port_state'] == 'Online':
wwpn = hba['port_name'].replace('0x', '')
wwpns.append(wwpn)
return wwpns
def get_fc_wwnns():
"""Get Fibre Channel WWNNs from the system, if any."""
    # Note: modern Linux kernels contain the FC HBAs in /sys
    # and they are obtainable via the systool app
hbas = get_fc_hbas()
wwnns = []
if hbas:
for hba in hbas:
if hba['port_state'] == 'Online':
wwnn = hba['node_name'].replace('0x', '')
wwnns.append(wwnn)
return wwnns
def create_image(disk_format, path, size):
"""Create a disk image
:param disk_format: Disk image format (as known by qemu-img)
:param path: Desired location of the disk image
:param size: Desired size of disk image. May be given as an int or
a string. If given as an int, it will be interpreted
as bytes. If it's a string, it should consist of a number
with an optional suffix ('K' for Kibibytes,
                 'M' for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
If no suffix is given, it will be interpreted as bytes.
"""
execute('qemu-img', 'create', '-f', disk_format, path, size)
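# Illustrative sketch, not part of the original module: two equivalent
# ways to request a 10 GiB qcow2 image, since qemu-img accepts both a
# byte count and a suffixed size string. The paths are hypothetical.
def _demo_create_image():
    create_image('qcow2', '/tmp/disk-a.qcow2', '10G')
    create_image('qcow2', '/tmp/disk-b.qcow2', 10 * 1024 ** 3)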
def create_cow_image(backing_file, path, size=None):
"""Create COW image
Creates a COW image with the given backing file
:param backing_file: Existing image on which to base the COW image
:param path: Desired location of the COW image
"""
base_cmd = ['qemu-img', 'create', '-f', 'qcow2']
cow_opts = []
if backing_file:
cow_opts += ['backing_file=%s' % backing_file]
base_details = images.qemu_img_info(backing_file)
else:
base_details = None
# This doesn't seem to get inherited so force it to...
# http://paste.ubuntu.com/1213295/
# TODO(harlowja) probably file a bug against qemu-img/qemu
if base_details and base_details.cluster_size is not None:
cow_opts += ['cluster_size=%s' % base_details.cluster_size]
# For now don't inherit this due the following discussion...
# See: http://www.gossamer-threads.com/lists/openstack/dev/10592
# if 'preallocation' in base_details:
# cow_opts += ['preallocation=%s' % base_details['preallocation']]
if base_details and base_details.encryption:
cow_opts += ['encryption=%s' % base_details.encryption]
if size is not None:
cow_opts += ['size=%s' % size]
if cow_opts:
# Format as a comma separated list
csv_opts = ",".join(cow_opts)
cow_opts = ['-o', csv_opts]
cmd = base_cmd + cow_opts + [path]
execute(*cmd)
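# Illustrative sketch, not part of the original module: a COW overlay on
# top of an existing base image. The overlay starts out empty and inherits
# cluster size and encryption settings from the base; the paths are
# hypothetical.
def _demo_create_cow_image():
    create_cow_image('/var/lib/nova/_base/base.img', '/tmp/overlay.qcow2')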
def create_lvm_image(vg, lv, size, sparse=False):
"""Create LVM image.
Creates a LVM image with given size.
:param vg: existing volume group which should hold this image
:param lv: name for this image (logical volume)
    :param size: size of image in bytes
    :param sparse: create sparse logical volume
"""
vg_info = get_volume_group_info(vg)
free_space = vg_info['free']
def check_size(vg, lv, size):
if size > free_space:
raise RuntimeError(_('Insufficient Space on Volume Group %(vg)s.'
' Only %(free_space)db available,'
' but %(size)db required'
' by volume %(lv)s.') % locals())
if sparse:
preallocated_space = 64 * 1024 * 1024
check_size(vg, lv, preallocated_space)
if free_space < size:
LOG.warning(_('Volume group %(vg)s will not be able'
' to hold sparse volume %(lv)s.'
' Virtual volume size is %(size)db,'
' but free space on volume group is'
' only %(free_space)db.') % locals())
cmd = ('lvcreate', '-L', '%db' % preallocated_space,
'--virtualsize', '%db' % size, '-n', lv, vg)
else:
check_size(vg, lv, size)
cmd = ('lvcreate', '-L', '%db' % size, '-n', lv, vg)
execute(*cmd, run_as_root=True, attempts=3)
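# Illustrative sketch, not part of the original module: a 1 GiB sparse
# logical volume. With sparse=True only 64 MiB is preallocated up front
# and the rest is allocated on demand; the names are hypothetical.
def _demo_create_lvm_image():
    create_lvm_image('nova-volumes', 'instance-0001_disk',
                     1024 ** 3, sparse=True)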
def get_volume_group_info(vg):
"""Return free/used/total space info for a volume group in bytes
:param vg: volume group name
:returns: A dict containing:
:total: How big the filesystem is (in bytes)
:free: How much space is free (in bytes)
:used: How much space is used (in bytes)
"""
out, err = execute('vgs', '--noheadings', '--nosuffix',
'--separator', '|',
'--units', 'b', '-o', 'vg_size,vg_free', vg,
run_as_root=True)
info = out.split('|')
if len(info) != 2:
raise RuntimeError(_("vg %s must be LVM volume group") % vg)
return {'total': int(info[0]),
'free': int(info[1]),
'used': int(info[0]) - int(info[1])}
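# Illustrative sketch, not part of the original module: checking volume
# group headroom before provisioning, using the dict returned above.
# 'nova-volumes' is a hypothetical volume group name.
def _demo_vg_has_room(required_bytes):
    info = get_volume_group_info('nova-volumes')
    return info['free'] >= required_bytes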
def list_logical_volumes(vg):
"""List logical volumes paths for given volume group.
:param vg: volume group name
"""
out, err = execute('lvs', '--noheadings', '-o', 'lv_name', vg,
run_as_root=True)
return [line.strip() for line in out.splitlines()]
def logical_volume_info(path):
"""Get logical volume info.
:param path: logical volume path
"""
out, err = execute('lvs', '-o', 'vg_all,lv_all',
'--separator', '|', path, run_as_root=True)
info = [line.split('|') for line in out.splitlines()]
if len(info) != 2:
raise RuntimeError(_("Path %s must be LVM logical volume") % path)
return dict(zip(*info))
def logical_volume_size(path):
"""Get logical volume size in bytes.
:param path: logical volume path
"""
    # TODO(p-draigbrady): Possibly replace with the more general
    # use of blockdev --getsize64 in future
out, _err = execute('lvs', '-o', 'lv_size', '--noheadings', '--units',
'b', '--nosuffix', path, run_as_root=True)
return int(out)
def clear_logical_volume(path):
"""Obfuscate the logical volume.
:param path: logical volume path
"""
# TODO(p-draigbrady): We currently overwrite with zeros
# but we may want to make this configurable in future
# for more or less security conscious setups.
vol_size = logical_volume_size(path)
bs = 1024 * 1024
direct_flags = ('oflag=direct',)
sync_flags = ()
remaining_bytes = vol_size
# The loop caters for versions of dd that
# don't support the iflag=count_bytes option.
while remaining_bytes:
zero_blocks = remaining_bytes / bs
seek_blocks = (vol_size - remaining_bytes) / bs
zero_cmd = ('dd', 'bs=%s' % bs,
'if=/dev/zero', 'of=%s' % path,
'seek=%s' % seek_blocks, 'count=%s' % zero_blocks)
zero_cmd += direct_flags
zero_cmd += sync_flags
if zero_blocks:
utils.execute(*zero_cmd, run_as_root=True)
remaining_bytes %= bs
bs /= 1024 # Limit to 3 iterations
# Use O_DIRECT with initial block size and fdatasync otherwise
direct_flags = ()
sync_flags = ('conv=fdatasync',)
def remove_logical_volumes(*paths):
"""Remove one or more logical volume."""
for path in paths:
clear_logical_volume(path)
if paths:
lvremove = ('lvremove', '-f') + paths
execute(*lvremove, attempts=3, run_as_root=True)
def pick_disk_driver_name(is_block_dev=False):
"""Pick the libvirt primary backend driver name
If the hypervisor supports multiple backend drivers, then the name
attribute selects the primary backend driver name, while the optional
type attribute provides the sub-type. For example, xen supports a name
of "tap", "tap2", "phy", or "file", with a type of "aio" or "qcow2",
while qemu only supports a name of "qemu", but multiple types including
"raw", "bochs", "qcow2", and "qed".
:param is_block_dev:
:returns: driver_name or None
"""
if CONF.libvirt_type == "xen":
if is_block_dev:
return "phy"
else:
return "tap"
elif CONF.libvirt_type in ('kvm', 'qemu'):
return "qemu"
else:
# UML doesn't want a driver_name set
return None
def get_disk_size(path):
"""Get the (virtual) size of a disk image
:param path: Path to the disk image
:returns: Size (in bytes) of the given disk image as it would be seen
by a virtual machine.
"""
size = images.qemu_img_info(path).virtual_size
return int(size)
def get_disk_backing_file(path, basename=True):
"""Get the backing file of a disk image
:param path: Path to the disk image
:returns: a path to the image's backing store
"""
backing_file = images.qemu_img_info(path).backing_file
if backing_file and basename:
backing_file = os.path.basename(backing_file)
return backing_file
def copy_image(src, dest, host=None):
"""Copy a disk image to an existing directory
:param src: Source image
:param dest: Destination path
:param host: Remote host
"""
if not host:
# We shell out to cp because that will intelligently copy
# sparse files. I.E. holes will not be written to DEST,
# rather recreated efficiently. In addition, since
# coreutils 8.11, holes can be read efficiently too.
execute('cp', src, dest)
else:
dest = "%s:%s" % (host, dest)
# Try rsync first as that can compress and create sparse dest files.
# Note however that rsync currently doesn't read sparse files
# efficiently: https://bugzilla.samba.org/show_bug.cgi?id=8918
# At least network traffic is mitigated with compression.
try:
# Do a relatively light weight test first, so that we
# can fall back to scp, without having run out of space
# on the destination for example.
execute('rsync', '--sparse', '--compress', '--dry-run', src, dest)
except exception.ProcessExecutionError:
execute('scp', src, dest)
else:
execute('rsync', '--sparse', '--compress', src, dest)
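# Illustrative sketch, not part of the original module: a local copy and a
# remote copy. When host is given, rsync is attempted first and scp is the
# fallback; the host name and paths are hypothetical.
def _demo_copy_image():
    copy_image('/tmp/disk.qcow2', '/var/lib/nova/instances/disk')
    copy_image('/tmp/disk.qcow2', '/var/lib/nova/instances/disk',
               host='compute-02')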
def write_to_file(path, contents, umask=None):
"""Write the given contents to a file
:param path: Destination file
:param contents: Desired contents of the file
:param umask: Umask to set when creating this file (will be reset)
"""
if umask:
saved_umask = os.umask(umask)
try:
with open(path, 'w') as f:
f.write(contents)
finally:
if umask:
os.umask(saved_umask)
def chown(path, owner):
"""Change ownership of file or directory
:param path: File or directory whose ownership to change
:param owner: Desired new owner (given as uid or username)
"""
execute('chown', owner, path, run_as_root=True)
def create_snapshot(disk_path, snapshot_name):
"""Create a snapshot in a disk image
:param disk_path: Path to disk image
:param snapshot_name: Name of snapshot in disk image
"""
qemu_img_cmd = ('qemu-img', 'snapshot', '-c', snapshot_name, disk_path)
# NOTE(vish): libvirt changes ownership of images
execute(*qemu_img_cmd, run_as_root=True)
def delete_snapshot(disk_path, snapshot_name):
"""Create a snapshot in a disk image
:param disk_path: Path to disk image
:param snapshot_name: Name of snapshot in disk image
"""
qemu_img_cmd = ('qemu-img', 'snapshot', '-d', snapshot_name, disk_path)
# NOTE(vish): libvirt changes ownership of images
execute(*qemu_img_cmd, run_as_root=True)
def extract_snapshot(disk_path, source_fmt, snapshot_name, out_path, dest_fmt):
"""Extract a named snapshot from a disk image
    :param disk_path: Path to disk image
    :param source_fmt: Format of the source disk image
    :param snapshot_name: Name of snapshot in disk image
    :param out_path: Desired path of extracted snapshot
    :param dest_fmt: Desired format of the extracted snapshot
    """
# NOTE(markmc): ISO is just raw to qemu-img
if dest_fmt == 'iso':
dest_fmt = 'raw'
qemu_img_cmd = ('qemu-img', 'convert', '-f', source_fmt, '-O', dest_fmt)
# Conditionally enable compression of snapshots.
if CONF.libvirt_snapshot_compression and dest_fmt == "qcow2":
qemu_img_cmd += ('-c',)
# When snapshot name is omitted we do a basic convert, which
# is used by live snapshots.
if snapshot_name is not None:
qemu_img_cmd += ('-s', snapshot_name)
qemu_img_cmd += (disk_path, out_path)
execute(*qemu_img_cmd)
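# Illustrative sketch, not part of the original module: extracting a named
# qcow2 snapshot into a standalone raw image; the paths and snapshot name
# are hypothetical.
def _demo_extract_snapshot():
    extract_snapshot('/var/lib/nova/instances/disk', 'qcow2',
                     'snap-1', '/tmp/snap-1.raw', 'raw')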
def load_file(path):
"""Read contents of file
:param path: File to read
"""
with open(path, 'r') as fp:
return fp.read()
def file_open(*args, **kwargs):
"""Open file
see built-in file() documentation for more details
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return file(*args, **kwargs)
def file_delete(path):
"""Delete (unlink) file
Note: The reason this is kept in a separate module is to easily
be able to provide a stub module that doesn't alter system
state at all (for unit tests)
"""
return os.unlink(path)
def find_disk(virt_dom):
"""Find root device path for instance
May be file or device"""
xml_desc = virt_dom.XMLDesc(0)
domain = etree.fromstring(xml_desc)
if CONF.libvirt_type == 'lxc':
source = domain.find('devices/filesystem/source')
disk_path = source.get('dir')
disk_path = disk_path[0:disk_path.rfind('rootfs')]
disk_path = os.path.join(disk_path, 'disk')
else:
source = domain.find('devices/disk/source')
disk_path = source.get('file') or source.get('dev')
if not disk_path:
raise RuntimeError(_("Can't retrieve root device path "
"from instance libvirt configuration"))
return disk_path
def get_disk_type(path):
"""Retrieve disk type (raw, qcow2, lvm) for given file."""
if path.startswith('/dev'):
return 'lvm'
return images.qemu_img_info(path).file_format
def get_fs_info(path):
"""Get free/used/total space info for a filesystem
:param path: Any dirent on the filesystem
:returns: A dict containing:
:free: How much space is free (in bytes)
:used: How much space is used (in bytes)
:total: How big the filesystem is (in bytes)
"""
hddinfo = os.statvfs(path)
total = hddinfo.f_frsize * hddinfo.f_blocks
free = hddinfo.f_frsize * hddinfo.f_bavail
used = hddinfo.f_frsize * (hddinfo.f_blocks - hddinfo.f_bfree)
return {'total': total,
'free': free,
'used': used}
def fetch_image(context, target, image_id, user_id, project_id):
"""Grab image."""
images.fetch_to_raw(context, image_id, target, user_id, project_id)
def get_instance_path(instance, forceold=False):
"""Determine the correct path for instance storage.
This method determines the directory name for instance storage, while
handling the fact that we changed the naming style to something more
unique in the grizzly release.
:param instance: the instance we want a path for
:param forceold: force the use of the pre-grizzly format
:returns: a path to store information about that instance
"""
pre_grizzly_name = os.path.join(CONF.instances_path, instance['name'])
if forceold or os.path.exists(pre_grizzly_name):
return pre_grizzly_name
return os.path.join(CONF.instances_path, instance['uuid'])
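# Illustrative sketch, not part of the original module: the grizzly layout
# keys instance storage on the uuid, falling back to the older name-based
# directory when one already exists on disk. The instance dict below is
# hypothetical.
def _demo_instance_path():
    instance = {'name': 'instance-00000001',
                'uuid': 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'}
    return get_instance_path(instance)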
|
{
"content_hash": "6a2a7f911c6f645111212d0ae498e5a6",
"timestamp": "",
"source": "github",
"line_count": 578,
"max_line_length": 79,
"avg_line_length": 32.38235294117647,
"alnum_prop": 0.5981193567345194,
"repo_name": "dstroppa/openstack-smartos-nova-grizzly",
"id": "fc73c2759c01e9fa330f1b34a0375d55f2f4a765",
"size": "19711",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nova/virt/libvirt/utils.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "8950641"
},
{
"name": "Shell",
"bytes": "17067"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from frappe.model.document import Document
class DocField(Document):
__doclink__ = "https://frappe.io/docs/models/core/docfield"
pass
|
{
"content_hash": "c78f9c902b140b9261f7b7acc06b886f",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 60,
"avg_line_length": 25.428571428571427,
"alnum_prop": 0.7584269662921348,
"repo_name": "gangadharkadam/v5_frappe",
"id": "9d7121073d569bbeec04d26fb104976ba8b89e0c",
"size": "282",
"binary": false,
"copies": "4",
"ref": "refs/heads/v5.0",
"path": "frappe/core/doctype/docfield/docfield.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "152220"
},
{
"name": "HTML",
"bytes": "111716"
},
{
"name": "JavaScript",
"bytes": "1227854"
},
{
"name": "Python",
"bytes": "969285"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
}
|
class TrelloBoardWorkflow(object):
def __init__(self, name, list_name_order, done_list_name_order):
self.name = name
self.list_name_order = list_name_order
self.done_list_names = done_list_name_order
# Attributes that will need external initialization
self.lists = None
self.done_lists = None
self.all_lists = None
def init_lists(self, lists, done_lists):
"""
Initializes all the lists of the configuration
:param lists:
:param done_lists:
:return:
"""
self.lists = lists
self.done_lists = done_lists
# We want to preserve the order of the lists
self.all_lists = []
all_lists_dict = {}
for list_ in lists + done_lists:
if list_.id not in all_lists_dict:
all_lists_dict[list_.id] = list_
self.all_lists.append(list_)
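# Illustrative sketch, not part of the original module: wiring a workflow
# up with list objects. _FakeList is a hypothetical stand-in for whatever
# list type the Trello API client returns; only an 'id' attribute is used.
def _demo_workflow():
    class _FakeList(object):
        def __init__(self, id_):
            self.id = id_
    wf = TrelloBoardWorkflow('dev board', ['To Do', 'Doing'], ['Done'])
    wf.init_lists([_FakeList('a'), _FakeList('b')], [_FakeList('c')])
    return [list_.id for list_ in wf.all_lists]  # order preserved, deduped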
|
{
"content_hash": "b8e2e628d905f851f0088c6d82a08dfb",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 68,
"avg_line_length": 31.862068965517242,
"alnum_prop": 0.5703463203463204,
"repo_name": "diegojromerolopez/pystats-trello",
"id": "cf6e69e927caefcf291548e3c63d34c05523a713",
"size": "925",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stats/trelloboardworkflow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64032"
}
],
"symlink_target": ""
}
|
import sys
sys.path.insert(1, "../../../")
import h2o
######################################################
#
# Sample Running GBM on prostate.csv
def prostateGBM(ip,port):
# Connect to a pre-existing cluster
h2o.init(ip,port) # connect to localhost:54321
df = h2o.import_frame(path=h2o.locate("smalldata/logreg/prostate.csv"))
df.describe()
# Remove ID from training frame
train = df.drop("ID")
# For VOL & GLEASON, a zero really means "missing"
vol = train['VOL']
vol[vol == 0] = None
gle = train['GLEASON']
gle[gle == 0] = None
# Convert CAPSULE to a logical factor
train['CAPSULE'] = train['CAPSULE'].asfactor()
# See that the data is ready
train.describe()
# Run GBM
my_gbm = h2o.gbm( y=train["CAPSULE"],
validation_y=train["CAPSULE"],
x=train[1:],
validation_x=train[1:],
ntrees=50,
learn_rate=0.1,
distribution="bernoulli")
my_gbm.show()
my_gbm_metrics = my_gbm.model_performance(train)
my_gbm_metrics.show()
my_gbm_metrics #.show(criterion=my_gbm_metrics.theCriteria.PRECISION)
if __name__ == "__main__":
h2o.run_test(sys.argv, prostateGBM)
|
{
"content_hash": "b7c9ef74e1f081228a22f63133fec214",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 73,
"avg_line_length": 26.425531914893618,
"alnum_prop": 0.5660225442834138,
"repo_name": "ChristosChristofidis/h2o-3",
"id": "84c54141a783f9430cde49e0efd8a6cc8d5db5fa",
"size": "1242",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_algos/gbm/pyunit_prostateGBM.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "163561"
},
{
"name": "CoffeeScript",
"bytes": "261942"
},
{
"name": "Emacs Lisp",
"bytes": "8914"
},
{
"name": "Groovy",
"bytes": "78"
},
{
"name": "HTML",
"bytes": "105430"
},
{
"name": "Java",
"bytes": "5223547"
},
{
"name": "JavaScript",
"bytes": "88331"
},
{
"name": "Makefile",
"bytes": "31513"
},
{
"name": "Python",
"bytes": "1900641"
},
{
"name": "R",
"bytes": "1611030"
},
{
"name": "Rebol",
"bytes": "23302"
},
{
"name": "Ruby",
"bytes": "299"
},
{
"name": "Scala",
"bytes": "16336"
},
{
"name": "Shell",
"bytes": "44528"
}
],
"symlink_target": ""
}
|
"""
WSGI config for trialproj project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "trialproj.settings")
application = get_wsgi_application()
|
{
"content_hash": "8cde4ac6486e9a756252d4dad5e03f19",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.6875,
"alnum_prop": 0.7721518987341772,
"repo_name": "dictoss/websockettools",
"id": "54e696c7a0c9afeee8d5c2bf6bbe1ff9aec8c03f",
"size": "395",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wsgi_trial/trialproj/trialproj/wsgi.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "47179"
},
{
"name": "Shell",
"bytes": "2770"
}
],
"symlink_target": ""
}
|
#/****************************************************************************
#* THIS file should be DUPLICATED IN BOTH *
#* Engine\Platform\Win32\CreateRedist *
#* AND OpenNI\Platform\Win32\CreateRedist *
#* *
#****************************************************************************/
#/****************************************************************************
#* *
#* PrimeSense OpenNI & Sensor *
#* Copyright (C) 2010 PrimeSense Ltd. *
#* *
#* This file is part of PrimeSense Common. *
#* *
#* PrimeSense OpenNI & Sensor are free software: you can redistribute *
#* it and/or modify *
#* it under the terms of the GNU Lesser General Public License as published *
#* by the Free Software Foundation, either version 3 of the License, or *
#* (at your option) any later version. *
#* *
#* PrimeSense OpenNI & Sensor are distributed in the hope that they will *
#* be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Lesser General Public License for more details. *
#* *
#* You should have received a copy of the GNU Lesser General Public License *
#* along with PrimeSense OpenNI & Sensor. *
#* If not, see <http://www.gnu.org/licenses/>. *
#* *
#****************************************************************************/
#-------------Imports----------------------------------------------------------#
from xml.dom.minidom import parse, parseString
import win32con,pywintypes,win32api
from time import strftime
import logging
import glob
import os
import re
import sys
import subprocess
import shutil
import stat
import threading
#-------------Functions--------------------------------------------------------#
def is_64_bit_platform():
result = False
import platform
(bits,linkage) = platform.architecture()
matchObject = re.search('64',bits)
result = matchObject is not None
return result
def write_dependencides(sln_file, all_samples,sample):
sln_file.write("\tProjectSection(ProjectDependencies) = postProject\n")
for depend in sample.dependencies:
sln_file.write("\t\t" + all_samples[depend].project_guid + " = " + all_samples[depend].project_guid + "\n")
sln_file.write("\tEndProjectSection\n")
def remove_readonly(path):
for root, dirs, files in os.walk(path):
for fname in files:
full_path = os.path.join(root, fname)
os.chmod(full_path ,stat.S_IWRITE)
def regx_replace(findStr,repStr,filePath):
"replaces all findStr by repStr in file filePath using regualr expression"
findStrRegx = re.compile(findStr)
tempName=filePath+'~~~'
input = open(filePath)
output = open(tempName,'w')
for s in input:
output.write(findStrRegx.sub(repStr,s))
output.close()
input.close()
os.remove(filePath)
os.rename(tempName,filePath)
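# Illustrative sketch, not part of the original script: bumping a version
# string in place with regx_replace(). The file name and pattern are
# hypothetical.
def _demo_regx_replace():
    regx_replace(r"VERSION \d+\.\d+\.\d+", "VERSION 1.2.3", "Version.h")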
def get_reg_values(reg_key, value_list):
# open the reg key
try:
reg_key = win32api.RegOpenKeyEx(*reg_key)
except pywintypes.error as e:
raise Exception("Failed to open registry key!")
# Get the values
try:
values = [(win32api.RegQueryValueEx(reg_key, name), data_type) for name, data_type in value_list]
# values list of ((value, type), expected_type)
for (value, data_type), expected in values:
if data_type != expected:
raise Exception("Bad registry value type! Expected %d, got %d instead." % (expected, data_type))
# values okay, leave only values
values = [value for ((value, data_type), expected) in values]
except pywintypes.error as e:
raise Exception("Failed to get registry value!")
finally:
try:
win32api.RegCloseKey(reg_key)
except pywintypes.error as e:
# We don't care if reg key close failed...
pass
return tuple(values)
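# Illustrative sketch, not part of the original script: reading the VS2010
# install directory, mirroring how init_vs_vars() below uses
# get_reg_values().
def _demo_reg_lookup():
    key = (win32con.HKEY_LOCAL_MACHINE,
           r"SOFTWARE\Microsoft\VisualStudio\10.0")
    (install_dir,) = get_reg_values(key, [("InstallDir", win32con.REG_SZ)])
    return install_dir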
#-------------Classes----------------------------------------------------------#
class SampleData(object):
def __init__(self):
self.name = ''
self.project_dir = ''
self.source_dir = ''
self.project_guid = ''
self.project_file = ''
self.is_net = False
self.is_other = False
self.project_name = ''
self.redist_dir = ''
self.dependencies = []
    def print_(self):
        print(self.name)
def __str__(self):
return self.name
def __eval__(self):
return self.name
class RedistBase(object):
def __init__(self):
self.SCRIPT_DIR = ''
self.VC_version = 0
self.vc_build_bits = ""
self.output_dir = ""
self.final_dir = ""
self.config_xml_filename = ""
self.redist_name = ""
self.redist_internal_name = ''
self.product_name = ''
self.doxy_file_name = ""
self.write_2010_sample_dependency = False
self.all_samples = None
self.internal_conf_name = ''
self.TIMEOUT_UPGRADE_VS10_SEC = 180
self.project_is_2010 = False
def finish_script(self,exit_code):
os.chdir(self.SCRIPT_DIR)
#logging.shutdown()
exit(exit_code)
def check_args(self,args):
"""
        Parse command-line args: Doxygen flag, build bitness, full-rebuild flag and an optional VC version.
"""
if len(sys.argv) not in [4,5]:
print ("Args: <Doxygen:y/n> <BuildTarget:32/64> <FullRebuild:y/n> [<VCVersion:9/10>]")
exit(1)
if sys.argv[1] == 'y' or sys.argv[1] == 'Yes':
self.Make_Doxy=1
elif sys.argv[1] == 'n' or sys.argv[1] == 'No':
self.Make_Doxy=0
else:
print("Args: <Doxygen:y/n> <BuildTarget:32/64> <FullRebuild:y/n>")
print("Doxygen param must be y or n!")
exit(1)
if sys.argv[2] == '32':
self.vc_build_bits = "32"
elif sys.argv[2] == '64':
self.vc_build_bits = "64"
else:
print("Args: <Doxygen:y/n> <BuildTarget:32/64> <FullRebuild:y/n>")
print("BuildTarget param must be 32 or 64!")
exit(1)
if sys.argv[3] == 'y' or sys.argv[3] == 'Yes':
self.vc_build_type = "/Rebuild"
elif sys.argv[3] == 'n' or sys.argv[3] == 'No':
self.vc_build_type = "/Build"
else:
print("Args: <Doxygen:y/n> <BuildTarget:32/64> <FullRebuild:y/n>")
print("FullRebuild param must be y or n!")
exit(1)
if self.project_is_2010:
self.VC_version = 10
if len(sys.argv) > 4:
if sys.argv[4] == '9':
print("Project does not support VS2008!")
exit(1)
else:
self.VC_version = 9
if len(sys.argv) > 4:
if sys.argv[4] == '10':
self.VC_version = 10
def init_vs_vars(self):
"""
        Checks for the availability of a Visual Studio installation to compile with.
        Currently supports VS2008 (vc9) and VS2010 (vc10).
        On 64-bit platforms the registry key lives under Software\Wow6432Node\...
"""
self.VS_NEED_UPGRADE = 0
try:
if self.is_64_bit_platform:
MSVC_KEY = (win32con.HKEY_LOCAL_MACHINE, r"SOFTWARE\Wow6432Node\Microsoft\VisualStudio\9.0")
else:
MSVC_KEY = (win32con.HKEY_LOCAL_MACHINE, r"SOFTWARE\Microsoft\VisualStudio\9.0")
MSVC_VALUES = [("InstallDir", win32con.REG_SZ)]
self.VS_INST_DIR = get_reg_values(MSVC_KEY, MSVC_VALUES)[0]
except Exception as e:
self.VC_version = 10
if self.VC_version == 10:
if not self.project_is_2010:
self.VS_NEED_UPGRADE = 1
if self.is_64_bit_platform:
MSVC_KEY = (win32con.HKEY_LOCAL_MACHINE, r"SOFTWARE\Wow6432Node\Microsoft\VisualStudio\10.0")
else:
MSVC_KEY = (win32con.HKEY_LOCAL_MACHINE, r"SOFTWARE\Microsoft\VisualStudio\10.0")
MSVC_VALUES = [("InstallDir", win32con.REG_SZ)]
self.VS_INST_DIR = get_reg_values(MSVC_KEY, MSVC_VALUES)[0]
def init_vars(self):
"""
        Initialize misc variables.
Among them: is_64_bit_platform
"""
self.inst_proj_path = 'Platform\\Win32\\Install\\%s\\'%self.redist_internal_name
self.is_64_bit_platform = is_64_bit_platform()
self.DateTimeSTR = strftime("%Y-%m-%d %H:%M:%S")
DateSTR = strftime("%Y-%m-%d")
CONFIG_XML = parse(self.config_xml_filename)
self.WORK_DIR = str(CONFIG_XML.getElementsByTagName("WORK_DIR")[0].firstChild.data)
        # Fix to allow relative path
os.chdir(self.WORK_DIR)
self.WORK_DIR = os.getcwd() + "\\"
os.chdir(self.SCRIPT_DIR)
self.VER = str(CONFIG_XML.getElementsByTagName("VERSION_NUMBER")[0].firstChild.data)
PLATFORM = str(CONFIG_XML.getElementsByTagName("PLATFORM")[0].firstChild.data)
self.PROJECT_SLN = str(CONFIG_XML.getElementsByTagName("PROJECT_SLN")[0].firstChild.data)
self.SAMPLES_SLN = str(CONFIG_XML.getElementsByTagName("SAMPLES_SLN")[0].firstChild.data)
self.PROJECT_NAME = str(CONFIG_XML.getElementsByTagName("PROJECT")[0].firstChild.data)
self.BUILD_DIR = os.path.join(self.WORK_DIR, "Platform", "Win32", "Build")
ver_regx = re.compile("SDK \d.*\s")
global samples_proj_list
global samples_guid_list
global samples_guid_list_net
samples_proj_list = []
samples_guid_list = []
samples_guid_list_net = []
if self.vc_build_bits=="32":
self.bin_dir = "Bin"
self.lib_dir = "Lib"
self.vc_build_platform = "Win32"
self.output_dir = "Output32"
self.final_dir = "Final32"
else:
self.bin_dir = "Bin64"
self.lib_dir = "Lib64"
self.vc_build_platform = "x64"
self.output_dir = "Output64"
self.final_dir = "Final64"
def init_logger(self,name):
"""
        Initializes the logger.
"""
#-------------Log--------------------------------------------------------------#
out_path = os.path.join(self.SCRIPT_DIR,self.output_dir)
if not(os.path.exists(out_path)):
os.mkdir(out_path)
self.logger = logging.getLogger(name)
hdlr = logging.FileHandler(os.path.join(self.output_dir, '%s_redist_maker.log'%name))
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
self.logger.addHandler(hdlr)
self.logger.setLevel(logging.INFO)
def print_message(self):
"""
TO DO: fix that function in Redist_OpenNi to resemble this (better) version.
"""
        #------------Welcome Message---------------------------------------------------#
print("\n");
print("*********************************")
print(("* PrimeSense " + self.redist_name + " Redist *"))
print(("* " + self.DateTimeSTR + " *"))
print("*********************************")
def build_proj_solution(self):
#--------------Build Project---------------------------------------------------#
print(("* Building " + self.PROJECT_NAME + "..."))
path2output = os.path.join(self.SCRIPT_DIR,self.output_dir)
if not os.path.exists(path2output):
os.makedirs(path2output)
path2final = os.path.join(self.SCRIPT_DIR,self.final_dir)
if not os.path.exists(path2final):
os.makedirs(path2final)
self.logger.info(("Building " + self.PROJECT_NAME + "..."))
# Set Intel Env
os.system("set INTEL_LICENSE_FILE=C:\\Program Files\\Common Files\\Intel\\Licenses")
# Build project solution
os.chdir(self.WORK_DIR + self.PROJECT_SLN.rpartition("\\")[0])
print((os.getcwd()))
out_file = os.path.join(self.SCRIPT_DIR, self.output_dir, "Build"+self.PROJECT_NAME+".txt")
upg_out_file = os.path.join(self.SCRIPT_DIR, self.output_dir, "Build"+self.PROJECT_NAME+"UPG"+".txt")
if self.VS_NEED_UPGRADE == 1:
os.system("attrib -r * /s")
devenv_upg_cmd_str = "\""+self.VS_INST_DIR + "devenv\" " + self.PROJECT_SLN.rpartition("\\")[2]+\
" /upgrade > " + upg_out_file
print(('upgrading. out put in %s'%out_file))
print(('command is %s'%devenv_upg_cmd_str))
my_rc = os.system(devenv_upg_cmd_str)
print(('upgrading finished w result %d'%my_rc))
#devenv_upg_cmd = Command(devenv_upg_cmd_str)
#devenv_upg_cmd.run(self.TIMEOUT_UPGRADE_VS10_SEC)
devenv_cmd = '\"'+self.VS_INST_DIR + 'devenv\" ' + self.PROJECT_SLN.rpartition("\\")[2]+\
" " + self.vc_build_type + " \"release|" + self.vc_build_platform + "\" /out " + \
out_file
self.logger.debug('Calling vs : %s'%devenv_cmd)
rc = subprocess.call(devenv_cmd, close_fds=True)
self.logger.debug('Calling vs, RC: %d'%rc)
# Get the build output
lines = open(out_file).readlines()
build_result = lines[-2]
print(build_result)
self.logger.info(build_result)
# Check for failed build
failed_builds = 0
fail_to_open = 0
temp = re.search("(\d*) failed",build_result)
if temp != None :
failed_builds = int(temp.group(1))
temp2 = re.search('cannot be opened',str(lines))
if temp2 != None :
fail_to_open = 1
if failed_builds != 0 or fail_to_open !=0:
print("Building Failed!!")
self.logger.critical("Building Failed!")
self.finish_script(1)
# return to work dir
os.chdir(self.WORK_DIR)
def make_doxy(self):
"""
Make Doxy.
"""
if self.Make_Doxy==1:
print("* Creating Doxygen...")
self.logger.info("Creating Doxygen...")
os.chdir(os.path.join(self.WORK_DIR,"Source\\Doxygen"));
# Replacing version number in the doxygen setup file
res = os.system("attrib -r " + self.doxy_file_name)
print(('removing readonly attribute for Doxyfile: ' + str(res)))
regx_replace(self.redist_name + " \d*.\d*.\d*\s",self.PROJECT_NAME +" " + self.VER + " ",self.doxy_file_name)
if os.path.exists(self.WORK_DIR + "\\Source\\Doxygen\\html\\"):
os.system("rmdir /S /Q html")
# Running doxygen
os.system("mkdir html > null")
#os.system("copy PSSmallLogo.jpg html > null") // where is this file ?
doxy_out_file = os.path.join(self.SCRIPT_DIR,self.output_dir,self.PROJECT_NAME + "Doxy.txt")
doxygen_cmd = "doxygen.exe %s > " % (self.doxy_file_name) + doxy_out_file
os.system(doxygen_cmd)
self.copy_doxy_files()
os.chdir(self.WORK_DIR)
else:
print("Skipping Doxygen...")
def create_redist_dir(self):
"""
Creates directory structure for Redist.
"""
#-------------Create Redist Dir------------------------------------------------#
print("* Creating Redist Dir...")
self.logger.info("Creating Redist Dir...")
os.chdir(os.path.join(self.WORK_DIR,"Platform","Win32"))
# Removing the old directory
os.system("rmdir /S /Q Redist")
# Creating new directory tree
os.system("mkdir Redist")
os.system("mkdir Redist\\" + self.bin_dir)
os.system("mkdir Redist\\" + self.lib_dir)
os.system("mkdir Redist\\Include")
os.system("mkdir Redist\\Documentation")
os.system("mkdir Redist\\Samples")
os.system("mkdir Redist\\Samples\\" + self.bin_dir)
os.system("mkdir Redist\\Samples\\" + self.bin_dir + "\\Debug")
os.system("mkdir Redist\\Samples\\" + self.bin_dir + "\\Release")
os.system("mkdir Redist\\Samples\\Build")
os.system("mkdir Redist\\Samples\\Res")
os.system("mkdir Redist\\Data")
os.chdir(self.WORK_DIR)
def find_samples(self):
# returns a dictionary of all samples
all_samples = dict()
os.chdir(self.WORK_DIR)
samples_list = os.listdir(os.path.join(self.BUILD_DIR,"Samples"))
if '.svn' in samples_list:
samples_list.remove('.svn')
for sample in samples_list:
sample_data = SampleData()
sample_data.name = sample
sample_data.source_dir = os.path.join(self.WORK_DIR, "Samples", sample)
sample_data.project_dir = os.path.join(self.BUILD_DIR, "Samples", sample)
            sample_data.is_other = False
vc_proj_name = sample_data.project_dir + "\\" + sample + ".vcproj"
cs_proj_name = sample_data.project_dir + "\\" + sample + ".csproj"
vcx_proj_name = sample_data.project_dir + "\\" + sample + ".vcxproj"
other_proj_name = os.path.join(sample_data.project_dir, "Build.bat")
# check if this is a VC project
if os.path.exists(vc_proj_name):
sample_data.project_file = vc_proj_name
# open it
prj = open(vc_proj_name, 'r')
lines = prj.readlines()
for line in lines:
# Search for name
if sample_data.project_name == "":
ProjNametmp = re.search(r"Name=\"(.*)\"",line)
if (ProjNametmp != None):
sample_data.project_name = ProjNametmp.group(1)
# Search for GUID
if sample_data.project_guid == "":
ProjGUIDtmp = re.search(r"ProjectGUID=\"(.*)\"",line)
if (ProjGUIDtmp != None):
sample_data.project_guid = ProjGUIDtmp.group(1)
prj.close()
elif os.path.exists(vcx_proj_name):
# a VC project for VS 2010
sample_data.project_file = vcx_proj_name
sample_data.project_name = sample
# open it
prj = open(vcx_proj_name, 'r')
lines = prj.readlines()
for line in lines:
# Search for name
if sample_data.project_name == "":
ProjNametmp = re.search(r"Name=\"(.*)\"",line)
if (ProjNametmp != None):
sample_data.project_name = ProjNametmp.group(1)
# Search for GUID
if sample_data.project_guid == "":
ProjGUIDtmp = re.search(r"<ProjectGuid>(.*)</ProjectGuid>", line)
if (ProjGUIDtmp != None):
sample_data.project_guid = ProjGUIDtmp.group(1)
prj.close()
elif os.path.exists(cs_proj_name):
# a .NET project
sample_data.project_file = cs_proj_name
sample_data.is_net = True
# open it
prj = open(cs_proj_name, 'r')
lines = prj.readlines()
for line in lines:
# Search for name
if sample_data.project_name == "":
ProjNametmp = re.search(r"<AssemblyName>(.*)</AssemblyName>",line)
if (ProjNametmp != None):
sample_data.project_name = ProjNametmp.group(1)
# Search for GUID
if sample_data.project_guid == "":
ProjGUIDtmp = re.search(r"<ProjectGuid>(.*)</ProjectGuid>",line)
if (ProjGUIDtmp != None):
sample_data.project_guid = ProjGUIDtmp.group(1)
prj.close()
elif os.path.exists(other_proj_name):
# some other type of project (java?)
sample_data.project_file = other_proj_name
sample_data.is_other = True
sample_data.project_name = sample
else:
print(('Sample ' + sample + ' does not have a valid project file'))
self.finish_script(1)
# check if it has a special configuration
redist_file_name = sample_data.source_dir + "\\.redist"
if os.path.exists(redist_file_name):
redist_file = open(redist_file_name, 'r')
for line in redist_file.readlines():
                # search for dependencies
match = re.search("^DEPENDS=(.*)$", line)
if match != None:
sample_data.dependencies.append(match.group(1))
redist_file.close()
sample_data.redist_dir = "Redist\\Samples\\" + sample_data.name
all_samples[sample_data.name] = sample_data
return all_samples
def get_samples(self):
# returns a dictionary of all samples
if self.all_samples == None:
self.all_samples = self.find_samples()
return self.all_samples
def build_other_proj(self, build_dir):
# build other (not Visual Studio) project
ret = subprocess.call(os.path.join(build_dir, "Build.bat") + " " + self.vc_build_bits)
if ret != 0:
print(("Building project " + build_dir + " failed!"))
self.logger.critical("Building project " + build_dir + " failed!")
self.finish_script(1)
def build_other_samples(self):
"Builds other samples (java?)"
all_samples = self.get_samples()
for sample in list(all_samples.values()):
if not sample.is_other:
continue
build_dir = os.path.join(self.BUILD_DIR, "Samples", sample.name)
self.build_other_proj(build_dir)
def creating_samples(self):
"""
        Not exactly the same; some further work is needed.
"""
#-------Creating samples-------------------------------------------------------#
print("* Creating samples...")
self.logger.info("Creating samples...")
all_samples = self.get_samples()
os.chdir(os.path.join(self.WORK_DIR, "Platform", "Win32"))
# open all solution files
if not self.project_is_2010:
OUTFILESLN2008 = open("Redist\\Samples\\Build\\All_2008.sln",'w')
OUTFILESLN2008.write("Microsoft Visual Studio Solution File, Format Version 10.00\n")
OUTFILESLN2008.write("# Visual Studio 2008\n")
if not self.project_is_2010:
OUTFILESLN2010 = open("Redist\\Samples\\Build\\All_2010.sln",'w')
else:
OUTFILESLN2010 = open("Redist\\Samples\\Build\\All.sln",'w')
OUTFILESLN2010.write("Microsoft Visual Studio Solution File, Format Version 11.00\n")
OUTFILESLN2010.write("# Visual Studio 2010\n")
# copy java build script
try:
shutil.copy(os.path.join("Build", "BuildJava.py"), os.path.join("Redist", "Samples", "Build"))
except:
pass
# add projects
for sample in list(all_samples.values()):
# make dir
os.system ("mkdir " + sample.redist_dir)
# copy source
os.system ("xcopy /S " + sample.source_dir + " " + sample.redist_dir)
if sample.is_other:
shutil.copy(sample.project_file, sample.redist_dir)
else:
# copy the project file to 2008 and 2010:
                prj_name_partitioned = os.path.splitext(sample.project_file)
prj2008_filename = sample.redist_dir + "\\" + sample.name + "_2008" + prj_name_partitioned[1]
if self.project_is_2010:
prj2010_filename = sample.redist_dir + "\\" + sample.name + prj_name_partitioned[1]
else:
prj2010_filename = sample.redist_dir + "\\" + sample.name + "_2010" + prj_name_partitioned[1]
if not self.project_is_2010:
shutil.copy(sample.project_file, prj2008_filename)
shutil.copy(sample.project_file, prj2010_filename)
            # create relative path to samples
prj2008_path = "..\\" + prj2008_filename.partition("\\")[2].partition("\\")[2]
prj2010_path = "..\\" + prj2010_filename.partition("\\")[2].partition("\\")[2]
# add project to solution
if not self.project_is_2010:
OUTFILESLN2008.write("Project(\"{19091980-2008-4CFA-1491-04CC20D8BCF9}\") = \""+\
sample.project_name + "\", \"" + prj2008_path + "\", \"" + sample.project_guid + "\"\n")
OUTFILESLN2010.write("Project(\"{19091980-2008-4CFA-1491-04CC20D8BCF9}\") = \""+\
sample.project_name + "\", \"" + prj2010_path + "\", \"" + sample.project_guid + "\"\n")
# write down dependencies
if len(sample.dependencies) > 0:
if not self.project_is_2010:
OUTFILESLN2008.write("\tProjectSection(ProjectDependencies) = postProject\n")
for depend in sample.dependencies:
OUTFILESLN2008.write("\t\t" + all_samples[depend].project_guid + " = " + all_samples[depend].project_guid + "\n")
OUTFILESLN2008.write("\tEndProjectSection\n")
if self.write_2010_sample_dependency == True:
write_dependencides(OUTFILESLN2010,all_samples,sample)
if not self.project_is_2010:
OUTFILESLN2008.write("EndProject\n")
OUTFILESLN2010.write("EndProject\n")
# Close files
if not self.project_is_2010:
OUTFILESLN2008.write("Global\n")
OUTFILESLN2008.write(" GlobalSection(SolutionConfigurationPlatforms) = preSolution\n")
OUTFILESLN2008.write(" Debug|Win32 = Debug|Win32\n")
OUTFILESLN2008.write(" Debug|x64 = Debug|x64\n")
OUTFILESLN2008.write(" Release|Win32 = Release|Win32\n")
OUTFILESLN2008.write(" Release|x64 = Release|x64\n")
OUTFILESLN2008.write(" EndGlobalSection\n")
OUTFILESLN2008.write(" GlobalSection(ProjectConfigurationPlatforms) = postSolution\n")
OUTFILESLN2010.write("Global\n")
OUTFILESLN2010.write(" GlobalSection(SolutionConfigurationPlatforms) = preSolution\n")
OUTFILESLN2010.write(" Debug|Win32 = Debug|Win32\n")
OUTFILESLN2010.write(" Debug|x64 = Debug|x64\n")
OUTFILESLN2010.write(" Release|Win32 = Release|Win32\n")
OUTFILESLN2010.write(" Release|x64 = Release|x64\n")
OUTFILESLN2010.write(" EndGlobalSection\n")
OUTFILESLN2010.write(" GlobalSection(ProjectConfigurationPlatforms) = postSolution\n")
for sample in list(all_samples.values()):
conf_32_name = "Win32"
if sample.is_net:
conf_32_name = "x86"
if not self.project_is_2010:
OUTFILESLN2008.write(" " + sample.project_guid + ".Debug|Win32.ActiveCfg = Debug|" + conf_32_name + "\n")
OUTFILESLN2008.write(" " + sample.project_guid + ".Debug|Win32.Build.0 = Debug|" + conf_32_name + "\n")
OUTFILESLN2008.write(" " + sample.project_guid + ".Debug|x64.ActiveCfg = Debug|x64\n")
OUTFILESLN2008.write(" " + sample.project_guid + ".Debug|x64.Build.0 = Debug|x64\n")
OUTFILESLN2008.write(" " + sample.project_guid + ".Release|Win32.ActiveCfg = Release|" + conf_32_name + "\n")
OUTFILESLN2008.write(" " + sample.project_guid + ".Release|Win32.Build.0 = Release|" + conf_32_name + "\n")
OUTFILESLN2008.write(" " + sample.project_guid + ".Release|x64.ActiveCfg = Release|x64\n")
OUTFILESLN2008.write(" " + sample.project_guid + ".Release|x64.Build.0 = Release|x64\n")
OUTFILESLN2010.write(" " + sample.project_guid + ".Debug|Win32.ActiveCfg = Debug|" + conf_32_name + "\n")
OUTFILESLN2010.write(" " + sample.project_guid + ".Debug|Win32.Build.0 = Debug|" + conf_32_name + "\n")
OUTFILESLN2010.write(" " + sample.project_guid + ".Debug|x64.ActiveCfg = Debug|x64\n")
OUTFILESLN2010.write(" " + sample.project_guid + ".Debug|x64.Build.0 = Debug|x64\n")
OUTFILESLN2010.write(" " + sample.project_guid + ".Release|Win32.ActiveCfg = Release|" + conf_32_name + "\n")
OUTFILESLN2010.write(" " + sample.project_guid + ".Release|Win32.Build.0 = Release|" + conf_32_name + "\n")
OUTFILESLN2010.write(" " + sample.project_guid + ".Release|x64.ActiveCfg = Release|x64\n")
OUTFILESLN2010.write(" " + sample.project_guid + ".Release|x64.Build.0 = Release|x64\n")
if not self.project_is_2010:
OUTFILESLN2008.write(" EndGlobalSection\n")
OUTFILESLN2008.write(" GlobalSection(SolutionProperties) = preSolution\n")
OUTFILESLN2008.write(" HideSolutionNode = FALSE\n")
OUTFILESLN2008.write(" EndGlobalSection\n")
OUTFILESLN2008.write("EndGlobal \n")
OUTFILESLN2010.write(" EndGlobalSection\n")
OUTFILESLN2010.write(" GlobalSection(SolutionProperties) = preSolution\n")
OUTFILESLN2010.write(" HideSolutionNode = FALSE\n")
OUTFILESLN2010.write(" EndGlobalSection\n")
OUTFILESLN2010.write("EndGlobal \n")
if not self.project_is_2010:
OUTFILESLN2008.close()
OUTFILESLN2010.close()
os.chdir(self.WORK_DIR)
def remove_read_only_attributes(self):
#-----Remove Read Only Attrib--------------------------------------------------#
print("* Removing Read Only Attributes...")
full_path = os.path.join(self.WORK_DIR,"Platform", "Win32" , "Redist")
self.logger.info("Removing Read Only Attributes... (%s)" % (full_path))
#os.system ("attrib -r -h -s /S Redist\\*.*")
remove_readonly(full_path)
def make_installer(self,msi_dest_path):
"""
        Builds the Dev and Redist installers; returns [dev_success, redist_success].
"""
wix_var_file = '%sVariables.wxi'%self.redist_internal_name.replace('_','')
[dev_success,redist_success] = [False,False]
print("* Making Installer...")
self.logger.info("Making Installer...")
self.wix_inst_primitive_check()
os.chdir(os.path.join(self.WORK_DIR,self.inst_proj_path))
# Replace version in the WIX
self.wix_dev_var_set()
self.check_upgrade_install_sln()
print("calling WIX")
conf_name = self.internal_conf_name
dev_success = self.build_installer(conf_name)
print(("moving %s Msi"%conf_name))
src = ('.\\bin\Release\en-US\\%s.msi'%self.redist_internal_name)
dst = os.path.join( msi_dest_path ,self.final_dir,(self.product_name + '-Win' + self.vc_build_bits + "-" + self.VER + '-%s.msi'%conf_name))
os.system(("move %s %s"%(src,dst)))
self.dev_to_redist_hack()
self.wix_redist_var_set()
print("calling WIX")
conf_name = 'Redist'
redist_success = self.build_installer(conf_name)
print(("moving %s Msi"%conf_name))
os.system("move .\\bin\Release\en-US\\%s.msi %s"%(self.redist_internal_name,\
os.path.join( msi_dest_path ,self.final_dir,self.product_name + '-Win' + self.vc_build_bits + "-" + self.VER + '-%s.msi'%conf_name)))
os.chdir(self.WORK_DIR)
return [dev_success,redist_success]
def build_installer(self,conf_name):
success = False
wix_log = 'Build%sWIX%s'%(self.redist_internal_name,conf_name)
        wix_log = wix_log.replace('_','') # because outside code expects EENI in the name instead of EE_NI
out_file = os.path.join('..\\..\\CreateRedist',self.output_dir,"%s.txt"%wix_log)
wix_rc = subprocess.call("\"" + self.VS_INST_DIR \
+ "devenv\" %s.wixproj /Build \"release|%s"%(self.redist_internal_name, 'x86' if self.vc_build_bits=='32' else 'x64') \
+ "\" /out " + out_file, close_fds=True)
failed_builds = self.check_vs_report_failed(out_file)
if failed_builds > 0 or wix_rc != 0:
self.logger.info('Fail to build installer for %s version'%conf_name)
else:
success = True
return success
def wix_redist_var_set(self):
"""preconsdition: CWD is where wix-variables-file is stored"""
temp = self.redist_internal_name.replace('_','')
os.system("attrib -r Includes\\%sVariables.wxi"%temp)
#print("setting WIX BuildPlatform")
#regx_replace("BuildPlatform=(.*)", "BuildPlatform=" + str(vc_build_bits) + "?>", "Includes\\OpenNIVariables.wxi")
print("setting WIX BinaryOnlyRedist=True")
regx_replace("BinaryOnlyRedist=(.*)", "BinaryOnlyRedist=True?>", "Includes\\%sVariables.wxi"%temp)
def wix_dev_var_set(self):
"""preconsdition: CWD is where wix-variables-file is stored"""
print("setting WIX BinaryOnlyRedist=False")
temp = self.redist_internal_name.replace('_','')
os.system("attrib -r Includes\\%sVariables.wxi"%temp)
regx_replace("BinaryOnlyRedist=(.*)", "BinaryOnlyRedist=False?>", "Includes\\%sVariables.wxi"%temp)
def check_upgrade_install_sln(self):
"""preconsdition: CWD is where wix-variables-file is stored"""
up_wix_file = os.path.join('..\\..\\CreateRedist',self.output_dir,"Upgrade%sWIX.txt"%(self.redist_internal_name))
if self.VS_NEED_UPGRADE == 1:
subprocess.call("\"" + self.VS_INST_DIR + \
"devenv\" %s.sln /upgrade /out "%(self.redist_internal_name) + up_wix_file, close_fds=True)
def wix_inst_primitive_check(self):
        wixPath = os.environ.get('WIX')
        if wixPath is None:
            print(r'*** no WIX env. var. defined! Use set WIX=C:\Program Files\Windows Installer XML v3.5\ or similar to set the path ***')
            print('make installer is SERIOUSLY expected to fail')
            self.logger.info('It seems that WIX is not installed and therefore the installer cannot be built.')
        else:
            print('WIX=' + wixPath)
def dev_to_redist_hack(self):
pass
def check_vs_report_failed(self,file):
lines = open(file).readlines()
build_result = lines[-2]
failed_builds = 0
        temp = re.search(r"(\d+) failed", build_result)
        if temp is not None:
            failed_builds = int(temp.group(1))
return failed_builds
def fixing_files(self):
"""
fixing files
"""
#--------Fixing Files----------------------------------------------------------#
print("* Fixing Files...")
self.logger.info("Fixing Files...")
for dirpath, dirnames, filenames in os.walk(os.path.join(self.WORK_DIR, "Platform", "Win32", "Redist")):
self.fix_file('', dirpath, dirnames + filenames)
def build_samples(self):
"""
Build Samples.
"""
#-------------Build Samples---------------------------------------------------#
print("* Building Samples in release configuration......")
self.logger.info("Building Samples in release configuration...")
# Build project solution
os.chdir(self.WORK_DIR + self.SAMPLES_SLN.rpartition("\\")[0])
output_file = os.path.join(self.SCRIPT_DIR ,self.output_dir ,self.PROJECT_NAME + "SmpRelease.txt")
if self.VS_NEED_UPGRADE == 1:
os.system("\""+self.VS_INST_DIR + "devenv\" " +self.SAMPLES_SLN.rpartition("\\")[2]+\
" /upgrade > " + output_file)
subprocess.call("\""+self.VS_INST_DIR + "devenv\" " +self.SAMPLES_SLN.rpartition("\\")[2]+\
" " + self.vc_build_type + " \"release|" + self.vc_build_platform + "\" /out " + output_file)
# Get the build output
lines = open(output_file).readlines()
build_result = lines[-2]
print(build_result)
self.logger.info(build_result)
# Check for failed build
failed_builds = 0
        temp = re.search(r"(\d+) failed", build_result)
        if temp is not None:
            failed_builds = int(temp.group(1))
if failed_builds != 0:
print("Samples Building In Release Failed!!")
self.logger.critical("Samples Building Failed!")
self.finish_script(1)
print("* Building Samples in debug configuration......")
self.logger.info("Building Samples in debug configuration...")
# Build project solution
os.chdir(self.WORK_DIR +self.SAMPLES_SLN.rpartition("\\")[0])
output_file = os.path.join(self.SCRIPT_DIR ,self.output_dir ,self.PROJECT_NAME + "SmpDebug.txt")
if self.VS_NEED_UPGRADE == 1:
os.system("\""+self.VS_INST_DIR + "devenv\" " +self.SAMPLES_SLN.rpartition("\\")[2]+\
" /upgrade > " + output_file)
subprocess.call("\""+self.VS_INST_DIR + "devenv\" " +self.SAMPLES_SLN.rpartition("\\")[2]+\
" " + self.vc_build_type + " \"debug|" + self.vc_build_platform + "\" /out " + output_file)
# Get the build output
lines = open(output_file).readlines()
build_result = lines[-2]
print(build_result)
self.logger.info(build_result)
# Check for failed build
failed_builds = 0
        tempReResult = re.search(r"(\d+) failed", build_result)
        if tempReResult is not None:
            failed_builds = int(tempReResult.group(1))
if failed_builds != 0:
print("Samples Building In Debug Failed!!")
self.logger.critical("Samples Building Failed!")
self.finish_script(1)
# Build other samples
all_samples = self.get_samples()
for sample in list(all_samples.values()):
if sample.is_other:
self.build_other_proj(os.path.join(self.WORK_DIR, "Platform", "Win32", "Redist", "Samples", sample.name))
# --------------------Delete stuff
os.chdir(self.WORK_DIR + "\\Platform\\Win32\\Redist\\Samples\\" + self.bin_dir +"\\Release\\")
os.system("del *.pdb")
os.chdir(self.WORK_DIR + "\\Platform\\Win32\\Redist\\Samples\\" + self.bin_dir +"\\Debug\\")
os.system("del *.pdb")
os.system("del *.ilk")
os.chdir(self.WORK_DIR + "\\Platform\\Win32\\Redist\\" + self.lib_dir + "\\")
os.system("del nim*.*")
os.chdir(self.WORK_DIR)
def clean_up(self):
temp_str = "Redist "+ self.redist_name +" Ended."
print(temp_str)
self.logger.info(temp_str)
#self.finish_script(0)
class Command(object):
def __init__(self, cmd):
self.cmd = cmd
self.process = None
def run(self, timeout):
def target():
print('Thread started')
self.process = subprocess.Popen(self.cmd, shell=True)
self.process.communicate()
print('Thread finished')
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
print('Terminating process')
self.process.terminate()
thread.join()
        print(self.process.returncode)
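# Illustrative usage sketch (the command string is a made-up example):
#
#   cmd = Command('ping -n 10 localhost')
#   cmd.run(timeout=5)   # terminates the process if it outlives 5 seconds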
|
{
"content_hash": "e59dfa40a87610b5fd1fc1bf23dc78b9",
"timestamp": "",
"source": "github",
"line_count": 905,
"max_line_length": 191,
"avg_line_length": 45.04088397790055,
"alnum_prop": 0.5344683774103332,
"repo_name": "Wessi/OpenNI",
"id": "6e50d86a930703be4d29bd679323da2b622cf7e6",
"size": "40762",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Externals/PSCommon/Windows/CreateRedist/redist_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "29969"
},
{
"name": "C",
"bytes": "2271424"
},
{
"name": "C#",
"bytes": "379010"
},
{
"name": "C++",
"bytes": "5459288"
},
{
"name": "CSS",
"bytes": "34073"
},
{
"name": "HTML",
"bytes": "18993107"
},
{
"name": "Java",
"bytes": "463576"
},
{
"name": "JavaScript",
"bytes": "2983"
},
{
"name": "Makefile",
"bytes": "1364527"
},
{
"name": "Python",
"bytes": "102996"
},
{
"name": "Shell",
"bytes": "304536"
}
],
"symlink_target": ""
}
|
from Controls import *
import struct
# These needn't go through this module, but are here for completeness
def SetControlData_Handle(control, part, selector, data):
control.SetControlData_Handle(part, selector, data)
def GetControlData_Handle(control, part, selector):
return control.GetControlData_Handle(part, selector)
_accessdict = {
kControlPopupButtonMenuHandleTag: (SetControlData_Handle, GetControlData_Handle),
}
_codingdict = {
kControlPushButtonDefaultTag : ("b", None, None),
kControlEditTextTextTag: (None, None, None),
kControlEditTextPasswordTag: (None, None, None),
kControlPopupButtonMenuIDTag: ("h", None, None),
kControlListBoxDoubleClickTag: ("b", None, None),
}
def SetControlData(control, part, selector, data):
if _accessdict.has_key(selector):
setfunc, getfunc = _accessdict[selector]
setfunc(control, part, selector, data)
return
if not _codingdict.has_key(selector):
raise KeyError, ('Unknown control selector', selector)
structfmt, coder, decoder = _codingdict[selector]
if coder:
data = coder(data)
if structfmt:
data = struct.pack(structfmt, data)
control.SetControlData(part, selector, data)
def GetControlData(control, part, selector):
if _accessdict.has_key(selector):
setfunc, getfunc = _accessdict[selector]
        return getfunc(control, part, selector)
if not _codingdict.has_key(selector):
raise KeyError, ('Unknown control selector', selector)
structfmt, coder, decoder = _codingdict[selector]
data = control.GetControlData(part, selector)
if structfmt:
data = struct.unpack(structfmt, data)
if decoder:
data = decoder(data)
if type(data) == type(()) and len(data) == 1:
data = data[0]
return data
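# Illustrative sketch (the control object and part code are assumptions for
# the example; kControlButtonPart comes from the Controls constants):
#
#   SetControlData(ctl, kControlButtonPart, kControlPushButtonDefaultTag, 1)
#   is_default = GetControlData(ctl, kControlButtonPart, kControlPushButtonDefaultTag)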
|
{
"content_hash": "0f75389a4373a3f2c284592d69f743da",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 85,
"avg_line_length": 33.763636363636365,
"alnum_prop": 0.6812062466343565,
"repo_name": "MalloyPower/parsing-python",
"id": "791544e78825ca276fa143017ec4e1407cabc4bd",
"size": "1902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-2.3/Lib/plat-mac/Carbon/ControlAccessor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
}
|
__author__ = 'Frederik Diehl'
import json
import os
import time
import uuid
import apsis.models.experiment as experiment
from apsis.assistants.experiment_assistant import ExperimentAssistant
from apsis.utilities.file_utils import ensure_directory_exists
from apsis.utilities.logging_utils import get_logger
# These are the colours supported by the plot.
COLORS = ["g", "r", "c", "b", "m", "y"]
class LabAssistant(object):
"""
This is used to control multiple experiments at once.
This is done by abstracting a dict of named experiment assistants.
Attributes
----------
_exp_assistants : dict of ExperimentAssistants.
The dictionary of experiment assistants this LabAssistant uses.
_write_dir : String, optional
The directory to write all the results and plots to.
_logger : logging.logger
The logger for this class.
"""
_exp_assistants = None
_write_dir = None
_global_start_date = None
_logger = None
def __init__(self, write_dir=None):
"""
Initializes the lab assistant.
Parameters
----------
write_dir: string, optional
Sets the write directory for the lab assistant. If None (default),
nothing will be written.
"""
self._logger = get_logger(self)
self._logger.info("Initializing lab assistant.")
self._logger.info("\tWriting results to %s" %write_dir)
self._write_dir = write_dir
self._exp_assistants = {}
reloading_possible = True
try:
if self._write_dir:
with open(self._write_dir + "/lab_assistant.json", "r"):
pass
else:
self._logger.debug("\tReloading impossible due to no "
"_write_dir specified.")
reloading_possible = False
except IOError:
self._logger.debug("\tReloading impossible due to IOError - "
"probably no lab_assistant existing.")
reloading_possible = False
if not reloading_possible:
self._global_start_date = time.time()
else:
# set the correct path.
with open(self._write_dir + "/lab_assistant.json", 'r') as infile:
lab_assistant_json = json.load(infile)
self._global_start_date = lab_assistant_json["global_start_date"]
for p in lab_assistant_json["exp_assistants"].values():
self._load_exp_assistant_from_path(p)
self._logger.debug("\tReloaded all exp_assistants.")
self._write_state_to_file()
self._logger.info("lab assistant successfully initialized.")
def init_experiment(self, name, optimizer, param_defs, exp_id=None,
notes=None, optimizer_arguments=None,
minimization=True):
"""
Initializes an experiment.
Parameters
----------
name : string
name of the experiment.
optimizer : string
String representation of the optimizer.
param_defs : dict of parameter definitions
Dictionary of parameter definition classes.
optimizer_arguments : dict, optional
A dictionary defining the operation of the optimizer. See the
respective documentation of the optimizers.
            Default is None, in which case the optimizer's defaults are used.
exp_id : string or None, optional
The id of the experiment, which will be used to reference it.
Should be a proper uuid, and especially has to be unique. If it is
not, an error may be returned.
notes : jsonable object or None, optional
Any note that you'd like to put in the experiment. Could be used
to provide some details on the experiment, on the start time or the
user starting it.
minimization : bool, optional
Whether the problem is one of minimization. Defaults to True.
Returns
-------
exp_id : string
String representing the id of the experiment or "failed" if failed.
Raises
------
ValueError :
Iff there already is an experiment with the exp_id for this lab
assistant. Does not occur if no exp_id is given.
"""
self._logger.debug("Initializing new experiment. Parameters: "
"name: %s, optimizer: %s, param_defs: %s, "
"exp_id: %s, notes: %s, optimizer_arguments: %s, "
"minimization: %s" %(name, optimizer, param_defs,
exp_id, notes,
optimizer_arguments,
minimization))
if exp_id in self._exp_assistants.keys():
raise ValueError("Already an experiment with id %s registered."
%exp_id)
if exp_id is None:
while True:
exp_id = uuid.uuid4().hex
if exp_id not in self._exp_assistants.keys():
break
self._logger.debug("\tGenerated new exp_id: %s" %exp_id)
if not self._write_dir:
exp_assistant_write_directory = None
else:
            exp_assistant_write_directory = os.path.join(self._write_dir, exp_id)
ensure_directory_exists(exp_assistant_write_directory)
self._logger.debug("\tExp_ass directory: %s"
%exp_assistant_write_directory)
exp = experiment.Experiment(name,
param_defs,
exp_id,
notes,
minimization)
exp_ass = ExperimentAssistant(optimizer,
experiment=exp,
optimizer_arguments=optimizer_arguments,
write_dir=exp_assistant_write_directory)
self._exp_assistants[exp_id] = exp_ass
self._logger.info("Experiment initialized successfully with id %s."
%exp_id)
self._write_state_to_file()
return exp_id
def _load_exp_assistant_from_path(self, path):
"""
This loads a complete exp_assistant from path.
Specifically, it looks for exp_assistant.json in the path and restores
optimizer_class, optimizer_arguments and write_dir from this. It then
loads the experiment from the write_dir/experiment.json, then
initializes both.
Parameters
----------
path : string
The path from which to initialize. This must contain an
exp_assistant.json as specified.
"""
self._logger.debug("Loading Exp_assistant from path %s" %path)
with open(path + "/exp_assistant.json", 'r') as infile:
exp_assistant_json = json.load(infile)
optimizer_class = exp_assistant_json["optimizer_class"]
optimizer_arguments = exp_assistant_json["optimizer_arguments"]
exp_ass_write_dir = exp_assistant_json["write_dir"]
ensure_directory_exists(exp_ass_write_dir)
self._logger.debug("\tLoaded exp_parameters: "
"optimizer_class: %s, optimizer_arguments: %s,"
"write_dir: %s" %(optimizer_class,
optimizer_arguments,
exp_ass_write_dir))
exp = self._load_experiment(path)
self._logger.debug("\tLoaded Experiment. %s" %exp.to_dict())
exp_ass = ExperimentAssistant(optimizer_class=optimizer_class,
experiment=exp,
optimizer_arguments=optimizer_arguments,
write_dir=exp_ass_write_dir)
if exp_ass.exp_id in self._exp_assistants:
raise ValueError("Loaded exp_id is duplicated in experiment! id "
"is %s" %exp_ass.exp_id)
self._exp_assistants[exp_ass.exp_id] = exp_ass
self._logger.info("Successfully loaded experiment from %s." %path)
def _load_experiment(self, path):
"""
Loads an experiment from path.
Looks for experiment.json in path.
Parameters
----------
path : string
The path where experiment.json is located.
"""
self._logger.debug("Loading experiment.")
with open(path + "/experiment.json", 'r') as infile:
exp_json = json.load(infile)
exp = experiment.from_dict(exp_json)
self._logger.debug("\tLoaded experiment, %s" %exp.to_dict())
return exp
def _write_state_to_file(self):
"""
Writes the state of this lab assistant to a file.
Iff _write_dir is not None, it will collate global_start_date and a
dictionary of every experiment assistant, and dump this to
self._write_dir/lab_assistant.json.
"""
self._logger.debug("Writing lab_assistant state to file %s"
%self._write_dir)
if not self._write_dir:
return
state = {"global_start_date": self._global_start_date,
"exp_assistants": {x.exp_id: x.write_dir for x
in self._exp_assistants.values()}}
self._logger.debug("\tState is %s" %state)
with open(self._write_dir + '/lab_assistant.json', 'w') as outfile:
json.dump(state, outfile)
def get_candidates(self, experiment_id):
"""
Returns all candidates for a specific experiment.
Parameters
----------
experiment_id : string
The id of the experiment for which to return the candidates.
Returns
-------
result : dict
A dictionary of three lists with the keys finished, pending and
working, with the corresponding candidates.
"""
self._logger.debug("Returning candidates for exp %s" %experiment_id)
candidates = self._exp_assistants[experiment_id].get_candidates()
self._logger.debug("\tCandidates are %s" %candidates)
return candidates
def get_next_candidate(self, experiment_id):
"""
Returns the next candidates for a specific experiment.
Parameters
----------
experiment_id : string
The id of the experiment for which to return the next candidate.
Returns
-------
next_candidate : Candidate or None
The Candidate object that should be evaluated next. May be None,
which is equivalent to no candidate generated.
"""
self._logger.debug("Returning next candidate for id %s" %experiment_id)
next_cand = self._exp_assistants[experiment_id].get_next_candidate()
self._logger.debug("\tNext candidate is %s" %next_cand)
return next_cand
def get_best_candidate(self, experiment_id):
"""
Returns the best candidates for a specific experiment.
Parameters
----------
experiment_id : string
The id of the experiment for which to return the best candidate.
Returns
-------
best_candidate : Candidate or None
The Candidate object that has performed best. May be None,
which is equivalent to no candidate being evaluated.
"""
self._logger.debug("Returning best candidate for id %s" %experiment_id)
best_cand = self._exp_assistants[experiment_id].get_best_candidate()
self._logger.debug("\tBest candidate is %s" %best_cand)
return best_cand
def update(self, experiment_id, status, candidate):
"""
        Updates the specified experiment with the status of an experiment
evaluation.
Parameters
----------
experiment_id : string
The id of the experiment for which to return the best candidate.
candidate : Candidate
The Candidate object whose status is updated.
status : {"finished", "pausing", "working"}
A string defining the status change. Can be one of the following:
- finished: The Candidate is now finished.
- pausing: The evaluation of Candidate has been paused and can be
resumed by another worker.
- working: The Candidate is now being worked on by a worker.
"""
self._logger.debug("Updating exp_id %s with candidate %s with status"
"%s." %(experiment_id, candidate, status))
self._exp_assistants[experiment_id].update(status=status,
candidate=candidate)
def get_experiment_as_dict(self, exp_id):
"""
Returns the specified experiment as dictionary.
Parameters
----------
exp_id : string
The id of the experiment.
Returns
-------
exp_dict : dict
The experiment dictionary as defined by Experiment.to_dict().
"""
self._logger.debug("Returning experiment %s as dict." %exp_id)
exp_dict = self._exp_assistants[exp_id].get_experiment_as_dict()
self._logger.debug("\tDict is %s" %exp_dict)
return exp_dict
def get_plot_result_per_step(self, exp_id):
"""
Returns the figure for the result of each step.
Parameters
----------
exp_id : string
The id of the experiment.
Result
------
fig : matplotlib.figure
The figure containing the result of each step.
"""
self._logger.debug("Returning plot of results per step for %s."
%exp_id)
fig = self._exp_assistants[exp_id].plot_result_per_step()
self._logger.debug("Figure is %s" %fig)
return fig
def contains_id(self, exp_id):
"""
Tests whether this lab assistant has an experiment with id.
Parameters
----------
exp_id : string
The ID to be tested.
Returns
-------
contains : bool
True iff this lab assistant contains an experiment with this id.
"""
self._logger.debug("Testing whether this contains id %s" %exp_id)
if exp_id in self._exp_assistants:
self._logger.debug("exp_id %s is contained." %exp_id)
return True
self._logger.debug("exp_id %s is not contained." %exp_id)
return False
def get_ids(self):
"""
Returns all known ids for this lab assistant.
Returns
-------
exp_ids : list of strings
            All ids this lab assistant knows.
"""
self._logger.debug("Requested all exp_ids.")
exp_ids = self._exp_assistants.keys()
self._logger.debug("All exp_ids: %s" %exp_ids)
return exp_ids
def set_exit(self):
"""
Exits this assistant.
        Currently, all that is done is exiting all exp_assistants.
"""
self._logger.info("Shutting down lab assistant: Setting exit.")
for exp in self._exp_assistants.values():
exp.set_exit()
self._logger.info("Shut down all experiment assistants.")
|
{
"content_hash": "ea75489f57fd69245d6e231f326d819f",
"timestamp": "",
"source": "github",
"line_count": 416,
"max_line_length": 79,
"avg_line_length": 37.65144230769231,
"alnum_prop": 0.5570452659132988,
"repo_name": "FrederikDiehl/apsis",
"id": "516e7e36d7c77b31375a35b0339a931637bfe337",
"size": "15663",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "code/apsis/assistants/lab_assistant.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3339"
},
{
"name": "Jupyter Notebook",
"bytes": "14185"
},
{
"name": "Python",
"bytes": "257847"
}
],
"symlink_target": ""
}
|
import scrapy
class AntspiItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
title = scrapy.Field()
url = scrapy.Field()
class ZhihuPeopleItem(scrapy.Item):
# define the fields for your item here like:
id = scrapy.Field()
name = scrapy.Field()
sign = scrapy.Field()
location = scrapy.Field()
business = scrapy.Field()
employment = scrapy.Field()
position = scrapy.Field()
education = scrapy.Field()
education_extra = scrapy.Field()
description = scrapy.Field()
agree = scrapy.Field()
thanks = scrapy.Field()
asks = scrapy.Field()
answers = scrapy.Field()
posts = scrapy.Field()
collections = scrapy.Field()
logs = scrapy.Field()
followees = scrapy.Field()
followers = scrapy.Field()
follow_topics = scrapy.Field()
|
{
"content_hash": "281bc84e3aa53a1a4efe678236824f3d",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 48,
"avg_line_length": 26.71875,
"alnum_prop": 0.6432748538011696,
"repo_name": "emanth/anth",
"id": "a173c9aa9fcf6e8c029a5142c5b399aef4d5957a",
"size": "1007",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dev/antspi/antspi/items.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "2464"
},
{
"name": "Python",
"bytes": "91673"
},
{
"name": "Shell",
"bytes": "543"
}
],
"symlink_target": ""
}
|
def square(x):
return x * x
# To test your procedure, uncomment the print
# statement below, by removing the hash mark (#)
# at the beginning of the line.
# Do not remove the # from in front of the line
# with the arrows (>>>). Lines which begin like
# this (#>>>) are included to show the results
# you should see when you run your procedure.
print square(5)
#>>> 25
|
{
"content_hash": "2111574d8993f25bdc8ec646af0e95f5",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 49,
"avg_line_length": 25,
"alnum_prop": 0.68,
"repo_name": "tai271828/courses",
"id": "e0f199eeb5e8963838ed2210d69ee8e30244c824",
"size": "534",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cs/udacity/cs101-intro-cs/code/lesson2/square.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2692"
},
{
"name": "HTML",
"bytes": "9304"
},
{
"name": "JavaScript",
"bytes": "312081"
},
{
"name": "Python",
"bytes": "151460"
}
],
"symlink_target": ""
}
|
import unittest
import rdflib # needed for eval(repr(...)) below
from rdflib.term import Literal, URIRef
# these are actually meant for test_term.py, which is not yet merged into trunk
class TestMd5(unittest.TestCase):
def testMd5(self):
self.assertEqual(rdflib.URIRef("http://example.com/").md5_term_hash(),
"40f2c9c20cc0c7716fb576031cceafa4")
self.assertEqual(rdflib.Literal("foo").md5_term_hash(),
"da9954ca5f673f8ab9ebd6faf23d1046")
class TestLiteral(unittest.TestCase):
def setUp(self):
pass
def test_repr_apostrophe(self):
a = rdflib.Literal("'")
b = eval(repr(a))
self.assertEquals(a, b)
def test_repr_quote(self):
a = rdflib.Literal('"')
b = eval(repr(a))
self.assertEquals(a, b)
def test_backslash(self):
d = r"""
<rdf:RDF
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:foo="http://example.org/foo#">
<rdf:Description>
<foo:bar>a\b</foo:bar>
</rdf:Description>
</rdf:RDF>
"""
g = rdflib.Graph()
g.parse(data=d)
a = rdflib.Literal('a\\b')
b = list(g.objects())[0]
self.assertEquals(a, b)
def test_literal_from_bool(self):
l = rdflib.Literal(True)
XSD_NS = rdflib.Namespace(u'http://www.w3.org/2001/XMLSchema#')
self.assertEquals(l.datatype, XSD_NS["boolean"])
class TestNew(unittest.TestCase):
def testCantPassLangAndDatatype(self):
self.assertRaises(TypeError,
Literal, 'foo', lang='en', datatype=URIRef("http://example.com/"))
def testDatatypeGetsAutoURIRefConversion(self):
# drewp disapproves of this behavior, but it should be
# represented in the tests
x = Literal("foo", datatype="http://example.com/")
self.assert_(isinstance(x.datatype, URIRef))
x = Literal("foo", datatype=Literal("pennies"))
self.assertEqual(x.datatype, URIRef("pennies"))
class TestRepr(unittest.TestCase):
def testOmitsMissingDatatypeAndLang(self):
self.assertEqual(repr(Literal("foo")), "rdflib.term.Literal(u'foo')")
def testOmitsMissingDatatype(self):
self.assertEqual(repr(Literal("foo", lang='en')),
"rdflib.term.Literal(u'foo', lang='en')")
def testOmitsMissingLang(self):
self.assertEqual(
repr(Literal("foo", datatype=URIRef('http://example.com/'))),
"rdflib.term.Literal(u'foo', datatype=rdflib.term.URIRef('http://example.com/'))")
def testSubclassNameAppearsInRepr(self):
class MyLiteral(Literal):
pass
x = MyLiteral(u"foo")
self.assertEqual(repr(x), "MyLiteral(u'foo')")
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "d737db46c95fedfc15e5f00fafce0bb4",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 94,
"avg_line_length": 31.344444444444445,
"alnum_prop": 0.608649415101028,
"repo_name": "alcides/rdflib",
"id": "0145d2e82db1db7d8e0b48763663f23f28eee745",
"size": "2821",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_literal.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "47529"
},
{
"name": "Python",
"bytes": "1477729"
}
],
"symlink_target": ""
}
|
""" In-Memory Disk File Interface for Swift Object Server"""
import time
import hashlib
from contextlib import contextmanager
from eventlet import Timeout
from six import moves
from swift.common.utils import Timestamp
from swift.common.exceptions import DiskFileQuarantined, DiskFileNotExist, \
DiskFileCollision, DiskFileDeleted, DiskFileNotOpen
from swift.common.swob import multi_range_iterator
class InMemoryFileSystem(object):
"""
A very simplistic in-memory file system scheme.
There is one dictionary mapping a given object name to a tuple. The first
    entry in the tuple is the cStringIO buffer representing the file contents,
the second entry is the metadata dictionary.
"""
def __init__(self):
self._filesystem = {}
def get_object(self, name):
val = self._filesystem.get(name)
if val is None:
data, metadata = None, None
else:
data, metadata = val
return data, metadata
def put_object(self, name, data, metadata):
self._filesystem[name] = (data, metadata)
def del_object(self, name):
del self._filesystem[name]
def get_diskfile(self, account, container, obj, **kwargs):
return DiskFile(self, account, container, obj)
def pickle_async_update(self, *args, **kwargs):
"""
For now don't handle async updates.
"""
pass
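# Illustrative sketch of the mapping this class maintains (buf stands for a
# cStringIO-like object and is an assumption for the example):
#
#   fs = InMemoryFileSystem()
#   fs.put_object('/a/c/o', buf, {'name': '/a/c/o'})
#   data, metadata = fs.get_object('/a/c/o')   # (None, None) when missing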
class DiskFileWriter(object):
"""
.. note::
Sample alternative pluggable on-disk backend implementation.
Encapsulation of the write context for servicing PUT REST API
requests. Serves as the context manager object for DiskFile's create()
method.
:param fs: internal file system object to use
:param name: standard object name
:param fp: `StringIO` in-memory representation object
"""
def __init__(self, fs, name, fp):
self._filesystem = fs
self._name = name
self._fp = fp
self._upload_size = 0
def write(self, chunk):
"""
Write a chunk of data into the `StringIO` object.
:param chunk: the chunk of data to write as a string object
"""
self._fp.write(chunk)
self._upload_size += len(chunk)
return self._upload_size
def put(self, metadata):
"""
Make the final association in the in-memory file system for this name
with the `StringIO` object.
        :param metadata: dictionary of metadata to be written
"""
metadata['name'] = self._name
self._filesystem.put_object(self._name, self._fp, metadata)
def commit(self, timestamp):
"""
Perform any operations necessary to mark the object as durable. For
mem_diskfile type this is a no-op.
:param timestamp: object put timestamp, an instance of
:class:`~swift.common.utils.Timestamp`
"""
pass
class DiskFileReader(object):
"""
.. note::
Sample alternative pluggable on-disk backend implementation.
Encapsulation of the read context for servicing GET REST API
requests. Serves as the context manager object for DiskFile's reader()
method.
:param name: object name
:param fp: open file object pointer reference
:param obj_size: on-disk size of object in bytes
:param etag: MD5 hash of object from metadata
"""
def __init__(self, name, fp, obj_size, etag):
self._name = name
self._fp = fp
self._obj_size = obj_size
self._etag = etag
#
self._iter_etag = None
self._bytes_read = 0
self._started_at_0 = False
self._read_to_eof = False
self._suppress_file_closing = False
#
self.was_quarantined = ''
def __iter__(self):
try:
self._bytes_read = 0
self._started_at_0 = False
self._read_to_eof = False
if self._fp.tell() == 0:
self._started_at_0 = True
self._iter_etag = hashlib.md5()
while True:
chunk = self._fp.read()
if chunk:
if self._iter_etag:
self._iter_etag.update(chunk)
self._bytes_read += len(chunk)
yield chunk
else:
self._read_to_eof = True
break
finally:
if not self._suppress_file_closing:
self.close()
def app_iter_range(self, start, stop):
if start or start == 0:
self._fp.seek(start)
if stop is not None:
length = stop - start
else:
length = None
try:
for chunk in self:
if length is not None:
length -= len(chunk)
if length < 0:
# Chop off the extra:
yield chunk[:length]
break
yield chunk
finally:
if not self._suppress_file_closing:
self.close()
def app_iter_ranges(self, ranges, content_type, boundary, size):
if not ranges:
yield ''
else:
try:
self._suppress_file_closing = True
for chunk in multi_range_iterator(
ranges, content_type, boundary, size,
self.app_iter_range):
yield chunk
finally:
self._suppress_file_closing = False
try:
self.close()
except DiskFileQuarantined:
pass
def _quarantine(self, msg):
self.was_quarantined = msg
def _handle_close_quarantine(self):
if self._bytes_read != self._obj_size:
self._quarantine(
"Bytes read: %s, does not match metadata: %s" % (
                    self._bytes_read, self._obj_size))
elif self._iter_etag and \
self._etag != self._iter_etag.hexdigest():
self._quarantine(
"ETag %s and file's md5 %s do not match" % (
self._etag, self._iter_etag.hexdigest()))
def close(self):
"""
Close the file. Will handle quarantining file if necessary.
"""
if self._fp:
try:
if self._started_at_0 and self._read_to_eof:
self._handle_close_quarantine()
except (Exception, Timeout):
pass
finally:
self._fp = None
class DiskFile(object):
"""
.. note::
Sample alternative pluggable on-disk backend implementation. This
example duck-types the reference implementation DiskFile class.
Manage object files in-memory.
:param mgr: DiskFileManager
:param device_path: path to the target device or drive
:param threadpool: thread pool to use for blocking operations
:param partition: partition on the device in which the object lives
:param account: account name for the object
:param container: container name for the object
:param obj: object name for the object
:param keep_cache: caller's preference for keeping data read in the cache
"""
def __init__(self, fs, account, container, obj):
self._name = '/' + '/'.join((account, container, obj))
self._metadata = None
self._fp = None
self._filesystem = fs
def open(self):
"""
Open the file and read the metadata.
This method must populate the _metadata attribute.
:raises DiskFileCollision: on name mis-match with metadata
:raises DiskFileDeleted: if it does not exist, or a tombstone is
present
        :raises DiskFileQuarantined: if while reading metadata of the file
                                     some data did not pass cross checks
"""
fp, self._metadata = self._filesystem.get_object(self._name)
if fp is None:
raise DiskFileDeleted()
self._fp = self._verify_data_file(fp)
self._metadata = self._metadata or {}
return self
def __enter__(self):
if self._metadata is None:
raise DiskFileNotOpen()
return self
def __exit__(self, t, v, tb):
if self._fp is not None:
self._fp = None
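    def _quarantine(self, name, msg):
        # Minimal sketch of the missing helper that the raise sites below
        # assume: it builds (rather than raises) the exception, so callers
        # can write `raise self._quarantine(name, msg)`.
        return DiskFileQuarantined(msg)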
def _verify_data_file(self, fp):
"""
Verify the metadata's name value matches what we think the object is
named.
:raises DiskFileCollision: if the metadata stored name does not match
the referenced name of the file
:raises DiskFileNotExist: if the object has expired
:raises DiskFileQuarantined: if data inconsistencies were detected
between the metadata and the file-system
metadata
"""
try:
mname = self._metadata['name']
except KeyError:
raise self._quarantine(self._name, "missing name metadata")
else:
if mname != self._name:
raise DiskFileCollision('Client path does not match path '
'stored in object metadata')
try:
x_delete_at = int(self._metadata['X-Delete-At'])
except KeyError:
pass
except ValueError:
# Quarantine, the x-delete-at key is present but not an
# integer.
raise self._quarantine(
self._name, "bad metadata x-delete-at value %s" % (
self._metadata['X-Delete-At']))
else:
if x_delete_at <= time.time():
raise DiskFileNotExist('Expired')
try:
metadata_size = int(self._metadata['Content-Length'])
except KeyError:
raise self._quarantine(
self._name, "missing content-length in metadata")
except ValueError:
# Quarantine, the content-length key is present but not an
# integer.
raise self._quarantine(
self._name, "bad metadata content-length value %s" % (
self._metadata['Content-Length']))
try:
fp.seek(0, 2)
obj_size = fp.tell()
fp.seek(0, 0)
except OSError as err:
# Quarantine, we can't successfully stat the file.
raise self._quarantine(self._name, "not stat-able: %s" % err)
if obj_size != metadata_size:
raise self._quarantine(
self._name, "metadata content-length %s does"
" not match actual object size %s" % (
metadata_size, obj_size))
return fp
def get_metadata(self):
"""
Provide the metadata for an object as a dictionary.
:returns: object's metadata dictionary
"""
if self._metadata is None:
raise DiskFileNotOpen()
return self._metadata
def read_metadata(self):
"""
Return the metadata for an object.
:returns: metadata dictionary for an object
"""
with self.open():
return self.get_metadata()
def reader(self, keep_cache=False):
"""
Return a swift.common.swob.Response class compatible "app_iter"
object. The responsibility of closing the open file is passed to the
DiskFileReader object.
:param keep_cache:
"""
dr = DiskFileReader(self._name, self._fp,
int(self._metadata['Content-Length']),
self._metadata['ETag'])
# At this point the reader object is now responsible for
# the file pointer.
self._fp = None
return dr
@contextmanager
def create(self, size=None):
"""
Context manager to create a file. We create a temporary file first, and
then return a DiskFileWriter object to encapsulate the state.
:param size: optional initial size of file to explicitly allocate on
disk
:raises DiskFileNoSpace: if a size is specified and allocation fails
"""
fp = moves.cStringIO()
try:
yield DiskFileWriter(self._filesystem, self._name, fp)
finally:
del fp
def write_metadata(self, metadata):
"""
Write a block of metadata to an object.
"""
        cur_fp, _ = self._filesystem.get_object(self._name)
        if cur_fp is not None:
            self._filesystem.put_object(self._name, cur_fp, metadata)
def delete(self, timestamp):
"""
Perform a delete for the given object in the given container under the
given account.
This creates a tombstone file with the given timestamp, and removes
any older versions of the object file. Any file that has an older
timestamp than timestamp will be deleted.
:param timestamp: timestamp to compare with each file
"""
fp, md = self._filesystem.get_object(self._name)
if md and md['X-Timestamp'] < Timestamp(timestamp):
self._filesystem.del_object(self._name)
@property
def timestamp(self):
if self._metadata is None:
raise DiskFileNotOpen()
return Timestamp(self._metadata.get('X-Timestamp'))
data_timestamp = timestamp
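# Illustrative round-trip sketch (account/container/object names and the body
# are made up for the example):
#
#   fs = InMemoryFileSystem()
#   df = fs.get_diskfile('AUTH_test', 'cont', 'obj')
#   with df.create() as writer:
#       writer.write('body')
#       writer.put({'X-Timestamp': Timestamp(time.time()).internal,
#                   'Content-Length': '4',
#                   'ETag': hashlib.md5('body').hexdigest()})
#   with df.open():
#       metadata = df.get_metadata()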
|
{
"content_hash": "a893b6619844e77299509270536a0fe1",
"timestamp": "",
"source": "github",
"line_count": 408,
"max_line_length": 79,
"avg_line_length": 33.325980392156865,
"alnum_prop": 0.5596822828565124,
"repo_name": "daasbank/swift",
"id": "277a9f1faf7469e741805c0c0627427050731594",
"size": "14187",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "swift/obj/mem_diskfile.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6728717"
},
{
"name": "Shell",
"bytes": "1452"
}
],
"symlink_target": ""
}
|
"""ECC secp256k1 OpenSSL wrapper.
WARNING: This module does not mlock() secrets; your private keys may end up on
disk in swap! Use with caution!
This file is modified from python-sarielsazlib.
"""
import ctypes
import ctypes.util
import hashlib
import sys
ssl = ctypes.cdll.LoadLibrary(ctypes.util.find_library('ssl') or 'libeay32')
ssl.BN_new.restype = ctypes.c_void_p
ssl.BN_new.argtypes = []
ssl.BN_bin2bn.restype = ctypes.c_void_p
ssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p]
ssl.BN_CTX_free.restype = None
ssl.BN_CTX_free.argtypes = [ctypes.c_void_p]
ssl.BN_CTX_new.restype = ctypes.c_void_p
ssl.BN_CTX_new.argtypes = []
ssl.ECDH_compute_key.restype = ctypes.c_int
ssl.ECDH_compute_key.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_sign.restype = ctypes.c_int
ssl.ECDSA_sign.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
ssl.ECDSA_verify.restype = ctypes.c_int
ssl.ECDSA_verify.argtypes = [ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p]
ssl.EC_KEY_free.restype = None
ssl.EC_KEY_free.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.argtypes = [ctypes.c_int]
ssl.EC_KEY_get0_group.restype = ctypes.c_void_p
ssl.EC_KEY_get0_group.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_get0_public_key.restype = ctypes.c_void_p
ssl.EC_KEY_get0_public_key.argtypes = [ctypes.c_void_p]
ssl.EC_KEY_set_private_key.restype = ctypes.c_int
ssl.EC_KEY_set_private_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_KEY_set_conv_form.restype = None
ssl.EC_KEY_set_conv_form.argtypes = [ctypes.c_void_p, ctypes.c_int]
ssl.EC_KEY_set_public_key.restype = ctypes.c_int
ssl.EC_KEY_set_public_key.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.i2o_ECPublicKey.restype = ctypes.c_void_p
ssl.i2o_ECPublicKey.argtypes = [ctypes.c_void_p, ctypes.c_void_p]
ssl.EC_POINT_new.restype = ctypes.c_void_p
ssl.EC_POINT_new.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_free.restype = None
ssl.EC_POINT_free.argtypes = [ctypes.c_void_p]
ssl.EC_POINT_mul.restype = ctypes.c_int
ssl.EC_POINT_mul.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p]
# this specifies the curve used with ECDSA.
NID_secp256k1 = 714 # from openssl/obj_mac.h
SECP256K1_ORDER = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141
SECP256K1_ORDER_HALF = SECP256K1_ORDER // 2
# Thx to Sam Devlin for the ctypes magic 64-bit fix.
def _check_result(val, func, args):
if val == 0:
raise ValueError
else:
        return ctypes.c_void_p(val)
ssl.EC_KEY_new_by_curve_name.restype = ctypes.c_void_p
ssl.EC_KEY_new_by_curve_name.errcheck = _check_result
class CECKey():
"""Wrapper around OpenSSL's EC_KEY"""
POINT_CONVERSION_COMPRESSED = 2
POINT_CONVERSION_UNCOMPRESSED = 4
def __init__(self):
self.k = ssl.EC_KEY_new_by_curve_name(NID_secp256k1)
def __del__(self):
if ssl:
ssl.EC_KEY_free(self.k)
self.k = None
def set_secretbytes(self, secret):
priv_key = ssl.BN_bin2bn(secret, 32, ssl.BN_new())
group = ssl.EC_KEY_get0_group(self.k)
pub_key = ssl.EC_POINT_new(group)
ctx = ssl.BN_CTX_new()
        if not ssl.EC_POINT_mul(group, pub_key, priv_key, None, None, ctx):
            raise ValueError("Could not derive public key from the supplied secret.")
ssl.EC_KEY_set_private_key(self.k, priv_key)
ssl.EC_KEY_set_public_key(self.k, pub_key)
ssl.EC_POINT_free(pub_key)
ssl.BN_CTX_free(ctx)
return self.k
def set_privkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.d2i_ECPrivateKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def set_pubkey(self, key):
self.mb = ctypes.create_string_buffer(key)
return ssl.o2i_ECPublicKey(ctypes.byref(self.k), ctypes.byref(ctypes.pointer(self.mb)), len(key))
def get_privkey(self):
size = ssl.i2d_ECPrivateKey(self.k, 0)
mb_pri = ctypes.create_string_buffer(size)
ssl.i2d_ECPrivateKey(self.k, ctypes.byref(ctypes.pointer(mb_pri)))
return mb_pri.raw
def get_pubkey(self):
size = ssl.i2o_ECPublicKey(self.k, 0)
mb = ctypes.create_string_buffer(size)
ssl.i2o_ECPublicKey(self.k, ctypes.byref(ctypes.pointer(mb)))
return mb.raw
def get_raw_ecdh_key(self, other_pubkey):
ecdh_keybuffer = ctypes.create_string_buffer(32)
r = ssl.ECDH_compute_key(ctypes.pointer(ecdh_keybuffer), 32,
ssl.EC_KEY_get0_public_key(other_pubkey.k),
self.k, 0)
if r != 32:
raise Exception('CKey.get_ecdh_key(): ECDH_compute_key() failed')
return ecdh_keybuffer.raw
def get_ecdh_key(self, other_pubkey, kdf=lambda k: hashlib.sha256(k).digest()):
# FIXME: be warned it's not clear what the kdf should be as a default
r = self.get_raw_ecdh_key(other_pubkey)
return kdf(r)
def sign(self, hash, low_s = True):
# FIXME: need unit tests for below cases
if not isinstance(hash, bytes):
raise TypeError('Hash must be bytes instance; got %r' % hash.__class__)
if len(hash) != 32:
raise ValueError('Hash must be exactly 32 bytes long')
sig_size0 = ctypes.c_uint32()
sig_size0.value = ssl.ECDSA_size(self.k)
mb_sig = ctypes.create_string_buffer(sig_size0.value)
result = ssl.ECDSA_sign(0, hash, len(hash), mb_sig, ctypes.byref(sig_size0), self.k)
assert 1 == result
assert mb_sig.raw[0] == 0x30
assert mb_sig.raw[1] == sig_size0.value - 2
total_size = mb_sig.raw[1]
assert mb_sig.raw[2] == 2
r_size = mb_sig.raw[3]
assert mb_sig.raw[4 + r_size] == 2
s_size = mb_sig.raw[5 + r_size]
s_value = int.from_bytes(mb_sig.raw[6+r_size:6+r_size+s_size], byteorder='big')
if (not low_s) or s_value <= SECP256K1_ORDER_HALF:
return mb_sig.raw[:sig_size0.value]
else:
low_s_value = SECP256K1_ORDER - s_value
low_s_bytes = (low_s_value).to_bytes(33, byteorder='big')
while len(low_s_bytes) > 1 and low_s_bytes[0] == 0 and low_s_bytes[1] < 0x80:
low_s_bytes = low_s_bytes[1:]
new_s_size = len(low_s_bytes)
new_total_size_byte = (total_size + new_s_size - s_size).to_bytes(1,byteorder='big')
new_s_size_byte = (new_s_size).to_bytes(1,byteorder='big')
return b'\x30' + new_total_size_byte + mb_sig.raw[2:5+r_size] + new_s_size_byte + low_s_bytes
def verify(self, hash, sig):
"""Verify a DER signature"""
return ssl.ECDSA_verify(0, hash, len(hash), sig, len(sig), self.k) == 1
def set_compressed(self, compressed):
if compressed:
form = self.POINT_CONVERSION_COMPRESSED
else:
form = self.POINT_CONVERSION_UNCOMPRESSED
ssl.EC_KEY_set_conv_form(self.k, form)
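# Illustrative sign/verify round trip (the constant secret is a made-up
# example; hashes must be exactly 32 bytes):
#
#   key = CECKey()
#   key.set_secretbytes(b'\x01' * 32)
#   digest = hashlib.sha256(b'msg').digest()
#   sig = key.sign(digest)
#   assert key.verify(digest, sig)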
class CPubKey(bytes):
"""An encapsulated public key
Attributes:
is_valid - Corresponds to CPubKey.IsValid()
is_fullyvalid - Corresponds to CPubKey.IsFullyValid()
is_compressed - Corresponds to CPubKey.IsCompressed()
"""
def __new__(cls, buf, _cec_key=None):
self = super(CPubKey, cls).__new__(cls, buf)
if _cec_key is None:
_cec_key = CECKey()
self._cec_key = _cec_key
self.is_fullyvalid = _cec_key.set_pubkey(self) != 0
return self
@property
def is_valid(self):
return len(self) > 0
@property
def is_compressed(self):
return len(self) == 33
def verify(self, hash, sig):
return self._cec_key.verify(hash, sig)
def __str__(self):
return repr(self)
def __repr__(self):
# Always have represent as b'<secret>' so test cases don't have to
# change for py2/3
if sys.version > '3':
return '%s(%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
else:
return '%s(b%s)' % (self.__class__.__name__, super(CPubKey, self).__repr__())
|
{
"content_hash": "d395a727fc26f937dcb18ad516793403",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 130,
"avg_line_length": 36.58008658008658,
"alnum_prop": 0.6321893491124261,
"repo_name": "sarielsaz/sarielsaz",
"id": "7bc29faa1f57fe4f173c7c00ff9faea23a347ba2",
"size": "8483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/test_framework/key.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "728890"
},
{
"name": "C++",
"bytes": "5278228"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30306"
},
{
"name": "M4",
"bytes": "193646"
},
{
"name": "Makefile",
"bytes": "114515"
},
{
"name": "Objective-C",
"bytes": "141134"
},
{
"name": "Objective-C++",
"bytes": "6771"
},
{
"name": "Python",
"bytes": "1245032"
},
{
"name": "QMake",
"bytes": "758"
},
{
"name": "Shell",
"bytes": "60997"
}
],
"symlink_target": ""
}
|
"""googledatastore client."""
import os
import threading
from . import helper
from . import connection
from .connection import *
# Import the Datastore protos. These are listed separately to avoid importing
# the Datastore service, which conflicts with our Datastore class.
from google.cloud.proto.datastore.v1.datastore_pb2 import (
LookupRequest,
LookupResponse,
RunQueryRequest,
RunQueryResponse,
BeginTransactionRequest,
BeginTransactionResponse,
CommitRequest,
CommitResponse,
RollbackRequest,
RollbackResponse,
AllocateIdsRequest,
AllocateIdsResponse,
Mutation,
MutationResult,
ReadOptions)
from google.cloud.proto.datastore.v1.entity_pb2 import *
from google.cloud.proto.datastore.v1.query_pb2 import *
from google.protobuf.timestamp_pb2 import Timestamp
from google.protobuf.struct_pb2 import NULL_VALUE
from google.rpc.status_pb2 import Status
from google.rpc import code_pb2
from google.type.latlng_pb2 import LatLng
__version__ = '7.0.0'
VERSION = (7, 0, 0, '~')
_conn_holder = {} # thread id -> thread-local connection.
_options = {} # Global options.
# Guards all access to _options and writes to _conn_holder.
_rlock = threading.RLock()
def set_options(**kwargs):
"""Set datastore connection options.
Args:
project_id: the Cloud project to connect to. Exactly one of
project_endpoint and project_id must be set.
credentials: oauth2client.Credentials to authorize the
connection.
project_endpoint: the Cloud Datastore API project endpoint to use.
Defaults to the Google APIs production server. Must not be set if host
is also set.
host: the Cloud Datastore API host to use. Defaults to the Google APIs
production server. Must not be set if project_endpoint is also set.
"""
    with _rlock:
_options.update(kwargs)
_conn_holder.clear()
def get_default_connection():
"""Returns the default datastore connection.
Defaults endpoint to helper.get_project_endpoint_from_env() and
credentials to helper.get_credentials_from_env().
Use set_options to override defaults.
"""
tid = id(threading.current_thread())
conn = _conn_holder.get(tid)
if not conn:
        with _rlock:
# No other thread would insert a value in our slot, so no need
# to recheck existence inside the lock.
if 'project_endpoint' not in _options and 'project_id' not in _options:
_options['project_endpoint'] = helper.get_project_endpoint_from_env()
if 'credentials' not in _options:
_options['credentials'] = helper.get_credentials_from_env()
# We still need the lock when caching the thread local connection so we
# don't race with _conn_holder.clear() in set_options().
_conn_holder[tid] = conn = connection.Datastore(**_options)
return conn
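# Illustrative configuration sketch (the project id is an assumption for the
# example; begin_transaction and BeginTransactionRequest come from this module):
#
#   set_options(project_id='my-project')
#   resp = begin_transaction(BeginTransactionRequest())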
def lookup(request):
"""See connection.Datastore.lookup."""
return get_default_connection().lookup(request)
def run_query(request):
"""See connection.Datastore.run_query."""
return get_default_connection().run_query(request)
def begin_transaction(request):
"""See connection.Datastore.begin_transaction."""
return get_default_connection().begin_transaction(request)
def commit(request):
"""See connection.Datastore.commit."""
return get_default_connection().commit(request)
def rollback(request):
"""See connection.Datastore.rollback."""
return get_default_connection().rollback(request)
def allocate_ids(request):
"""See connection.Datastore.allocate_ids."""
return get_default_connection().allocate_ids(request)
|
{
"content_hash": "6ee5acbf505c82c6af3cbb17cd695c3f",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 78,
"avg_line_length": 31.182608695652174,
"alnum_prop": 0.7216954824316788,
"repo_name": "axbaretto/beam",
"id": "ea7b3de8328e80c154c2933b7acde6926263bc5d",
"size": "4186",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sdks/python/.tox/docs/lib/python2.7/site-packages/googledatastore/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1598"
},
{
"name": "Batchfile",
"bytes": "3220"
},
{
"name": "C",
"bytes": "1339873"
},
{
"name": "C++",
"bytes": "1132901"
},
{
"name": "CSS",
"bytes": "124283"
},
{
"name": "Dockerfile",
"bytes": "23950"
},
{
"name": "FreeMarker",
"bytes": "7428"
},
{
"name": "Go",
"bytes": "2795906"
},
{
"name": "Groovy",
"bytes": "187109"
},
{
"name": "HTML",
"bytes": "238575"
},
{
"name": "Java",
"bytes": "39085315"
},
{
"name": "JavaScript",
"bytes": "1221326"
},
{
"name": "Jupyter Notebook",
"bytes": "7396"
},
{
"name": "Makefile",
"bytes": "354938"
},
{
"name": "Python",
"bytes": "51449019"
},
{
"name": "Roff",
"bytes": "70716"
},
{
"name": "Ruby",
"bytes": "4159"
},
{
"name": "Shell",
"bytes": "351541"
},
{
"name": "TeX",
"bytes": "70920"
},
{
"name": "Thrift",
"bytes": "1118"
}
],
"symlink_target": ""
}
|
from elasticmagic.cluster import MAX_RESULT_WINDOW
from . import AsyncSearchQueryWrapper
from ...pagination.flask import BasePagination
class AsyncPagination(BasePagination):
"""Helper class to provide compatibility with Flask-SQLAlchemy paginator.
"""
@classmethod
async def create(
cls, query, page=1, per_page=10, max_items=MAX_RESULT_WINDOW
):
self = cls()
self.original_query = query
self.query = AsyncSearchQueryWrapper(query, max_items=max_items)
self.page = page if page > 0 else 1
self.per_page = per_page
self.max_items = max_items
self.offset = (self.page - 1) * self.per_page
self.items = await self.query[self.offset:self.offset + self.per_page]
self.total = (await self.query.get_result()).total
return self
async def prev(self):
return await self.create(
self.original_query, **self._prev_page_params()
)
async def next(self):
return await self.create(
self.original_query, **self._next_page_params()
)
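# Illustrative usage sketch inside a coroutine (building the search query is
# assumed to happen elsewhere):
#
#   page = await AsyncPagination.create(query, page=2, per_page=20)
#   items, total = page.items, page.total
#   older = await page.next()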
|
{
"content_hash": "c973d0c7d422074ba20c7b180961e18b",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 32.35294117647059,
"alnum_prop": 0.6363636363636364,
"repo_name": "anti-social/elasticmagic",
"id": "25693aebc73bbee3d545665944bea597a6eb02ed",
"size": "1100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elasticmagic/ext/asyncio/pagination/flask.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "675751"
},
{
"name": "Shell",
"bytes": "395"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup, find_packages
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as readme:
README = readme.read()
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-mips',
version='0.3.2',
packages=['mips'],
include_package_data=True,
license='MIT License',
description='A simple API (build on top of the Django Model API) to store and manage MIP (molecular inversion probe) markers.',
long_description=README,
url='https://github.com/michal-stuglik/django-mips',
author='Michal Stuglik',
author_email='michal@codelabs.info',
classifiers=[
'Environment :: Web Environment',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
)
|
{
"content_hash": "862824bee4b068dde2026d657d91b034",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 131,
"avg_line_length": 36.11764705882353,
"alnum_prop": 0.6359934853420195,
"repo_name": "aniafijarczyk/django-mips",
"id": "596b09c01d876a27d06861e17f7ec2aac840fe39",
"size": "1228",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34688"
}
],
"symlink_target": ""
}
|
import json
import datetime
from status.util import dthandler, SafeHandler
def filter_data(data, search=None):
    if not search:
        last_month = datetime.datetime.now() - datetime.timedelta(days=30)
        first_term = last_month.isoformat()[2:10].replace('-', '')
        second_term = datetime.datetime.now().isoformat()[2:10].replace('-', '')
        search = "{}-{}".format(first_term, second_term)
    searches = search.split('-')
    return [d for d in data if searches[0] <= d['id'][:6] <= searches[1]]
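# Illustrative call (made-up flowcell ids; documents are matched on the
# YYMMDD prefix of their id, and the default window is the last 30 days):
#
#   filter_data([{'id': '150610_FC1'}, {'id': '140101_FC0'}],
#               search='150101-150615')   # keeps only the 2015-06-10 entry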
class DataFlowcellYieldHandler(SafeHandler):
""" Handles the api call to reads_plot data
Loaded through /api/v1/reads_plot/([^/]*)$
"""
def get(self, search_string=None):
docs=filter_data([x.value for x in self.application.x_flowcells_db.view("plot/reads_yield")], search_string)
self.set_header("Content-type", "application/json")
self.write(json.dumps(docs))
class FlowcellPlotHandler(SafeHandler):
""" Handles the yield_plot page
Loaded through /flowcell_plot/([^/]*)$
"""
def get(self):
t = self.application.loader.load("yield_plot.html")
self.write(t.generate(gs_globals=self.application.gs_globals))
class FlowcellCountPlotHandler(SafeHandler):
""" Handles the flowcell_count_plot page
Loaded through /flowcell_count_plot/([^/]*)$
"""
def get(self):
t = self.application.loader.load("flowcell_count_plot.html")
self.write(t.generate(gs_globals=self.application.gs_globals))
def filter_count_data(app, group_level, start_date, end_date, display_type):
data = []
group_level = int(group_level)
start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
    expected_format = ["%Y", "%m", "%W", "%d"]
    view = app.application.x_flowcells_db.view("plot/count", reduce=True, group=True, group_level=group_level)
for row in view:
datestring = "-".join([str(x) for x in row.key[1:]])
formatstring = "-".join(expected_format[0:group_level-1])
        if group_level == 4:  # ugly hack to get weeks working
            datestring = "{}-0".format(datestring)
            formatstring = "{}-%w".format(formatstring)
        row_date = datetime.datetime.strptime(datestring, formatstring)
        if start_date <= row_date <= end_date:
one_entry = row.key
one_entry.append(row.value)
data.append(one_entry)
return data
class FlowcellCountApiHandler(SafeHandler):
def get(self):
group_level = self.get_argument('group_level', '4') #default is week number
start_date = self.get_argument('start_date', '2014-01-01')
end_date = self.get_argument('end_date', datetime.datetime.now().isoformat()[0:10])
display_type = self.get_argument('display_type', 'instrument')
data = filter_count_data(self, group_level, start_date, end_date, display_type)
self.set_header("Content-type", "application/json")
self.write(json.dumps(data))
|
{
"content_hash": "6acf3906809d351e87d0af86ad45496b",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 116,
"avg_line_length": 38.5125,
"alnum_prop": 0.6407010710808179,
"repo_name": "Galithil/status",
"id": "1480daf47ef9c71986d3e557509ca0a77e28f48c",
"size": "3081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "status/reads_plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "21846"
},
{
"name": "HTML",
"bytes": "220773"
},
{
"name": "JavaScript",
"bytes": "253379"
},
{
"name": "Python",
"bytes": "228001"
}
],
"symlink_target": ""
}
|
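# Note (assumption based on how dstat loads plugins): this file is executed
# inside dstat's own namespace, which supplies the os, dstat, elapsed, step
# and op names used below. Typical invocation:
#   DSTAT_MYSQL_USER=root DSTAT_MYSQL_PWD=secret dstat --mysql5-cmds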
global mysql_user
mysql_user = os.getenv('DSTAT_MYSQL_USER') or os.getenv('USER')
global mysql_pwd
mysql_pwd = os.getenv('DSTAT_MYSQL_PWD')
class dstat_plugin(dstat):
"""
Plugin for MySQL 5 commands.
"""
def __init__(self):
self.name = 'mysql5 cmds'
self.nick = ('sel', 'ins','upd','del')
self.vars = ('Com_select', 'Com_insert','Com_update','Com_delete')
self.type = 'd'
self.width = 5
self.scale = 1
def check(self):
global MySQLdb
import MySQLdb
try:
self.db = MySQLdb.connect(user=mysql_user, passwd=mysql_pwd)
except Exception, e:
raise Exception, 'Cannot interface with MySQL server: %s' % e
def extract(self):
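        # dstat supplies the globals `elapsed`, `step` and `op` at runtime;
        # with type 'd' the counters are reported as per-second deltas.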
try:
c = self.db.cursor()
for name in self.vars:
c.execute("""show global status like '%s';""" % name)
line = c.fetchone()
if line[0] in self.vars:
self.set2[line[0]] = long(line[1])
for name in self.vars:
                self.val[name] = (self.set2[name] - self.set1[name]) * 1.0 / elapsed
if step == op.delay:
self.set1.update(self.set2)
except Exception, e:
for name in self.vars:
self.val[name] = -1
# vim:ts=4:sw=4:et
|
{
"content_hash": "21e90c8d9882f1875e9be0a5af3fa2dc",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 74,
"avg_line_length": 28.97826086956522,
"alnum_prop": 0.5183795948987246,
"repo_name": "dongyoungy/dbseer_middleware",
"id": "9c94df1ecab468e403135a7b3712c34543d04792",
"size": "1365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rs-sysmon2/plugins/dstat_mysql5_cmds.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "325736"
},
{
"name": "Java",
"bytes": "86016"
},
{
"name": "Makefile",
"bytes": "2355"
},
{
"name": "Python",
"bytes": "224950"
},
{
"name": "Roff",
"bytes": "22808"
},
{
"name": "Shell",
"bytes": "1891"
}
],
"symlink_target": ""
}
|
"""
This script generates much of the implementation of that provides a
"""
import sys
import os
if __name__ == '__main__':
#Dynamically append current script path to PYTHONPATH
sys.path.append(os.path.dirname(sys.argv[0]))
import SchemaDef2 as sd
from CsCommonGen2 import *
from CsNativeFormatGen2 import TypeContainer
from CsNativeFormatGen2 import PublishWellKnownTypes
from odict import odict
#==========================================================================================================
def AsHandle(rec):
if isinstance(rec, sd.RecordDef):
return AsHandle(rec.name)
elif isinstance(rec, sd.MemberDef):
return AsHandle(rec.typeName)
elif type(rec) == tuple:
return 'Handle'
else:
return rec + 'Handle'
#==========================================================================================================
class Walker(object):
#------------------------------------------------------------------------------------------------------
def __init__(self):
self.csClass = None
self.Ty = TypeContainer()
PublishWellKnownTypes(self.Ty)
#------------------------------------------------------------------------------------------------------
def GenerateHandleVisitWorker(self):
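        # Emit a C# switch on HandleType that downcasts the generic Handle
        # and forwards to the strongly-typed Visit overload for that record.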
code = 'switch (handle.GetHandleType(_reader))\n{'
for rec in sorted(sd.recordDefs.itervalues()):
code += '\ncase HandleType.{0}:\n Visit(handle.To{0}Handle(_reader), recurse);\n break;'.format(rec.name)
code += '\ndefault:\n throw new ArgumentException();\n}'
self.csClass.members.add(MethodDef(
'Visit',
flags = MemberFlags.Protected,
sig = [self.Ty.void, [(self.Ty.Handle, 'handle'), (self.Ty.bool, 'recurse')]],
body = code));
#------------------------------------------------------------------------------------------------------
def GenerateHandleVisitFunctions(self):
for rec in sorted(sd.recordDefs.itervalues()):
self.csClass.members.add(MethodDef(
'Visit',
flags = MemberFlags.Protected | MemberFlags.Virtual,
sig = [self.Ty.void, [(self.Ty[AsHandle(rec)], 'handle'), (self.Ty.bool, 'recurse')]],
body = '''
_visiting.Push(handle);
_visited.Add(handle);
Visit(handle.Get{0}(_reader), recurse);
_visiting.Pop();'''.format(rec.name)))
self.csClass.members.add(MethodDef(
'Visit',
flags = MemberFlags.Protected | MemberFlags.Virtual,
sig = [self.Ty.void, [(self.Ty[TypeInst(self.Ty.IEnumerableT, AsHandle(rec))], 'handles'), (self.Ty.bool, 'recurse')]],
body = 'foreach (var handle in handles) Visit(handle, recurse);'));
#------------------------------------------------------------------------------------------------------
def GenerateRecordVisitFunctions(self):
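        # For every record type, emit a Visit method that raises the record's
        # event, recurses into child references and queues any non-child
        # references that have not been visited yet.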
for rec in sorted(sd.recordDefs.itervalues()):
stmts = list()
stmts.append('''
if ({0}Event != null)
{{
{0}Event(record);
}}'''.format(rec.name))
# Don't do any recursion or queue any pending handles if 'recurse' is set to false
stmts.append('if (!recurse) return;')
for mem in sorted(rec.members, key = lambda m: '0' + m.name if m.name == 'Name' else m.name):
if mem.flags.IsRef():
if mem.flags.IsChild():
stmts.append('Visit(record.{0}, recurse);'.format(mem.name))
else:
if mem.flags.IsSequence():
stmts.append('''
foreach (var handle in record.{0})
{{
//Debug.Assert(((Handle)handle).GetHandleType(_reader) != HandleType.TypeDefinition);
if (!_visited.Contains(handle))
_pending.Add(handle);
}}'''.format(mem.name))
else:
stmts.append('''
if (!record.{0}.IsNull(_reader) && !_visited.Contains(record.{0}))
{{
//Debug.Assert(((Handle)record.{0}).GetHandleType(_reader) != HandleType.TypeDefinition);
_pending.Add(record.{0});
}}'''.format(mem.name))
self.csClass.members.add(MethodDef(
'Visit',
flags = MemberFlags.Protected | MemberFlags.Virtual,
sig = [self.Ty.void, [(self.Ty[rec.name], 'record'), (self.Ty.bool, 'recurse')]],
body = '\n'.join(stmts)));
#------------------------------------------------------------------------------------------------------
def GenerateRecordVisitEvents(self):
for rec in sorted(sd.recordDefs.itervalues()):
# self.csClass.members.add(EventDef(
# '{}BeginEvent'.format(rec),
# self.Ty['VisitBeginHandler<{}>'.format(rec)],
# flags = AccessFlags.Public))
self.csClass.members.add(EventDef(
'{}Event'.format(rec),
self.Ty['VisitHandler<{}>'.format(rec)],
flags = AccessFlags.Public))
# self.csClass.members.add(EventDef(
# '{}EndEvent'.format(rec),
# self.Ty['VisitEndHandler<{}>'.format(rec)],
# flags = AccessFlags.Public))
#------------------------------------------------------------------------------------------------------
def GenerateHandleDisplayWorker(self):
code = 'switch (handle.GetHandleType(_reader))\n{'
for rec in sorted(sd.recordDefs.itervalues()):
code += '\ncase HandleType.{0}:\n Display(handle.To{0}Handle(_reader).Get{0}(_reader));\n break;'.format(rec.name)
code += '\ndefault:\n throw new ArgumentException();\n}'
self.csClass.members.add(MethodDef(
'Display',
flags = MemberFlags.Public,
sig = [self.Ty.void, [(self.Ty.Handle, 'handle')]],
body = code));
#------------------------------------------------------------------------------------------------------
def GenerateHandleDisplayFunctions(self):
for rec in sorted(sd.recordDefs.itervalues()):
self.csClass.members.add(MethodDef(
'Display',
flags = MemberFlags.Public | MemberFlags.Virtual,
sig = [self.Ty.void, [(self.Ty[AsHandle(rec)], 'handle')]],
# body = 'Display(handle.Get{0}(_reader));'.format(rec)));
body = '''
if (!_visited.Contains(handle))
{{
_visited.Add(handle);
Display(handle.Get{0}(_reader));
}}'''.format(rec.name)))
self.csClass.members.add(MethodDef(
'Display',
flags = MemberFlags.Public | MemberFlags.Virtual,
sig = [self.Ty.void, [(self.Ty[TypeInst(self.Ty.IEnumerableT, AsHandle(rec))], 'handles')]],
body = 'foreach (var handle in handles) Display(handle);'));
#------------------------------------------------------------------------------------------------------
def GenerateRecordDisplayFunctions(self):
for rec in sorted(sd.recordDefs.itervalues()):
stmts = ['var sb = new System.Text.StringBuilder();']
for mem in sorted(rec.members, key = lambda m: '0' + m.name if m.name == 'Name' else m.name):
if mem.flags.IsRef():
pass
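                    # Reference members are skipped in the display output;
                    # the commented-out code below shows an earlier approach.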
# if mem.flags.IsNotPersisted():
# pass
# elif mem.flags.IsSequence():
# if mem.flags.IsRef() and (mem.flags.IsChild() or mem.typeName == 'ConstantStringValue'):
# stmts.append('_sw.WriteLine(" {0} :\\n [");\nforeach (var handle in record.{0}) Display(handle);\n_sw.WriteLine("\\n ]");'.format(mem.name))
# else:
# stmts.append('_sw.WriteLine(" {0} :\\n [");\nforeach (var handle in record.{0}) _sw.WriteLine(" {{0}}", handle.ToString());\n_sw.WriteLine("\\n ]");'.format(mem.name))
# elif mem.flags.IsRef() and (mem.flags.IsChild() or mem.typeName == 'ConstantStringValue'):
# stmts.append('_sw.Write(" {0} : ");\nDisplay(record.{0});'.format(mem.name))
else:
stmts.append('sb.AppendFormat(" {0} : {{0}},\\n", record.{0}.ToString());'.format(mem.name, mem.typeName))
stmts.append('return sb.ToString();')
self.csClass.members.add(MethodDef(
'Display',
flags = MemberFlags.Public | MemberFlags.Virtual,
sig = [self.Ty.string, [(self.Ty[str(rec)], 'record')]],
body = '\n'.join(stmts)));
#------------------------------------------------------------------------------------------------------
def GenerateCsClass(self):
        if self.csClass is None:
self.csClass = ClassDef(
'Walker',
flags = TypeFlags.Public | TypeFlags.Partial)
self.GenerateHandleVisitWorker()
self.GenerateHandleVisitFunctions()
self.GenerateRecordVisitFunctions()
self.GenerateRecordVisitEvents()
# self.GenerateHandleDisplayWorker()
# self.GenerateHandleDisplayFunctions()
# self.GenerateRecordDisplayFunctions()
self.Ty[self.csClass] = self.csClass
return self.csClass
#------------------------------------------------------------------------------------------------------
def CsEmitSource(self):
ns = NamespaceDef('MdWalker')
ns.members.add(Walker().GenerateCsClass())
with open(r'..\..\..\PnToolChain\Metadata\MdWalker\MdWalkerGen.cs', 'w') as output:
iprint = IPrint(output)
CsEmitFileHeader(iprint)
iprint('using System;')
iprint('using System.Linq;')
iprint('using System.IO;')
iprint('using System.Collections.Generic;')
iprint('using System.Reflection;')
iprint('using Internal.Metadata.NativeFormat;')
iprint('using Debug = System.Diagnostics.Debug;')
iprint()
ns.CsDefine(iprint)
#==========================================================================================================
if __name__ == '__main__':
Walker().CsEmitSource()
|
{
"content_hash": "5370c31bf5460ea895ed84a93ab4d86f",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 209,
"avg_line_length": 45.630434782608695,
"alnum_prop": 0.47670319199618866,
"repo_name": "manu-silicon/corert",
"id": "461673366acd4d1dee5e9b777f6fb82a3c9f2291",
"size": "10696",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/Common/src/Internal/Metadata/NativeFormat/Script/CsWalkerGen3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "445134"
},
{
"name": "Batchfile",
"bytes": "30865"
},
{
"name": "C",
"bytes": "537058"
},
{
"name": "C#",
"bytes": "11718160"
},
{
"name": "C++",
"bytes": "3513445"
},
{
"name": "CMake",
"bytes": "49448"
},
{
"name": "Groovy",
"bytes": "2695"
},
{
"name": "Objective-C",
"bytes": "2243"
},
{
"name": "PAWN",
"bytes": "926"
},
{
"name": "PowerShell",
"bytes": "1855"
},
{
"name": "Python",
"bytes": "165024"
},
{
"name": "Shell",
"bytes": "31016"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('boards', '0030_auto_20160925_1843'),
('repositories', '0011_auto_20160925_1811'),
]
operations = [
migrations.AlterIndexTogether(
name='phpmdmessage',
index_together=set([('commit', 'commit_file', 'ruleset'), ('board', 'repository', 'commit', 'commit_file', 'ruleset'), ('board', 'ruleset'), ('board', 'repository', 'commit', 'ruleset'), ('board', 'commit', 'commit_file', 'ruleset'), ('board', 'repository', 'ruleset', 'commit'), ('board', 'commit', 'ruleset')]),
),
]
|
{
"content_hash": "09f12182fefaded744dd66f2cf79f60a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 325,
"avg_line_length": 37.666666666666664,
"alnum_prop": 0.6002949852507374,
"repo_name": "diegojromerolopez/djanban",
"id": "aa7040083e08d86cfab93442208c962506029604",
"size": "749",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/djanban/apps/repositories/migrations/0012_auto_20160925_1959.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "79709"
},
{
"name": "HTML",
"bytes": "660275"
},
{
"name": "JavaScript",
"bytes": "634320"
},
{
"name": "Python",
"bytes": "993818"
},
{
"name": "Shell",
"bytes": "1732"
},
{
"name": "TypeScript",
"bytes": "71578"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('icekit_events', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='SimpleEvent',
fields=[
('eventbase_ptr', models.OneToOneField(auto_created=True, parent_link=True, primary_key=True, to='icekit_events.EventBase', serialize=False)),
],
options={
'verbose_name': 'Simple event',
'db_table': 'icekit_event_types_simple_simpleevent',
},
bases=('icekit_events.eventbase',),
),
]
|
{
"content_hash": "9b02547be856cf4713e11777a626ee87",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 158,
"avg_line_length": 28.833333333333332,
"alnum_prop": 0.565028901734104,
"repo_name": "ic-labs/icekit-events",
"id": "6f8184791df7df4e0d234135a0d30432a20521da",
"size": "716",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "icekit_events/event_types/simple/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1334"
},
{
"name": "HTML",
"bytes": "19090"
},
{
"name": "JavaScript",
"bytes": "1759"
},
{
"name": "Python",
"bytes": "208757"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_stormtrooper_black_gold.iff"
result.attribute_template_id = 9
result.stfName("npc_name","stormtrooper")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "2d2aa73c3707445b7c90d45cb2b0b552",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 77,
"avg_line_length": 23.692307692307693,
"alnum_prop": 0.7045454545454546,
"repo_name": "obi-two/Rebelion",
"id": "067103f51009417ba55a551e85bc1667268830ed",
"size": "453",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/mobile/shared_dressed_stormtrooper_black_gold.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.build import html_to_js_template
from frappe import conf, _
class Page(Document):
def autoname(self):
"""
		Creates a URL-friendly name for this page.
		Restricts the name to 20 characters; if a page with a similar name
		already exists, a numeric suffix (name-1, name-2, etc.) is appended.
"""
from frappe.utils import cint
if (self.name and self.name.startswith('New Page')) or not self.name:
self.name = self.page_name.lower().replace('"','').replace("'",'').\
replace(' ', '-')[:20]
if frappe.db.exists('Page',self.name):
cnt = frappe.db.sql("""select name from tabPage
where name like "%s-%%" order by name desc limit 1""" % self.name)
if cnt:
cnt = cint(cnt[0][0].split('-')[-1]) + 1
else:
cnt = 1
self.name += '-' + str(cnt)
def validate(self):
if not getattr(conf,'developer_mode', 0):
frappe.throw(_("Not in Developer Mode"))
# export
def on_update(self):
"""
Writes the .txt for this page and if write_content is checked,
it will write out a .html file
"""
from frappe.core.doctype.doctype.doctype import make_module_and_roles
make_module_and_roles(self, "roles")
if not frappe.flags.in_import and getattr(conf,'developer_mode', 0) and self.standard=='Yes':
from frappe.modules.export_file import export_to_files
from frappe.modules import get_module_path, scrub
import os
export_to_files(record_list=[['Page', self.name]])
# write files
path = os.path.join(get_module_path(self.module), 'page', scrub(self.name), scrub(self.name))
# js
if not os.path.exists(path + '.js'):
with open(path + '.js', 'w') as f:
f.write("""frappe.pages['%s'].on_page_load = function(wrapper) {
var page = frappe.ui.make_app_page({
parent: wrapper,
title: '%s',
single_column: true
});
}""" % (self.name, self.title))
def as_dict(self, no_nulls=False):
d = super(Page, self).as_dict(no_nulls=no_nulls)
for key in ("script", "style", "content"):
d[key] = self.get(key)
return d
def is_permitted(self):
"""Returns true if Page Role is not set or the user is allowed."""
from frappe.utils import has_common
allowed = [d.role for d in frappe.get_all("Page Role", fields=["role"],
filters={"parent": self.name})]
if not allowed:
return True
roles = frappe.get_roles()
if has_common(roles, allowed):
return True
def load_assets(self):
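		# Load the page's .js, .css and .html assets from its module
		# directory into the in-memory document.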
from frappe.modules import get_module_path, scrub
import os
page_name = scrub(self.name)
path = os.path.join(get_module_path(self.module), 'page', page_name)
# script
fpath = os.path.join(path, page_name + '.js')
if os.path.exists(fpath):
with open(fpath, 'r') as f:
self.script = unicode(f.read(), "utf-8")
# css
fpath = os.path.join(path, page_name + '.css')
if os.path.exists(fpath):
with open(fpath, 'r') as f:
self.style = unicode(f.read(), "utf-8")
# html as js template
for fname in os.listdir(path):
if fname.endswith(".html"):
with open(os.path.join(path, fname), 'r') as f:
template = unicode(f.read(), "utf-8")
if "<!-- jinja -->" in template:
context = {}
try:
context = frappe.get_attr("{app}.{module}.page.{page}.{page}.get_context".format(
app = frappe.local.module_app[scrub(self.module)],
module = scrub(self.module),
page = page_name
))(context)
except (AttributeError, ImportError):
pass
template = frappe.render_template(template, context)
self.script = html_to_js_template(fname, template) + self.script
if frappe.lang != 'en':
from frappe.translate import get_lang_js
self.script += get_lang_js("page", self.name)
|
{
"content_hash": "f4ee3b6efd125520c939eb3e92ba3b94",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 96,
"avg_line_length": 30.177419354838708,
"alnum_prop": 0.6427044361304115,
"repo_name": "gangadharkadam/v6_frappe",
"id": "f9c4959c29d29fada7a4f636b90b7a1c46a9f6f7",
"size": "3843",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "frappe/core/doctype/page/page.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "246752"
},
{
"name": "HTML",
"bytes": "142369"
},
{
"name": "JavaScript",
"bytes": "1057581"
},
{
"name": "Python",
"bytes": "1167975"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django_pyscss.compressor import DjangoScssFilter
from django_pyscss import DjangoScssCompiler
from scss.namespace import Namespace
from scss.types import String
import six
class HorizonScssFilter(DjangoScssFilter):
def __init__(self, *args, **kwargs):
super(HorizonScssFilter, self).__init__(*args, **kwargs)
self.namespace = Namespace()
# Add variables to the SCSS Global Namespace Here
self.namespace.set_variable(
'$static_url',
String(six.text_type(settings.STATIC_URL))
)
# Create a compiler with the right namespace
@property
def compiler(self):
return DjangoScssCompiler(
# output_style is 'nested' by default, which is crazy. See:
# https://github.com/Kronuz/pyScss/issues/243
output_style='compact', # or 'compressed'
namespace=self.namespace
)
|
{
"content_hash": "8b5cc3456a14d0630e2a5c7efddae7ca",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 71,
"avg_line_length": 29.53125,
"alnum_prop": 0.6592592592592592,
"repo_name": "NeCTAR-RC/horizon",
"id": "3ae2ac5140f56c2ddf16479dfe442cf984678f7e",
"size": "1556",
"binary": false,
"copies": "1",
"ref": "refs/heads/nectar/train",
"path": "horizon/utils/scss_filter.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "598098"
},
{
"name": "JavaScript",
"bytes": "2474550"
},
{
"name": "Python",
"bytes": "5323984"
},
{
"name": "SCSS",
"bytes": "132603"
},
{
"name": "Shell",
"bytes": "7466"
}
],
"symlink_target": ""
}
|
import re
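# (wo)* matches the group zero or more times, so plain 'Batman' matches too.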
batregex = re.compile(r'Bat(wo)*man')
mo1=batregex.search('The adventures of Batman')
print(mo1.group())
mo2=batregex.search('The adventures of Batwoman')
print(mo2.group())
mo3=batregex.search('The adventures of Batwowowowowowowoman')
print(mo3.group())
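# (wo)+ requires at least one occurrence of the group, so 'Batman' does not match.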
batregex1 = re.compile(r'Bat(wo)+man')
mo4=batregex1.search('The adventures of Batwoman')
print(mo4.group())
mo5=batregex1.search('The adventures of Batwowowoman')
print(mo5.group())
mo6=batregex1.search('The adventures of Batman')
print(mo6 is None)
|
{
"content_hash": "0bf82e5e3cb26f08ce3d5d8e92edd9b9",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 61,
"avg_line_length": 25.75,
"alnum_prop": 0.7631067961165049,
"repo_name": "zac11/AutomateThingsWithPython",
"id": "b81060781fc32d42aec9dbb5efefc3b2de44b027",
"size": "515",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Regex/one_or_more.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "33191"
}
],
"symlink_target": ""
}
|