from electrum_ltc.plugin import hook
from electrum_ltc.util import print_msg, raw_input, print_stderr
from electrum_ltc.logging import get_logger

from ..hw_wallet.cmdline import CmdLineHandler
from .coldcard import ColdcardPlugin

_logger = get_logger(__name__)


class ColdcardCmdLineHandler(CmdLineHandler):

    def get_passphrase(self, msg, confirm):
        raise NotImplementedError

    def get_pin(self, msg, *, show_strength=True):
        raise NotImplementedError

    def prompt_auth(self, msg):
        raise NotImplementedError

    def yes_no_question(self, msg):
        print_msg(msg)
        return raw_input() in 'yY'

    def stop(self):
        pass

    def update_status(self, b):
        _logger.info(f'hw device status {b}')

    def finished(self):
        pass


class Plugin(ColdcardPlugin):
    handler = ColdcardCmdLineHandler()

    @hook
    def init_keystore(self, keystore):
        if not isinstance(keystore, self.keystore_class):
            return
        keystore.handler = self.handler

    def create_handler(self, window):
        return self.handler
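
# A minimal usage sketch (illustrative only; real calls come through
# Electrum's plugin and device machinery):
#
#   handler = ColdcardCmdLineHandler()
#   if handler.yes_no_question('Confirm the transaction on the device?'):
#       pass  # proceed with signing
#
# Note that yes_no_question treats an empty reply as a yes, because '' is a
# substring of 'yY'.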
# EOF

[dataset meta: source github · repo vialectrum/vialectrum · path electrum_ltc/plugins/coldcard/cmdline.py · license mit]

"""
Miscelanius utilities which are helpful sometime.
"""
import logging
try:
from urlparse import urlsplit
except ImportError:
from urllib.parse import urlsplit
from hashlib import sha1
import os
import shutil
def unique_file(path):
"""
Drop non-unique lines in the file.
Return number of unique lines.
"""
lines = set()
count = 0
with open(path) as inf:
for line in inf:
lines.add(line)
count += 1
logging.debug('Read %d lines from %s, unique: %d' % (count, path,
len(lines)))
with open(path, 'w') as out:
out.write(''.join(lines))
return len(lines)
def unique_host(path):
"""
Filter out urls with duplicated hostnames.
"""
hosts = set()
lines = []
count = 0
with open(path) as inf:
for line in inf:
host = urlsplit(line).netloc
if not host in hosts:
lines.append(line)
hosts.add(host)
count += 1
logging.debug('Read %d lines from %s, unique hosts: %d' % (count, path,
len(lines)))
with open(path, 'w') as out:
out.write(''.join(lines))
return len(lines)
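
# Example (hypothetical file contents): given urls.txt containing
#   http://example.com/a
#   http://example.com/b
#   http://other.org/c
# unique_host('urls.txt') rewrites the file with one URL per hostname (the
# first and third lines here) and returns 2.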
def hashed_path_details(url, ext='jpg', base_dir=None):
    _hash = sha1(url).hexdigest()
    a, b, tail = _hash[:2], _hash[2:4], _hash[4:]
    directory = '%s/%s' % (a, b)
    if base_dir is not None:
        directory = '%s/%s' % (base_dir, directory)
    if ext is not None:
        filename = '%s.%s' % (tail, ext)
    else:
        filename = tail
    full_path = '%s/%s' % (directory, filename)
    return {'directory': directory,
            'filename': filename,
            'full_path': full_path,
            }


def hashed_path(url, ext='jpg', base_dir=None):
    dtl = hashed_path_details(url, ext=ext, base_dir=base_dir)
    return dtl['full_path']


# Alias for backward compatibility
def hash_path(*args, **kwargs):
    logging.debug('This function name is deprecated. Please use the '
                  'hashed_path function')
    return hashed_path(*args, **kwargs)
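
# A sketch of the sharded layout this produces (the digest shown is
# illustrative, not a real SHA-1):
#
#   hashed_path_details('http://example.com/pic', base_dir='cache')
#   -> {'directory': 'cache/ab/cd',
#       'filename': 'ef0123....jpg',
#       'full_path': 'cache/ab/cd/ef0123....jpg'}
#
# i.e. the first four hex digits of sha1(url) pick a two-level directory
# shard and the remaining digits name the file.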
def clear_directory(path):
    """
    Recursively delete all directories and files in the specified
    directory.
    """
    for root, dirs, files in os.walk(path):
        for fname in files:
            os.unlink(os.path.join(root, fname))
        for _dir in dirs:
            shutil.rmtree(os.path.join(root, _dir))


# Bad name, not clear logic
#def smart_copy_file(filename, dst_root):
    #dir_path, fname = os.path.split(filename)
    #dst_dir = os.path.join(dst_root, dir_path)
    #if not os.path.exists(dst_dir):
        #os.makedirs(dst_dir)
    #import pdb; pdb.set_trace()
    #shutil.copy(filename, dst_dir)

[dataset meta: source github · repo subeax/grab · path grab/tools/files.py · license mit]

from rest_framework.serializers import (
    ModelSerializer,
    HyperlinkedIdentityField,
    SerializerMethodField,
)

from posts.models import Post
from comments.api.serializers import CommentSerializer
from comments.models import Comment


class PostCreateUpdateSerializer(ModelSerializer):
    class Meta:
        model = Post
        fields = [
            'title',
            # 'slug',
            'content',
            'publish',
        ]


class PostDetailsSerializer(ModelSerializer):
    comments = SerializerMethodField()

    class Meta:
        model = Post
        fields = [
            'id',
            'title',
            'slug',
            'content',
            'publish',
            'comments',
        ]

    def get_comments(self, obj):
        c_qs = Comment.objects.filter_by_instance(obj)
        comments = CommentSerializer(c_qs, many=True).data
        return comments


class PostListSerializer(ModelSerializer):
    url = HyperlinkedIdentityField(
        view_name='posts-api:detail',
        lookup_field='slug',
    )
    user = SerializerMethodField()

    class Meta:
        model = Post
        fields = [
            'url',
            'title',
            'user',
            'content',
            'publish',
        ]

    def get_user(self, obj):
        return str(obj.user.username)
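
# For illustration, one PostListSerializer entry might render as (all
# values hypothetical):
#
#   {
#       "url": "http://localhost:8000/api/posts/my-first-post/",
#       "title": "My First Post",
#       "user": "alice",
#       "content": "...",
#       "publish": "2017-06-01"
#   }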

[dataset meta: source github · repo videetssinghai/Blog-Rest-Api · path posts/api/serializers.py · license mit]

"""Implements a source for reading Avro files."""
import cStringIO
import os
import zlib
import avro
from avro import datafile
from avro import io as avroio
from avro import schema
import apache_beam as beam
from apache_beam.io import filebasedsource
from apache_beam.io import filebasedsink
from apache_beam.io import iobase
from apache_beam.io.filesystem import CompressionTypes
from apache_beam.io.iobase import Read
from apache_beam.transforms import PTransform
__all__ = ['ReadFromAvro', 'WriteToAvro']
class ReadFromAvro(PTransform):
"""A ``PTransform`` for reading avro files."""
def __init__(self, file_pattern=None, min_bundle_size=0, validate=True):
"""Initializes ``ReadFromAvro``.
Uses source '_AvroSource' to read a set of Avro files defined by a given
file pattern.
If '/mypath/myavrofiles*' is a file-pattern that points to a set of Avro
files, a ``PCollection`` for the records in these Avro files can be created
in the following manner.
p = df.Pipeline(argv=pipeline_args)
records = p | 'Read' >> df.io.ReadFromAvro('/mypath/myavrofiles*')
Each record of this ``PCollection`` will contain a single record read from a
source. Records that are of simple types will be mapped into corresponding
Python types. Records that are of Avro type 'RECORD' will be mapped to
Python dictionaries that comply with the schema contained in the Avro file
that contains those records. In this case, keys of each dictionary
will contain the corresponding field names and will be of type ``string``
while the values of the dictionary will be of the type defined in the
corresponding Avro schema.
For example, if schema of the Avro file is the following.
{"namespace": "example.avro","type": "record","name": "User","fields":
[{"name": "name", "type": "string"},
{"name": "favorite_number", "type": ["int", "null"]},
{"name": "favorite_color", "type": ["string", "null"]}]}
Then records generated by ``AvroSource`` will be dictionaries of the
following form.
{u'name': u'Alyssa', u'favorite_number': 256, u'favorite_color': None}).
Args:
file_pattern: the set of files to be read.
min_bundle_size: the minimum size in bytes, to be considered when
splitting the input into bundles.
validate: flag to verify that the files exist during the pipeline
creation time.
"""
super(ReadFromAvro, self).__init__()
self._source = _AvroSource(file_pattern, min_bundle_size, validate=validate)
def expand(self, pvalue):
return pvalue.pipeline | Read(self._source)
def display_data(self):
return {'source_dd': self._source}
class _AvroUtils(object):
@staticmethod
def read_meta_data_from_file(f):
"""Reads metadata from a given Avro file.
Args:
f: Avro file to read.
Returns:
a tuple containing the codec, schema, and the sync marker of the Avro
file.
Raises:
ValueError: if the file does not start with the byte sequence defined in
the specification.
"""
if f.tell() > 0:
f.seek(0)
decoder = avroio.BinaryDecoder(f)
header = avroio.DatumReader().read_data(datafile.META_SCHEMA,
datafile.META_SCHEMA, decoder)
if header.get('magic') != datafile.MAGIC:
raise ValueError('Not an Avro file. File header should start with %s but'
'started with %s instead.', datafile.MAGIC,
header.get('magic'))
meta = header['meta']
if datafile.CODEC_KEY in meta:
codec = meta[datafile.CODEC_KEY]
else:
codec = 'null'
schema_string = meta[datafile.SCHEMA_KEY]
sync_marker = header['sync']
return codec, schema_string, sync_marker
@staticmethod
def read_block_from_file(f, codec, schema, expected_sync_marker):
"""Reads a block from a given Avro file.
Args:
f: Avro file to read.
codec: The codec to use for block-level decompression.
Supported codecs: 'null', 'deflate', 'snappy'
schema: Avro Schema definition represented as JSON string.
expected_sync_marker: Avro synchronization marker. If the block's sync
marker does not match with this parameter then ValueError is thrown.
Returns:
A single _AvroBlock.
Raises:
ValueError: If the block cannot be read properly because the file doesn't
match the specification.
"""
offset = f.tell()
decoder = avroio.BinaryDecoder(f)
num_records = decoder.read_long()
block_size = decoder.read_long()
block_bytes = decoder.read(block_size)
sync_marker = decoder.read(len(expected_sync_marker))
if sync_marker != expected_sync_marker:
raise ValueError('Unexpected sync marker (actual "%s" vs expected "%s"). '
'Maybe the underlying avro file is corrupted?',
sync_marker, expected_sync_marker)
size = f.tell() - offset
return _AvroBlock(block_bytes, num_records, codec, schema, offset, size)
@staticmethod
def advance_file_past_next_sync_marker(f, sync_marker):
buf_size = 10000
data = f.read(buf_size)
while data:
pos = data.find(sync_marker)
if pos >= 0:
# Adjusting the current position to the ending position of the sync
# marker.
backtrack = len(data) - pos - len(sync_marker)
f.seek(-1 * backtrack, os.SEEK_CUR)
return True
else:
if f.tell() >= len(sync_marker):
# Backtracking in case we partially read the sync marker during the
# previous read. We only have to backtrack if there are at least
# len(sync_marker) bytes before current position. We only have to
# backtrack (len(sync_marker) - 1) bytes.
f.seek(-1 * (len(sync_marker) - 1), os.SEEK_CUR)
data = f.read(buf_size)
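
# For orientation, an Avro container file (per the Avro 1.x spec) is laid
# out as:
#   header: magic 'Obj\x01' | file metadata map | 16-byte sync marker
#   block:  record count (long) | serialized byte size (long) | data | sync
# read_block_from_file above consumes exactly one such block, and
# advance_file_past_next_sync_marker is what lets a bundle that starts at an
# arbitrary byte offset resynchronize on a block boundary.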
class _AvroBlock(object):
  """Represents a block of an Avro file."""

  def __init__(self, block_bytes, num_records, codec, schema_string,
               offset, size):
    # Decompress data early on (if needed) and thus decrease the number of
    # parallel copies of the data in memory at any given time during block
    # iteration.
    self._decompressed_block_bytes = self._decompress_bytes(block_bytes,
                                                            codec)
    self._num_records = num_records
    self._schema = schema.parse(schema_string)
    self._offset = offset
    self._size = size

  def size(self):
    return self._size

  def offset(self):
    return self._offset

  @staticmethod
  def _decompress_bytes(data, codec):
    if codec == 'null':
      return data
    elif codec == 'deflate':
      # zlib.MAX_WBITS is the window size. The '-' sign indicates that this
      # is raw data (without headers). See zlib and Avro documentation for
      # more details.
      return zlib.decompress(data, -zlib.MAX_WBITS)
    elif codec == 'snappy':
      # Snappy is an optional avro codec.
      # See Snappy and Avro documentation for more details.
      try:
        import snappy
      except ImportError:
        raise ValueError('Snappy does not seem to be installed.')

      # Compressed data includes a 4-byte CRC32 checksum which we verify.
      # We take care to avoid extra copies of data while slicing large
      # objects by use of a buffer.
      result = snappy.decompress(buffer(data)[:-4])
      avroio.BinaryDecoder(cStringIO.StringIO(data[-4:])).check_crc32(result)
      return result
    else:
      raise ValueError('Unknown codec: %r' % codec)

  def num_records(self):
    return self._num_records

  def records(self):
    decoder = avroio.BinaryDecoder(
        cStringIO.StringIO(self._decompressed_block_bytes))
    reader = avroio.DatumReader(
        writers_schema=self._schema, readers_schema=self._schema)

    current_record = 0
    while current_record < self._num_records:
      yield reader.read(decoder)
      current_record += 1


class _AvroSource(filebasedsource.FileBasedSource):
  """A source for reading Avro files.

  ``_AvroSource`` is implemented using the file-based source framework
  available in module 'filebasedsource'. Hence please refer to module
  'filebasedsource' to fully understand how this source implements operations
  common to all file-based sources such as file-pattern expansion and
  splitting into bundles for parallel processing.
  """

  def read_records(self, file_name, range_tracker):
    next_block_start = -1

    def split_points_unclaimed(stop_position):
      if next_block_start >= stop_position:
        # Next block starts at or after the suggested stop position. Hence
        # there will not be split points to be claimed for the range ending
        # at the suggested stop position.
        return 0

      return iobase.RangeTracker.SPLIT_POINTS_UNKNOWN

    range_tracker.set_split_points_unclaimed_callback(split_points_unclaimed)

    start_offset = range_tracker.start_position()
    if start_offset is None:
      start_offset = 0

    with self.open_file(file_name) as f:
      codec, schema_string, sync_marker = _AvroUtils.read_meta_data_from_file(
          f)

      # We have to start at the current position if the previous bundle ended
      # at the end of a sync marker.
      start_offset = max(0, start_offset - len(sync_marker))
      f.seek(start_offset)
      _AvroUtils.advance_file_past_next_sync_marker(f, sync_marker)

      while range_tracker.try_claim(f.tell()):
        block = _AvroUtils.read_block_from_file(f, codec, schema_string,
                                                sync_marker)
        next_block_start = block.offset() + block.size()
        for record in block.records():
          yield record


class WriteToAvro(beam.transforms.PTransform):
  """A ``PTransform`` for writing avro files."""

  def __init__(self,
               file_path_prefix,
               schema,
               codec='deflate',
               file_name_suffix='',
               num_shards=0,
               shard_name_template=None,
               mime_type='application/x-avro'):
    """Initialize a WriteToAvro transform.

    Args:
      file_path_prefix: The file path to write to. The files written will
        begin with this prefix, followed by a shard identifier (see
        num_shards), and end in a common extension, if given by
        file_name_suffix. In most cases, only this argument is specified and
        num_shards, shard_name_template, and file_name_suffix use default
        values.
      schema: The schema to use, as returned by avro.schema.parse
      codec: The codec to use for block-level compression. Any string
        supported by the Avro specification is accepted (for example 'null').
      file_name_suffix: Suffix for the files written.
      num_shards: The number of files (shards) used for output. If not set,
        the service will decide on the optimal number of shards.
        Constraining the number of shards is likely to reduce the performance
        of a pipeline. Setting this value is not recommended unless you
        require a specific number of output files.
      shard_name_template: A template string containing placeholders for the
        shard number and shard count. Currently only '' and '-SSSSS-of-NNNNN'
        are patterns accepted by the service. When constructing a filename
        for a particular shard number, the upper-case letters 'S' and 'N' are
        replaced with the 0-padded shard number and shard count respectively.
        This argument can be '' in which case it behaves as if num_shards was
        set to 1 and only one file will be generated. The default pattern
        used is '-SSSSS-of-NNNNN'.
      mime_type: The MIME type to use for the produced files, if the
        filesystem supports specifying MIME types.

    Returns:
      A WriteToAvro transform usable for writing.
    """
    self._sink = _AvroSink(file_path_prefix, schema, codec, file_name_suffix,
                           num_shards, shard_name_template, mime_type)

  def expand(self, pcoll):
    return pcoll | beam.io.iobase.Write(self._sink)

  def display_data(self):
    return {'sink_dd': self._sink}
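
# A minimal usage sketch (the pipeline, output path, and schema below are
# illustrative, not part of this module):
#
#   user_schema = avro.schema.parse(
#       '{"type": "record", "name": "User",'
#       ' "fields": [{"name": "name", "type": "string"}]}')
#   records | 'Write' >> WriteToAvro('/tmp/users', user_schema)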
class _AvroSink(filebasedsink.FileBasedSink):
  """A sink to avro files."""

  def __init__(self,
               file_path_prefix,
               schema,
               codec,
               file_name_suffix,
               num_shards,
               shard_name_template,
               mime_type):
    super(_AvroSink, self).__init__(
        file_path_prefix,
        file_name_suffix=file_name_suffix,
        num_shards=num_shards,
        shard_name_template=shard_name_template,
        coder=None,
        mime_type=mime_type,
        # Compression happens at the block level using the supplied codec,
        # and not at the file level.
        compression_type=CompressionTypes.UNCOMPRESSED)
    self._schema = schema
    self._codec = codec

  def open(self, temp_path):
    file_handle = super(_AvroSink, self).open(temp_path)
    return avro.datafile.DataFileWriter(
        file_handle, avro.io.DatumWriter(), self._schema, self._codec)

  def write_record(self, writer, value):
    writer.append(value)

  def display_data(self):
    res = super(self.__class__, self).display_data()
    res['codec'] = str(self._codec)
    res['schema'] = str(self._schema)
    return res

[dataset meta: source github · repo dhalperi/incubator-beam · path sdks/python/apache_beam/io/avroio.py · license apache-2.0]

'''
Execute a command and read the output as JSON. The JSON data is then directly overlaid onto the minion's Pillar data.
'''
from __future__ import absolute_import

# Don't "fix" the above docstring to put it on two lines, as the sphinx
# autosummary pulls only the first line for its description.

# Import python libs
import logging
import json

# Set up logging
log = logging.getLogger(__name__)


def ext_pillar(minion_id,  # pylint: disable=W0613
               pillar,  # pylint: disable=W0613
               command):
    '''
    Execute a command and read the output as JSON
    '''
    try:
        command = command.replace('%s', minion_id)
        return json.loads(__salt__['cmd.run'](command))
    except Exception:
        log.critical(
            'JSON data from {0} failed to parse'.format(command)
        )
        return {}
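
# A minimal sketch of wiring this up in the Salt master config (the command
# shown is illustrative):
#
#   ext_pillar:
#     - cmd_json: 'cat /srv/pillar/%s.json'
#
# Every occurrence of '%s' in the command is replaced with the minion id
# before the command runs, and the command's stdout must be a JSON object.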

[dataset meta: source github · repo stephane-martin/salt-debian-packaging · path salt-2016.3.2/salt/pillar/cmd_json.py · license apache-2.0]

"""
Authentication backend classes for Desktop.
These classes should implement the interface described at:
http://docs.djangoproject.com/en/1.0/topics/auth/#writing-an-authentication-backend
In addition, the User classes they return must support:
- get_groups() (returns a list of strings)
- get_home_directory() (returns None or a string)
- has_hue_permission(action, app) -> boolean
Because Django's models are sometimes unfriendly, you'll want
User to remain a django.contrib.auth.models.User object.
"""
from django.contrib.auth.models import User
import django.contrib.auth.backends
import logging
import desktop.conf
from desktop import metrics
from django.utils.importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from useradmin.models import get_profile, get_default_user_group, UserProfile
from useradmin.views import import_ldap_users
from useradmin import ldap_access
import pam
from django_auth_ldap.backend import LDAPBackend
import ldap
from django_auth_ldap.config import LDAPSearch
LOG = logging.getLogger(__name__)
def load_augmentation_class():
"""
Loads the user augmentation class.
Similar in spirit to django.contrib.auth.load_backend
"""
try:
class_name = desktop.conf.AUTH.USER_AUGMENTOR.get()
i = class_name.rfind('.')
module, attr = class_name[:i], class_name[i+1:]
mod = import_module(module)
klass = getattr(mod, attr)
LOG.info("Augmenting users with class: %s" % (klass,))
return klass
except:
LOG.exception('failed to augment class')
raise ImproperlyConfigured("Could not find user_augmentation_class: %s" % (class_name,))
_user_augmentation_class = None
def get_user_augmentation_class():
global _user_augmentation_class
if _user_augmentation_class is None:
_user_augmentation_class = load_augmentation_class()
return _user_augmentation_class
def rewrite_user(user):
"""
Rewrites the user according to the augmentation class.
We currently only re-write specific attributes,
though this could be generalized.
"""
augment = get_user_augmentation_class()(user)
for attr in ("get_groups", "get_home_directory", "has_hue_permission"):
setattr(user, attr, getattr(augment, attr))
return user
class DefaultUserAugmentor(object):
def __init__(self, parent):
self._parent = parent
def _get_profile(self):
return get_profile(self._parent)
def get_groups(self):
return self._get_profile().get_groups()
def get_home_directory(self):
return self._get_profile().home_directory
def has_hue_permission(self, action, app):
return self._get_profile().has_hue_permission(action=action, app=app)
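
# For illustration (a sketch; check the shipped hue.ini for the exact
# spelling): the class loaded by load_augmentation_class() comes from the
# user_augmentor setting, and pointing it at the stock augmentor above would
# look like:
#
#   [desktop]
#     [[auth]]
#     user_augmentor=desktop.auth.backend.DefaultUserAugmentor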
def find_user(username):
  try:
    user = User.objects.get(username=username)
    LOG.debug("Found user %s in the db" % username)
  except User.DoesNotExist:
    user = None
  return user


def create_user(username, password):
  LOG.info("Materializing user %s in the database" % username)
  user = User(username=username)
  if password is None:
    user.set_unusable_password()
  else:
    user.set_password(password)
  user.is_superuser = True
  user.save()
  return user


def find_or_create_user(username, password=None):
  user = find_user(username)
  if user is None:
    user = create_user(username, password)
  return user


def ensure_has_a_group(user):
  default_group = get_default_user_group()

  if not user.groups.exists() and default_group is not None:
    user.groups.add(default_group)
    user.save()


class DesktopBackendBase(object):
  """
  Abstract base class for providing external authentication schemes.
  Extend this class and implement check_auth.
  """

  def authenticate(self, username, password):
    if self.check_auth(username, password):
      user = find_or_create_user(username)
      user = rewrite_user(user)
      return user
    else:
      return None

  def get_user(self, user_id):
    try:
      user = User.objects.get(pk=user_id)
      user = rewrite_user(user)
      return user
    except User.DoesNotExist:
      return None

  def check_auth(self, username, password):
    """
    Implementations should return a boolean value which determines
    whether the given username and password pair is valid.
    """
    raise NotImplementedError("Abstract class - must implement check_auth")


class AllowFirstUserDjangoBackend(django.contrib.auth.backends.ModelBackend):
  """
  Allows the first user in, but otherwise delegates to Django's
  ModelBackend.
  """

  def authenticate(self, username=None, password=None):
    user = super(AllowFirstUserDjangoBackend, self).authenticate(username,
                                                                 password)

    if user is not None:
      if user.is_active:
        user = rewrite_user(user)
        return user
      return user

    if self.is_first_login_ever():
      user = find_or_create_user(username, password)
      user = rewrite_user(user)

      userprofile = get_profile(user)
      userprofile.first_login = False
      userprofile.save()

      ensure_has_a_group(user)

      return user

    return None

  def get_user(self, user_id):
    user = super(AllowFirstUserDjangoBackend, self).get_user(user_id)
    user = rewrite_user(user)
    return user

  def is_first_login_ever(self):
    """Return true if no one has ever logged in to Desktop yet."""
    return User.objects.count() == 0


class OAuthBackend(DesktopBackendBase):
  """
  Deprecated, use liboauth.backend.OAuthBackend instead.

  Heavily based on Twitter OAuth:
  https://github.com/simplegeo/python-oauth2#logging-into-django-w-twitter

  Requires: python-oauth2 and httplib2
    build/env/bin/python setup.py install https://github.com/simplegeo/python-oauth2
    build/env/bin/pip install httplib2
  """

  @metrics.oauth_authentication_time
  def authenticate(self, access_token):
    username = access_token['screen_name']
    password = access_token['oauth_token_secret']

    # Could save oauth_token detail in the user profile here

    user = find_or_create_user(username, password)
    user.is_superuser = False
    user.save()

    ensure_has_a_group(user)

    return user

  @classmethod
  def manages_passwords_externally(cls):
    return True


class AllowAllBackend(DesktopBackendBase):
  """
  Authentication backend that allows any user to login as long as they have
  a username. The users will be added to the 'default_user_group'.

  We want to ensure that already created users (e.g., from other backends)
  retain their superuser status, and any new users are not superusers by
  default.
  """

  def check_auth(self, username, password):
    user = find_user(username)
    if user is None:
      user = create_user(username, password)
      user.is_superuser = False
      user.save()

    ensure_has_a_group(user)

    return user

  @classmethod
  def manages_passwords_externally(cls):
    return True


class DemoBackend(django.contrib.auth.backends.ModelBackend):
  """
  Automatically logs in users that have no session, using a new user
  account.
  """

  def authenticate(self, username, password):
    user = super(DemoBackend, self).authenticate(username, password)

    if not user:
      username = self._random_name()

      user = find_or_create_user(username, None)
      user.is_superuser = False
      user.save()

      ensure_has_a_group(user)

    user = rewrite_user(user)

    return user

  def get_user(self, user_id):
    user = super(DemoBackend, self).get_user(user_id)
    user = rewrite_user(user)
    return user

  def _random_name(self):
    import string
    import random

    N = 7
    return ''.join(random.choice(string.ascii_lowercase + string.digits)
                   for _ in range(N))


class PamBackend(DesktopBackendBase):
  """
  Authentication backend that uses PAM to authenticate logins. The first
  user to login will become the superuser.
  """

  @metrics.pam_authentication_time
  def check_auth(self, username, password):
    if pam.authenticate(username, password,
                        desktop.conf.AUTH.PAM_SERVICE.get()):
      is_super = False
      if User.objects.count() == 0:
        is_super = True

      try:
        user = User.objects.get(username=username)
      except User.DoesNotExist:
        user = find_or_create_user(username, None)

        if user is not None and user.is_active:
          profile = get_profile(user)
          profile.creation_method = UserProfile.CreationMethod.EXTERNAL
          profile.save()
          user.is_superuser = is_super

          ensure_has_a_group(user)

          user.save()

      user = rewrite_user(user)
      return user

    return None

  @classmethod
  def manages_passwords_externally(cls):
    return True


class LdapBackend(object):
  """
  Authentication backend that uses LDAP to authenticate logins.
  The first user to login will become the superuser.
  """

  def __init__(self):
    # Delegate to django_auth_ldap.LDAPBackend
    class _LDAPBackend(LDAPBackend):
      def get_or_create_user(self, username, ldap_user):
        username = desktop.conf.LDAP.FORCE_USERNAME_LOWERCASE.get() and username.lower() or username

        if desktop.conf.LDAP.IGNORE_USERNAME_CASE.get():
          try:
            return User.objects.get(username__iexact=username), False
          except User.DoesNotExist:
            return User.objects.get_or_create(username=username)
        else:
          return User.objects.get_or_create(username=username)

    self._backend = _LDAPBackend()

  def add_ldap_config(self, ldap_config):
    if ldap_config.LDAP_URL.get() is None:
      LOG.warn("Could not find LDAP URL required for authentication.")
      return None
    else:
      setattr(self._backend.settings, 'SERVER_URI', ldap_config.LDAP_URL.get())

    if ldap_config.SEARCH_BIND_AUTHENTICATION.get():
      # New Search/Bind Auth
      base_dn = ldap_config.BASE_DN.get()
      user_name_attr = ldap_config.USERS.USER_NAME_ATTR.get()
      user_filter = ldap_config.USERS.USER_FILTER.get()
      if not user_filter.startswith('('):
        user_filter = '(' + user_filter + ')'

      if ldap_config.BIND_DN.get():
        bind_dn = ldap_config.BIND_DN.get()
        setattr(self._backend.settings, 'BIND_DN', bind_dn)

        bind_password = ldap_config.BIND_PASSWORD.get()
        if not bind_password:
          bind_password = ldap_config.BIND_PASSWORD_SCRIPT.get()
        setattr(self._backend.settings, 'BIND_PASSWORD', bind_password)

      if user_filter is None:
        search_bind_results = LDAPSearch(base_dn,
            ldap.SCOPE_SUBTREE, "(" + user_name_attr + "=%(user)s)")
      else:
        search_bind_results = LDAPSearch(base_dn,
            ldap.SCOPE_SUBTREE, "(&(" + user_name_attr + "=%(user)s)" + user_filter + ")")

      setattr(self._backend.settings, 'USER_SEARCH', search_bind_results)
    else:
      nt_domain = ldap_config.NT_DOMAIN.get()
      if nt_domain is None:
        pattern = ldap_config.LDAP_USERNAME_PATTERN.get()
        pattern = pattern.replace('<username>', '%(user)s')
        setattr(self._backend.settings, 'USER_DN_TEMPLATE', pattern)
      else:
        # %(user)s is a special string that will get replaced during the
        # authentication process
        setattr(self._backend.settings, 'USER_DN_TEMPLATE', "%(user)s@" + nt_domain)

    # Certificate-related config settings
    if ldap_config.LDAP_CERT.get():
      setattr(self._backend.settings, 'START_TLS', ldap_config.USE_START_TLS.get())
      ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
      ldap.set_option(ldap.OPT_X_TLS_CACERTFILE, ldap_config.LDAP_CERT.get())
    else:
      setattr(self._backend.settings, 'START_TLS', False)
      ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)

    if ldap_config.FOLLOW_REFERRALS.get():
      ldap.set_option(ldap.OPT_REFERRALS, 1)
    else:
      ldap.set_option(ldap.OPT_REFERRALS, 0)

  def add_ldap_config_for_server(self, server):
    if desktop.conf.LDAP.LDAP_SERVERS.get():
      # Choose from multiple server configs
      if server in desktop.conf.LDAP.LDAP_SERVERS.get():
        self.add_ldap_config(desktop.conf.LDAP.LDAP_SERVERS.get()[server])
    else:
      self.add_ldap_config(desktop.conf.LDAP)

  @metrics.ldap_authentication_time
  def authenticate(self, username=None, password=None, server=None):
    self.add_ldap_config_for_server(server)

    username_filter_kwargs = ldap_access.get_ldap_user_kwargs(username)

    # Do this check up here, because the auth call creates a django user
    # upon first login per user.
    is_super = False
    if not UserProfile.objects.filter(creation_method=str(UserProfile.CreationMethod.EXTERNAL)).exists():
      # If there are no LDAP users already in the system, the first one will
      # become a superuser.
      is_super = True
    elif User.objects.filter(**username_filter_kwargs).exists():
      # If the user already exists, we shouldn't change its superuser
      # privileges. However, if there's a naming conflict with a non-external
      # user, we should do the safe thing and turn off superuser privs.
      existing_user = User.objects.get(**username_filter_kwargs)
      existing_profile = get_profile(existing_user)
      if existing_profile.creation_method == str(UserProfile.CreationMethod.EXTERNAL):
        is_super = User.objects.get(**username_filter_kwargs).is_superuser
    elif not desktop.conf.LDAP.CREATE_USERS_ON_LOGIN.get():
      return None

    try:
      user = self._backend.authenticate(username, password)
    except ImproperlyConfigured as detail:
      LOG.warn("LDAP was not properly configured: %s", detail)
      return None

    if user is not None and user.is_active:
      profile = get_profile(user)
      profile.creation_method = UserProfile.CreationMethod.EXTERNAL
      profile.save()
      user.is_superuser = is_super

      user = rewrite_user(user)

      ensure_has_a_group(user)

      if desktop.conf.LDAP.SYNC_GROUPS_ON_LOGIN.get():
        self.import_groups(server, user)

    return user

  def get_user(self, user_id):
    user = self._backend.get_user(user_id)
    user = rewrite_user(user)
    return user

  def import_groups(self, server, user):
    connection = ldap_access.get_connection_from_server(server)
    import_ldap_users(connection, user.username, sync_groups=True,
                      import_by_dn=False, server=server)

  @classmethod
  def manages_passwords_externally(cls):
    return True


class SpnegoDjangoBackend(django.contrib.auth.backends.ModelBackend):
  """
  A note about configuration:

  The HTTP/_HOST@REALM principal (where _HOST is the fully qualified domain
  name of the server running Hue) needs to be exported to a keytab file.
  The keytab file can either be located in /etc/krb5.keytab or you can set
  the KRB5_KTNAME environment variable to point to another location
  (e.g. /etc/hue/hue.keytab).
  """

  @metrics.spnego_authentication_time
  def authenticate(self, username=None):
    username = self.clean_username(username)
    is_super = False
    if User.objects.count() == 0:
      is_super = True

    try:
      user = User.objects.get(username=username)
    except User.DoesNotExist:
      user = find_or_create_user(username, None)

      if user is not None and user.is_active:
        profile = get_profile(user)
        profile.creation_method = UserProfile.CreationMethod.EXTERNAL
        profile.save()
        user.is_superuser = is_super

        ensure_has_a_group(user)

        user.save()

    user = rewrite_user(user)

    return user

  def clean_username(self, username):
    if '@' in username:
      return username.split('@')[0]
    return username

  def get_user(self, user_id):
    user = super(SpnegoDjangoBackend, self).get_user(user_id)
    user = rewrite_user(user)
    return user


class RemoteUserDjangoBackend(django.contrib.auth.backends.RemoteUserBackend):
  """
  Delegates to Django's RemoteUserBackend and requires HueRemoteUserMiddleware.
  """

  def authenticate(self, remote_user=None):
    username = self.clean_username(remote_user)
    username = desktop.conf.AUTH.FORCE_USERNAME_LOWERCASE.get() and username.lower() or username
    is_super = False
    if User.objects.count() == 0:
      is_super = True

    try:
      if desktop.conf.AUTH.IGNORE_USERNAME_CASE.get():
        user = User.objects.get(username__iexact=username)
      else:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
      user = find_or_create_user(username, None)

      if user is not None and user.is_active:
        profile = get_profile(user)
        profile.creation_method = UserProfile.CreationMethod.EXTERNAL
        profile.save()
        user.is_superuser = is_super

        ensure_has_a_group(user)

        user.save()

    user = rewrite_user(user)

    return user

  def get_user(self, user_id):
    user = super(RemoteUserDjangoBackend, self).get_user(user_id)
    user = rewrite_user(user)
    return user

[dataset meta: source github · repo yongshengwang/hue · path desktop/core/src/desktop/auth/backend.py · license apache-2.0]

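# Kattis "carrots" is a giveaway problem: the expected output is simply the
# number of problems solved, so each contestant description line is read
# and discarded.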
contestants, problems = [int(x) for x in input().split()]

for i in range(contestants):
    input()

print(problems)

[dataset meta: source github · repo CajetanP/coding-exercises · path OpenKattis/Trivial/carrots.py · license mit]

"""
Implementation of optimized einsum.
"""
from __future__ import division, absolute_import, print_function
from numpy.core.multiarray import c_einsum
from numpy.core.numeric import asarray, asanyarray, result_type
__all__ = ['einsum', 'einsum_path']
einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
einsum_symbols_set = set(einsum_symbols)
def _compute_size_by_dict(indices, idx_dict):
"""
Computes the product of the elements in indices based on the dictionary
idx_dict.
Parameters
----------
indices : iterable
Indices to base the product on.
idx_dict : dictionary
Dictionary of index sizes
Returns
-------
ret : int
The resulting product.
Examples
--------
>>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
90
"""
ret = 1
for i in indices:
ret *= idx_dict[i]
return ret
def _find_contraction(positions, input_sets, output_set):
"""
Finds the contraction for a given set of input and output sets.
Paramaters
----------
positions : iterable
Integer positions of terms used in the contraction.
input_sets : list
List of sets that represent the lhs side of the einsum subscript
output_set : set
Set that represents the rhs side of the overall einsum subscript
Returns
-------
new_result : set
The indices of the resulting contraction
remaining : list
List of sets that have not been contracted, the new set is appended to
the end of this list
idx_removed : set
Indices removed from the entire contraction
idx_contraction : set
The indices used in the current contraction
Examples
--------
# A simple dot product test case
>>> pos = (0, 1)
>>> isets = [set('ab'), set('bc')]
>>> oset = set('ac')
>>> _find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
# A more complex case with additional terms in the contraction
>>> pos = (0, 2)
>>> isets = [set('abd'), set('ac'), set('bdc')]
>>> oset = set('ac')
>>> _find_contraction(pos, isets, oset)
({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
"""
idx_contract = set()
idx_remain = output_set.copy()
remaining = []
for ind, value in enumerate(input_sets):
if ind in positions:
idx_contract |= value
else:
remaining.append(value)
idx_remain |= value
new_result = idx_remain & idx_contract
idx_removed = (idx_contract - new_result)
remaining.append(new_result)
return (new_result, remaining, idx_removed, idx_contract)
def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
    """
    Computes all possible pair contractions, sieves the results based
    on ``memory_limit`` and returns the lowest cost path. This algorithm
    scales factorially with respect to the elements in the list
    ``input_sets``.

    Parameters
    ----------
    input_sets : list
        List of sets that represent the lhs side of the einsum subscript
    output_set : set
        Set that represents the rhs side of the overall einsum subscript
    idx_dict : dictionary
        Dictionary of index sizes
    memory_limit : int
        The maximum number of elements in a temporary array

    Returns
    -------
    path : list
        The optimal contraction order within the memory limit constraint.

    Examples
    --------
    >>> isets = [set('abd'), set('ac'), set('bdc')]
    >>> oset = set('')
    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
    >>> _optimal_path(isets, oset, idx_sizes, 5000)
    [(0, 2), (0, 1)]
    """

    full_results = [(0, [], input_sets)]
    for iteration in range(len(input_sets) - 1):
        iter_results = []

        # Compute all unique pairs
        comb_iter = []
        for x in range(len(input_sets) - iteration):
            for y in range(x + 1, len(input_sets) - iteration):
                comb_iter.append((x, y))

        for curr in full_results:
            cost, positions, remaining = curr
            for con in comb_iter:

                # Find the contraction
                cont = _find_contraction(con, remaining, output_set)
                new_result, new_input_sets, idx_removed, idx_contract = cont

                # Sieve the results based on memory_limit
                new_size = _compute_size_by_dict(new_result, idx_dict)
                if new_size > memory_limit:
                    continue

                # Find cost
                new_cost = _compute_size_by_dict(idx_contract, idx_dict)
                if idx_removed:
                    new_cost *= 2

                # Build (total_cost, positions, indices_remaining)
                new_cost += cost
                new_pos = positions + [con]
                iter_results.append((new_cost, new_pos, new_input_sets))

        # Update list to iterate over
        full_results = iter_results

    # If we have not found anything return single einsum contraction
    if len(full_results) == 0:
        return [tuple(range(len(input_sets)))]

    path = min(full_results, key=lambda x: x[0])[1]
    return path
def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
    """
    Finds the path by contracting the best pair until the input list is
    exhausted. The best pair is found by minimizing the tuple
    ``(-prod(indices_removed), cost)``. What this amounts to is prioritizing
    matrix multiplication or inner product operations, then Hadamard like
    operations, and finally outer operations. Outer products are limited by
    ``memory_limit``. This algorithm scales cubically with respect to the
    number of elements in the list ``input_sets``.

    Parameters
    ----------
    input_sets : list
        List of sets that represent the lhs side of the einsum subscript
    output_set : set
        Set that represents the rhs side of the overall einsum subscript
    idx_dict : dictionary
        Dictionary of index sizes
    memory_limit : int
        The maximum number of elements in a temporary array

    Returns
    -------
    path : list
        The greedy contraction order within the memory limit constraint.

    Examples
    --------
    >>> isets = [set('abd'), set('ac'), set('bdc')]
    >>> oset = set('')
    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
    >>> _greedy_path(isets, oset, idx_sizes, 5000)
    [(0, 2), (0, 1)]
    """

    if len(input_sets) == 1:
        return [(0,)]

    path = []
    for iteration in range(len(input_sets) - 1):
        iteration_results = []
        comb_iter = []

        # Compute all unique pairs
        for x in range(len(input_sets)):
            for y in range(x + 1, len(input_sets)):
                comb_iter.append((x, y))

        for positions in comb_iter:

            # Find the contraction
            contract = _find_contraction(positions, input_sets, output_set)
            idx_result, new_input_sets, idx_removed, idx_contract = contract

            # Sieve the results based on memory_limit
            if _compute_size_by_dict(idx_result, idx_dict) > memory_limit:
                continue

            # Build sort tuple
            removed_size = _compute_size_by_dict(idx_removed, idx_dict)
            cost = _compute_size_by_dict(idx_contract, idx_dict)
            sort = (-removed_size, cost)

            # Add contraction to possible choices
            iteration_results.append([sort, positions, new_input_sets])

        # If we did not find a new contraction contract remaining
        if len(iteration_results) == 0:
            path.append(tuple(range(len(input_sets))))
            break

        # Sort based on first index
        best = min(iteration_results, key=lambda x: x[0])
        path.append(best[1])
        input_sets = best[2]

    return path
def _parse_einsum_input(operands):
    """
    A reproduction of einsum's C-side parsing in Python.

    Returns
    -------
    input_strings : str
        Parsed input strings
    output_string : str
        Parsed output string
    operands : list of array_like
        The operands to use in the numpy contraction

    Examples
    --------
    The operand list is simplified to reduce printing:

    >>> a = np.random.rand(4, 4)
    >>> b = np.random.rand(4, 4, 4)
    >>> _parse_einsum_input(('...a,...a->...', a, b))
    ('za,xza', 'xz', [a, b])

    >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
    ('za,xza', 'xz', [a, b])
    """

    if len(operands) == 0:
        raise ValueError("No input operands")

    if isinstance(operands[0], str):
        subscripts = operands[0].replace(" ", "")
        operands = [asanyarray(v) for v in operands[1:]]

        # Ensure all characters are valid
        for s in subscripts:
            if s in '.,->':
                continue
            if s not in einsum_symbols:
                raise ValueError("Character %s is not a valid symbol." % s)

    else:
        tmp_operands = list(operands)
        operand_list = []
        subscript_list = []
        for p in range(len(operands) // 2):
            operand_list.append(tmp_operands.pop(0))
            subscript_list.append(tmp_operands.pop(0))

        output_list = tmp_operands[-1] if len(tmp_operands) else None
        operands = [asanyarray(v) for v in operand_list]
        subscripts = ""
        last = len(subscript_list) - 1
        for num, sub in enumerate(subscript_list):
            for s in sub:
                if s is Ellipsis:
                    subscripts += "..."
                elif isinstance(s, int):
                    subscripts += einsum_symbols[s]
                else:
                    raise TypeError("For this input type lists must contain "
                                    "either int or Ellipsis")
            if num != last:
                subscripts += ","

        if output_list is not None:
            subscripts += "->"
            for s in output_list:
                if s is Ellipsis:
                    subscripts += "..."
                elif isinstance(s, int):
                    subscripts += einsum_symbols[s]
                else:
                    raise TypeError("For this input type lists must contain "
                                    "either int or Ellipsis")

    # Check for proper "->"
    if ("-" in subscripts) or (">" in subscripts):
        invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
        if invalid or (subscripts.count("->") != 1):
            raise ValueError("Subscripts can only contain one '->'.")

    # Parse ellipses
    if "." in subscripts:
        used = subscripts.replace(".", "").replace(",", "").replace("->", "")
        unused = list(einsum_symbols_set - set(used))
        ellipse_inds = "".join(unused)
        longest = 0

        if "->" in subscripts:
            input_tmp, output_sub = subscripts.split("->")
            split_subscripts = input_tmp.split(",")
            out_sub = True
        else:
            split_subscripts = subscripts.split(',')
            out_sub = False

        for num, sub in enumerate(split_subscripts):
            if "." in sub:
                if (sub.count(".") != 3) or (sub.count("...") != 1):
                    raise ValueError("Invalid Ellipses.")

                # Take into account numerical values
                if operands[num].shape == ():
                    ellipse_count = 0
                else:
                    ellipse_count = max(len(operands[num].shape), 1)
                    ellipse_count -= (len(sub) - 3)

                if ellipse_count > longest:
                    longest = ellipse_count

                if ellipse_count < 0:
                    raise ValueError("Ellipses lengths do not match.")
                elif ellipse_count == 0:
                    split_subscripts[num] = sub.replace('...', '')
                else:
                    rep_inds = ellipse_inds[-ellipse_count:]
                    split_subscripts[num] = sub.replace('...', rep_inds)

        subscripts = ",".join(split_subscripts)
        if longest == 0:
            out_ellipse = ""
        else:
            out_ellipse = ellipse_inds[-longest:]

        if out_sub:
            subscripts += "->" + output_sub.replace("...", out_ellipse)
        else:
            # Special care for outputless ellipses
            output_subscript = ""
            tmp_subscripts = subscripts.replace(",", "")
            for s in sorted(set(tmp_subscripts)):
                if s not in einsum_symbols:
                    raise ValueError("Character %s is not a valid symbol."
                                     % s)
                if tmp_subscripts.count(s) == 1:
                    output_subscript += s
            normal_inds = ''.join(sorted(set(output_subscript) -
                                         set(out_ellipse)))

            subscripts += "->" + out_ellipse + normal_inds

    # Build output string if it does not exist
    if "->" in subscripts:
        input_subscripts, output_subscript = subscripts.split("->")
    else:
        input_subscripts = subscripts

        # Build output subscripts
        tmp_subscripts = subscripts.replace(",", "")
        output_subscript = ""
        for s in sorted(set(tmp_subscripts)):
            if s not in einsum_symbols:
                raise ValueError("Character %s is not a valid symbol." % s)
            if tmp_subscripts.count(s) == 1:
                output_subscript += s

    # Make sure output subscripts are in the input
    for char in output_subscript:
        if char not in input_subscripts:
            raise ValueError("Output character %s did not appear in the input"
                             % char)

    # Make sure the number of operands is equivalent to the number of terms
    if len(input_subscripts.split(',')) != len(operands):
        raise ValueError("Number of einsum subscripts must be equal to the "
                         "number of operands.")

    return (input_subscripts, output_subscript, operands)
def einsum_path(*operands, **kwargs):
    """
    einsum_path(subscripts, *operands, optimize='greedy')

    Evaluates the lowest cost contraction order for an einsum expression by
    considering the creation of intermediate arrays.

    Parameters
    ----------
    subscripts : str
        Specifies the subscripts for summation.
    *operands : list of array_like
        These are the arrays for the operation.
    optimize : {bool, list, tuple, 'greedy', 'optimal'}
        Choose the type of path. If a tuple is provided, the second argument
        is assumed to be the maximum intermediate size created. If only a
        single argument is provided the largest input or output array size is
        used as a maximum intermediate size.

        * if a list is given that starts with ``einsum_path``, uses this as
          the contraction path
        * if False no optimization is taken
        * if True defaults to the 'greedy' algorithm
        * 'optimal' An algorithm that combinatorially explores all possible
          ways of contracting the listed tensors and chooses the least costly
          path. Scales exponentially with the number of terms in the
          contraction.
        * 'greedy' An algorithm that chooses the best pair contraction
          at each step. Effectively, this algorithm searches the largest
          inner, Hadamard, and then outer products at each step. Scales
          cubically with the number of terms in the contraction. Equivalent
          to the 'optimal' path for most contractions.

        Default is 'greedy'.

    Returns
    -------
    path : list of tuples
        A list representation of the einsum path.
    string_repr : str
        A printable representation of the einsum path.

    Notes
    -----
    The resulting path indicates which terms of the input contraction should
    be contracted first, the result of this contraction is then appended to
    the end of the contraction list. This list can then be iterated over
    until all intermediate contractions are complete.

    See Also
    --------
    einsum, linalg.multi_dot

    Examples
    --------

    We can begin with a chain dot example. In this case, it is optimal to
    contract the ``b`` and ``c`` tensors first as represented by the first
    element of the path ``(1, 2)``. The resulting tensor is added to the end
    of the contraction and the remaining contraction ``(0, 1)`` is then
    completed.

    >>> a = np.random.rand(2, 2)
    >>> b = np.random.rand(2, 5)
    >>> c = np.random.rand(5, 2)
    >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
    >>> print(path_info[0])
    ['einsum_path', (1, 2), (0, 1)]
    >>> print(path_info[1])
      Complete contraction:  ij,jk,kl->il
             Naive scaling:  4
         Optimized scaling:  3
          Naive FLOP count:  1.600e+02
      Optimized FLOP count:  5.600e+01
       Theoretical speedup:  2.857
      Largest intermediate:  4.000e+00 elements
    -------------------------------------------------------------------------
    scaling                  current                                remaining
    -------------------------------------------------------------------------
       3                   kl,jk->jl                                ij,jl->il
       3                   jl,ij->il                                   il->il

    A more complex index transformation example.

    >>> I = np.random.rand(10, 10, 10, 10)
    >>> C = np.random.rand(10, 10)
    >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
                                   optimize='greedy')
    >>> print(path_info[0])
    ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
    >>> print(path_info[1])
      Complete contraction:  ea,fb,abcd,gc,hd->efgh
             Naive scaling:  8
         Optimized scaling:  5
          Naive FLOP count:  8.000e+08
      Optimized FLOP count:  8.000e+05
       Theoretical speedup:  1000.000
      Largest intermediate:  1.000e+04 elements
    --------------------------------------------------------------------------
    scaling                  current                                remaining
    --------------------------------------------------------------------------
       5               abcd,ea->bcde                      fb,gc,hd,bcde->efgh
       5               bcde,fb->cdef                         gc,hd,cdef->efgh
       5               cdef,gc->defg                            hd,defg->efgh
       5               defg,hd->efgh                               efgh->efgh
    """

    # Make sure all keywords are valid
    valid_contract_kwargs = ['optimize', 'einsum_call']
    unknown_kwargs = [k for (k, v) in kwargs.items() if k
                      not in valid_contract_kwargs]
    if len(unknown_kwargs):
        raise TypeError("Did not understand the following kwargs:"
                        " %s" % unknown_kwargs)

    # Figure out what the path really is
    path_type = kwargs.pop('optimize', False)
    if path_type is True:
        path_type = 'greedy'
    if path_type is None:
        path_type = False

    memory_limit = None

    # No optimization or a named path algorithm
    if (path_type is False) or isinstance(path_type, str):
        pass

    # Given an explicit path
    elif len(path_type) and (path_type[0] == 'einsum_path'):
        pass

    # Path tuple with memory limit
    elif ((len(path_type) == 2) and isinstance(path_type[0], str) and
            isinstance(path_type[1], (int, float))):
        memory_limit = int(path_type[1])
        path_type = path_type[0]

    else:
        raise TypeError("Did not understand the path: %s" % str(path_type))

    # Hidden option, only einsum should call this
    einsum_call_arg = kwargs.pop("einsum_call", False)

    # Python side parsing
    input_subscripts, output_subscript, operands = \
        _parse_einsum_input(operands)
    subscripts = input_subscripts + '->' + output_subscript

    # Build a few useful lists and sets
    input_list = input_subscripts.split(',')
    input_sets = [set(x) for x in input_list]
    output_set = set(output_subscript)
    indices = set(input_subscripts.replace(',', ''))

    # Get length of each unique dimension and ensure all dimensions are
    # correct
    dimension_dict = {}
    for tnum, term in enumerate(input_list):
        sh = operands[tnum].shape
        if len(sh) != len(term):
            raise ValueError("Einstein sum subscript %s does not contain the "
                             "correct number of indices for operand %d."
                             % (input_subscripts[tnum], tnum))
        for cnum, char in enumerate(term):
            dim = sh[cnum]
            if char in dimension_dict.keys():
                if dimension_dict[char] != dim:
                    raise ValueError("Size of label '%s' for operand %d does "
                                     "not match previous terms."
                                     % (char, tnum))
            else:
                dimension_dict[char] = dim

    # Compute size of each input array plus the output array
    size_list = []
    for term in input_list + [output_subscript]:
        size_list.append(_compute_size_by_dict(term, dimension_dict))
    max_size = max(size_list)

    if memory_limit is None:
        memory_arg = max_size
    else:
        memory_arg = memory_limit

    # Compute naive cost
    # This isn't quite right, need to look into exactly how einsum does this
    naive_cost = _compute_size_by_dict(indices, dimension_dict)
    indices_in_input = input_subscripts.replace(',', '')
    mult = max(len(input_list) - 1, 1)
    if (len(indices_in_input) - len(set(indices_in_input))):
        mult *= 2
    naive_cost *= mult
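
    # Worked example: for 'ij,jk,kl->il' with shapes (2, 2), (2, 5), (5, 2)
    # the index sizes are i=2, j=2, k=5, l=2, so the product over all indices
    # is 40; mult is max(3 - 1, 1) = 2, doubled to 4 because labels repeat,
    # giving the naive FLOP count 1.600e+02 quoted in the docstring above.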
# Compute the path
if (path_type is False) or (len(input_list) in [1, 2]) or (indices == output_set):
# Nothing to be optimized, leave it to einsum
path = [tuple(range(len(input_list)))]
elif path_type == "greedy":
# Maximum memory should be at most out_size for this algorithm
memory_arg = min(memory_arg, max_size)
path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg)
elif path_type == "optimal":
path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg)
elif path_type[0] == 'einsum_path':
path = path_type[1:]
else:
raise KeyError("Path name %s not found", path_type)
cost_list, scale_list, size_list, contraction_list = [], [], [], []
# Build contraction tuple (positions, gemm, einsum_str, remaining)
for cnum, contract_inds in enumerate(path):
# Make sure we remove inds from right to left
contract_inds = tuple(sorted(list(contract_inds), reverse=True))
contract = _find_contraction(contract_inds, input_sets, output_set)
out_inds, input_sets, idx_removed, idx_contract = contract
cost = _compute_size_by_dict(idx_contract, dimension_dict)
if idx_removed:
cost *= 2
cost_list.append(cost)
scale_list.append(len(idx_contract))
size_list.append(_compute_size_by_dict(out_inds, dimension_dict))
tmp_inputs = []
for x in contract_inds:
tmp_inputs.append(input_list.pop(x))
# Last contraction
if (cnum - len(path)) == -1:
idx_result = output_subscript
else:
sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
idx_result = "".join([x[1] for x in sorted(sort_result)])
input_list.append(idx_result)
einsum_str = ",".join(tmp_inputs) + "->" + idx_result
contraction = (contract_inds, idx_removed, einsum_str, input_list[:])
contraction_list.append(contraction)
opt_cost = sum(cost_list) + 1
if einsum_call_arg:
return (operands, contraction_list)
# Return the path along with a nice string representation
overall_contraction = input_subscripts + "->" + output_subscript
header = ("scaling", "current", "remaining")
speedup = naive_cost / opt_cost
max_i = max(size_list)
path_print = " Complete contraction: %s\n" % overall_contraction
path_print += " Naive scaling: %d\n" % len(indices)
path_print += " Optimized scaling: %d\n" % max(scale_list)
path_print += " Naive FLOP count: %.3e\n" % naive_cost
path_print += " Optimized FLOP count: %.3e\n" % opt_cost
path_print += " Theoretical speedup: %3.3f\n" % speedup
path_print += " Largest intermediate: %.3e elements\n" % max_i
path_print += "-" * 74 + "\n"
path_print += "%6s %24s %40s\n" % header
path_print += "-" * 74
for n, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining = contraction
remaining_str = ",".join(remaining) + "->" + output_subscript
path_run = (scale_list[n], einsum_str, remaining_str)
path_print += "\n%4d %24s %40s" % path_run
path = ['einsum_path'] + path
return (path, path_print)
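# A minimal usage sketch (illustrative only; `a`, `b` and `c` are assumed
# conformable ndarrays supplied by the caller):
#
#     path, desc = einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
#     # path[0] == 'einsum_path'; the remaining entries are the pairwise
#     # contraction tuples, and the whole list can be fed back in via
#     # optimize=path to skip re-running the path search.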
# Rewrite einsum to handle different cases
def einsum(*operands, **kwargs):
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe', optimize=False)
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional
array operations can be represented in a simple fashion. This function
provides a way to compute such summations. The best way to understand this
function is to try the examples below, which show how many common NumPy
functions can be implemented as calls to `einsum`.
Parameters
----------
subscripts : str
Specifies the subscripts for summation.
operands : list of array_like
These are the arrays for the operation.
out : {ndarray, None}, optional
If provided, the calculation is done into this array.
dtype : {data-type, None}, optional
If provided, forces the calculation to use the data type specified.
Note that you may have to also give a more liberal `casting`
parameter to allow the conversions. Default is None.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout of the output. 'C' means it should
be C contiguous. 'F' means it should be Fortran contiguous,
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
        'K' means it should be as close to the layout of the inputs as
        is possible, including arbitrarily permuted axes.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Setting this to
'unsafe' is not recommended, as it can adversely affect accumulations.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
Default is 'safe'.
optimize : {False, True, 'greedy', 'optimal'}, optional
Controls if intermediate optimization should occur. No optimization
will occur if False and True will default to the 'greedy' algorithm.
Also accepts an explicit contraction list from the ``np.einsum_path``
function. See ``np.einsum_path`` for more details. Default is False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
See Also
--------
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
Notes
-----
.. versionadded:: 1.6.0
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
    Repeated subscript labels in one operand take the diagonal. For example,
``np.einsum('ii', a)`` is equivalent to ``np.trace(a)``.
Whenever a label is repeated, it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to ``np.inner(a,b)``. If a label appears only once,
it is not summed, so ``np.einsum('i', a)`` produces a view of ``a``
with no changes.
The order of labels in the output is by default alphabetical. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose.
The output can be controlled by specifying output subscript labels
as well. This specifies the label order, and allows summing to
be disallowed or forced when desired. The call ``np.einsum('i->', a)``
is like ``np.sum(a, axis=-1)``, and ``np.einsum('ii->i', a)``
is like ``np.diag(a)``. The difference is that `einsum` does not
allow broadcasting by default.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, you can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view.
An alternative way to provide the subscripts and operands is as
``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``. The examples
below have corresponding `einsum` calls with the two parameter methods.
.. versionadded:: 1.10.0
Views returned from einsum are now writeable whenever the input array
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
have the same effect as ``np.swapaxes(a, 0, 2)`` and
``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
of a 2D array.
.. versionadded:: 1.12.0
Added the ``optimize`` argument which will optimize the contraction order
of an einsum expression. For a contraction with three or more operands this
can greatly increase the computational efficiency at the cost of a larger
memory footprint during computation.
See ``np.einsum_path`` for more details.
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
>>> np.einsum('ii', a)
60
>>> np.einsum(a, [0,0])
60
>>> np.trace(a)
60
>>> np.einsum('ii->i', a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum(a, [0,0], [0])
array([ 0, 6, 12, 18, 24])
>>> np.diag(a)
array([ 0, 6, 12, 18, 24])
>>> np.einsum('ij,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum(a, [0,1], b, [1])
array([ 30, 80, 130, 180, 230])
>>> np.dot(a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('...j,j', a, b)
array([ 30, 80, 130, 180, 230])
>>> np.einsum('ji', c)
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum(c, [1,0])
array([[0, 3],
[1, 4],
[2, 5]])
>>> c.T
array([[0, 3],
[1, 4],
[2, 5]])
>>> np.einsum('..., ...', 3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.multiply(3, c)
array([[ 0, 3, 6],
[ 9, 12, 15]])
>>> np.einsum('i,i', b, b)
30
>>> np.einsum(b, [0], b, [0])
30
>>> np.inner(b,b)
30
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum(np.arange(2)+1, [0], b, [1])
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.outer(np.arange(2)+1, b)
array([[0, 1, 2, 3, 4],
[0, 2, 4, 6, 8]])
>>> np.einsum('i...->...', a)
array([50, 55, 60, 65, 70])
>>> np.einsum(a, [0,Ellipsis], [Ellipsis])
array([50, 55, 60, 65, 70])
>>> np.sum(a, axis=0)
array([50, 55, 60, 65, 70])
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> np.einsum('k...,jk', a, b)
array([[10, 28, 46, 64],
[13, 40, 67, 94]])
>>> # since version 1.10.0
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
"""
# Grab non-einsum kwargs
optimize_arg = kwargs.pop('optimize', False)
# If no optimization, run pure einsum
if optimize_arg is False:
return c_einsum(*operands, **kwargs)
valid_einsum_kwargs = ['out', 'dtype', 'order', 'casting']
einsum_kwargs = {k: v for (k, v) in kwargs.items() if
k in valid_einsum_kwargs}
# Make sure all keywords are valid
valid_contract_kwargs = ['optimize'] + valid_einsum_kwargs
unknown_kwargs = [k for (k, v) in kwargs.items() if
k not in valid_contract_kwargs]
if len(unknown_kwargs):
raise TypeError("Did not understand the following kwargs: %s"
% unknown_kwargs)
    # Special handling if out is specified
specified_out = False
out_array = einsum_kwargs.pop('out', None)
if out_array is not None:
specified_out = True
# Build the contraction list and operand
operands, contraction_list = einsum_path(*operands, optimize=optimize_arg,
einsum_call=True)
# Start contraction loop
for num, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining = contraction
tmp_operands = []
for x in inds:
tmp_operands.append(operands.pop(x))
# If out was specified
if specified_out and ((num + 1) == len(contraction_list)):
einsum_kwargs["out"] = out_array
# Do the contraction
new_view = c_einsum(einsum_str, *tmp_operands, **einsum_kwargs)
        # Append new items and dereference what we can
operands.append(new_view)
del tmp_operands, new_view
if specified_out:
return out_array
else:
return operands[0]
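# Usage sketch (illustrative only; `a`, `b`, `c` are assumed conformable
# ndarrays): with optimize=False the call falls straight through to
# c_einsum, while any other value routes through einsum_path and builds the
# result by pairwise contractions:
#
#     out = einsum('ij,jk,kl->il', a, b, c, optimize='greedy')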
|
{
"content_hash": "3d1612d9b9a7123429d3188177781768",
"timestamp": "",
"source": "github",
"line_count": 990,
"max_line_length": 86,
"avg_line_length": 35.81515151515151,
"alnum_prop": 0.5624841357136814,
"repo_name": "dwillmer/numpy",
"id": "97eb7924faade2c0e8373a9dad63f3a4e8698763",
"size": "35457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numpy/core/einsumfunc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7473116"
},
{
"name": "C++",
"bytes": "110227"
},
{
"name": "FORTRAN",
"bytes": "6310"
},
{
"name": "Makefile",
"bytes": "2574"
},
{
"name": "Python",
"bytes": "5932518"
},
{
"name": "Shell",
"bytes": "2241"
}
],
"symlink_target": ""
}
|
import cPickle as pickle
import logging
import numpy as np
from os.path import join as path_join
import sys
import keras.backend as K
from keras.models import Sequential, Graph
from keras.layers.containers import Graph as SubGraph
from keras.layers.containers import Sequential as Stack
from keras.layers.core import *
from keras.layers import Embedding
from keras.layers.convolutional import *
from keras.layers.recurrent import GRU, LSTM
from keras.regularizers import l2
ROOT_PATH = '../..'
sys.path.append(ROOT_PATH)
from textclf.nn import train_neural
from textclf.nn.timedistributed import TimeDistributed
LOGGER_PREFIX = ' %s'
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def log(msg, logger=logger):
logger.info(LOGGER_PREFIX % msg)
MODEL_FILE = './imdb-model-rcnn-old-2'
LOG_FILE = './log-model-rcnn-old-2'
# Read back data
WV_FILE_IMDB = path_join(ROOT_PATH, 'embeddings/wv/IMDB-GloVe-300dim-glovebox.pkl')
WV_FILE_GLOBAL = path_join(ROOT_PATH, 'embeddings/wv/glove.42B.300d.120000-glovebox.pkl')
gb_global = pickle.load(open(WV_FILE_GLOBAL, 'rb'))
gb_imdb = pickle.load(open(WV_FILE_IMDB, 'rb'))
train, test = {}, {}
log('Loading training data')
train['text4imdb'] = np.load(path_join(ROOT_PATH, 'IMDB_train_glove_X.npy'))
train['text4global'] = np.load(path_join(ROOT_PATH, 'IMDB_train_global_glove_X.npy'))
train['labels'] = np.load(path_join(ROOT_PATH, 'IMDB_train_glove_y.npy'))
log('Shuffling training data')
shuff = range(train['text4imdb'].shape[0])
np.random.shuffle(shuff)
for k in train.keys():
train[k] = train[k][shuff]
# -- flatten across paragraph dimension, will later be reconstructed in the embedding
if 'lab' not in k:
train[k] = train[k].reshape(train[k].shape[0], -1)
del shuff
log('Loading testing data')
# -- testing data
test['text4imdb'] = np.load(path_join(ROOT_PATH, 'IMDB_test_glove_X.npy'))
test['text4global'] = np.load(path_join(ROOT_PATH, 'IMDB_test_global_glove_X.npy'))
test['labels'] = np.load(path_join(ROOT_PATH, 'IMDB_test_glove_y.npy'))
test['text4imdb'] = test['text4imdb'].reshape(test['text4imdb'].shape[0], -1)
test['text4global'] = test['text4global'].reshape(test['text4global'].shape[0], -1)
log('Building model architecture...')
NGRAMS = [2, 3, 4, 5, 7]
NFILTERS = 32 * 4
SENTENCE_LENGTH = 50
PARAGRAPH_LENGTH = 50
INPUT_SHAPE = (SENTENCE_LENGTH * PARAGRAPH_LENGTH, )
global_vectors = Sequential([Embedding(input_dim=gb_global.W.shape[0], output_dim=300, weights=[gb_global.W], input_length=INPUT_SHAPE[0])])
imdb_vectors = Sequential([Embedding(input_dim=gb_imdb.W.shape[0], output_dim=300, weights=[gb_imdb.W], input_length=INPUT_SHAPE[0])])
model = Sequential()
model.add(Merge([global_vectors, imdb_vectors], mode='concat'))
model.add(Reshape((PARAGRAPH_LENGTH, SENTENCE_LENGTH, 2, 300)))
model.add(Permute(dims=(1, 3, 2, 4)))
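# -- shape bookkeeping (reasoning from the constants above): the merge yields
#    (batch, 2500, 600), the reshape splits that into
#    (paragraph, sentence, source, dim), and the permute gives
#    (batch, PARAGRAPH_LENGTH, 2, SENTENCE_LENGTH, 300), so each paragraph
#    slot is a 2-channel sentence x embedding "image" for the
#    TimeDistributed 2D convolutions below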
# -- create convolution units...
conv_unit = Graph()
conv_unit.add_input('embeddings', input_shape=model.output_shape[1:])
for n in NGRAMS:
conv_unit.add_node(
TimeDistributed(Convolution2D(NFILTERS, n, 300,
W_regularizer=l2(0.0001),
activation='relu')
),
name='conv{}gram'.format(n), input='embeddings'
)
conv_unit.add_node(
TimeDistributed(MaxPooling2D(pool_size=(SENTENCE_LENGTH - n + 1, 1))),
name='maxpool{}gram'.format(n), input='conv{}gram'.format(n)
)
# conv_unit.add_node(
# Lambda(
# function=lambda x: K.squeeze(x, axis=-1),
# output_shape=lambda s: s[:-1]
# ),
# name='squeeze{}gram'.format(n), input='conv{}gram'.format(n)
# )
# conv_unit.add_node(
# TimeDistributed(GRU(10), input_shape=conv_unit.nodes['squeeze{}gram'.format(n)].output_shape[1:]),
# name='gru-attn-forward{}gram'.format(n), input='squeeze{}gram'.format(n)
# )
# conv_unit.add_node(
# TimeDistributed(GRU(10, go_backwards=True)),
# name='gru-attn-backward{}gram'.format(n), input='squeeze{}gram'.format(n)
# )
conv_unit.add_node(
Dropout(0.5),
name='dropout{}gram'.format(n), input='maxpool{}gram'.format(n)
)
conv_unit.add_node(
TimeDistributed(Flatten()),
name='flatten{}gram'.format(n), input='dropout{}gram'.format(n)
)
# conv_unit.add_node(
# Dropout(0.15),
# name='dropout-gru{}gram'.format(n), input='gru-attn-forward{}gram'.format(n)
# )
# conv_unit.add_node(
# TimeDistributed(Highway(activation='relu')),
# name='highway{}gram'.format(n),
# input='flatten{}gram'.format(n)
# # inputs=['flatten{}gram'.format(n), 'dropout-gru{}gram'.format(n)]
# )
# -- merge across all the n-gram sizes
#conv_unit.add_node(Dropout(0.1), name='dropout', inputs=['flatten{}gram'.format(n) for n in NGRAMS])
conv_unit.add_node(GRU(96), name='forwards', inputs=['flatten{}gram'.format(n) for n in NGRAMS], concat_axis=-1)
conv_unit.add_node(GRU(96, go_backwards=True), name='backwards', inputs=['flatten{}gram'.format(n) for n in NGRAMS], concat_axis=-1)
# -- add a bidirectional RNN
#conv_unit.add_node(GRU(100), name='forwards', input='dropout', concat_axis=-1)
#conv_unit.add_node(GRU(100, go_backwards=True), name='backwards', input='dropout', concat_axis=-1)
conv_unit.add_node(Dropout(0.7), name='gru_dropout', inputs=['forwards', 'backwards'], create_output=True)
model.add(conv_unit)
model.add(MaxoutDense(64, 5, init='he_uniform'))
model.add(Dropout(0.5))
#model.add(Highway(activation='relu'))
#model.add(Highway(activation='relu'))
#model.add(Highway(activation='relu'))
#model.add(Dropout(0.2))
model.add(Dense(1, activation='sigmoid'))
log('Compiling model (may take >10 mins)')
model.compile(loss='binary_crossentropy', optimizer='rmsprop', class_mode='binary')
log('Testing model!')
# -- since we are using a merge model, we need to make a list
train_reviews = [train['text4global'], train['text4imdb']]
test_reviews = [test['text4global'], test['text4imdb']]
train_labels = train['labels']
test_labels = test['labels']
history = train_neural.train_sequential(model, train_reviews, train_labels, MODEL_FILE)
acc = train_neural.test_sequential(model, test_reviews, test_labels, MODEL_FILE)
train_neural.write_log(model, history, __file__, acc, LOG_FILE)
|
{
"content_hash": "a2e79d8848862f0dfa8bdff16f665d41",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 140,
"avg_line_length": 32.12562814070352,
"alnum_prop": 0.672454246832473,
"repo_name": "textclf/fancy-cnn",
"id": "13db8f496d0749bc8eac83acbef5d4cb34e2efc1",
"size": "6393",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/imdb/test_rcnn.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "88607"
},
{
"name": "Shell",
"bytes": "655"
}
],
"symlink_target": ""
}
|
from flask_wtf import FlaskForm
from wtforms import validators, PasswordField, BooleanField, SubmitField
class PasswordsForm(FlaskForm):
password = PasswordField('New Password', [
validators.DataRequired(),
validators.EqualTo('confirm', message='Passwords must match')
])
confirm = PasswordField('Repeat Password', [validators.DataRequired()])
    appuser = BooleanField('Change App password', default=True)
    sshuser = BooleanField('Change SSH password', default=True)
    wlanuser = BooleanField('Change WLAN password', default=True)
submit = SubmitField('Save')
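# Hypothetical usage in a Flask view (the setter helpers are assumptions,
# not part of this module):
#
#     form = PasswordsForm()
#     if form.validate_on_submit():
#         if form.appuser.data:
#             set_app_password(form.password.data)   # assumed helper
#         if form.sshuser.data:
#             set_ssh_password(form.password.data)   # assumed helper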
|
{
"content_hash": "82ecf408d376430cd855866f9376a7ff",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 75,
"avg_line_length": 45.92307692307692,
"alnum_prop": 0.7370184254606366,
"repo_name": "t4skforce/PenTestingUnit",
"id": "6a12bc99d7afb8a329adf6895e63a9cd80086dcb",
"size": "597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/app/blueprints/system/forms/passwords.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7238"
},
{
"name": "HTML",
"bytes": "212601"
},
{
"name": "JavaScript",
"bytes": "2830190"
},
{
"name": "Python",
"bytes": "38451"
},
{
"name": "Shell",
"bytes": "6381"
}
],
"symlink_target": ""
}
|
from django.urls import re_path
from .views import WizardCreateView
urlpatterns = [
re_path(r"^create/$",
WizardCreateView.as_view(), name="cms_wizard_create"),
]
|
{
"content_hash": "853879ffce976dfe9619e13b0310a115",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 66,
"avg_line_length": 22.625,
"alnum_prop": 0.6740331491712708,
"repo_name": "rsalmaso/django-cms",
"id": "e24a37e4b081789c3baeec8c8919f52a5589c3ee",
"size": "181",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "cms/wizards/urls.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "204223"
},
{
"name": "JavaScript",
"bytes": "1250281"
},
{
"name": "Python",
"bytes": "2386268"
},
{
"name": "SCSS",
"bytes": "137693"
},
{
"name": "Shell",
"bytes": "22511"
}
],
"symlink_target": ""
}
|
class GwinnettPipeline(object):
def process_item(self, item, spider):
return item
|
{
"content_hash": "389da0d76cd58e13ba5fee015271dafc",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 41,
"avg_line_length": 31.333333333333332,
"alnum_prop": 0.6914893617021277,
"repo_name": "lahoffm/aclu-bail-reform",
"id": "61c0798f145a14ec7104cee4607fb915529fa01b",
"size": "288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/webscraper/gwinnett/gwinnett/pipelines.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2269"
},
{
"name": "Jupyter Notebook",
"bytes": "67941"
},
{
"name": "Python",
"bytes": "199030"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, absolute_import, division
import warnings
import numpy as np
import scipy.sparse as sp
import os.path
import sys
import contextlib
import json
from datetime import datetime
from sklearn.pipeline import Pipeline
from six import StringIO
from .eval_scopes import import_all_estimators
from .trials import JSONEncoded
__all__ = ['dict_merge', 'in_directory', 'prepend_syspath',
           'Unbuffered', 'format_timedelta', 'current_pretty_time',
           'short_format_time', 'mock_module', 'join_quoted', 'expand_path',
           'is_msmbuilder_estimator', 'num_samples', 'check_arrays',
           'trials_to_dict']
def is_json_serializable(obj):
"""
Checks to see if obj(ect) is Json serializable
Returns
-------
Bool
"""
try:
json.dumps(obj)
return True
except TypeError:
return False
def dict_merge(base, top):
"""Recursively merge two dictionaries, with the elements from `top`
taking precedence over elements from `top`.
Returns
-------
out : dict
A new dict, containing the merged records.
"""
out = dict(top)
for key in base:
if key in top:
if isinstance(base[key], dict) and isinstance(top[key], dict):
out[key] = dict_merge(base[key], top[key])
        else:
            out[key] = base[key]
return out
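# Example (values chosen for illustration):
#     dict_merge({'a': 1, 'b': {'x': 1}}, {'b': {'y': 2}, 'c': 3})
#     # -> {'a': 1, 'b': {'x': 1, 'y': 2}, 'c': 3}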
@contextlib.contextmanager
def in_directory(path):
"""Context manager (with statement) that changes the current directory
during the context.
"""
curdir = os.path.abspath(os.curdir)
os.chdir(path)
yield
os.chdir(curdir)
@contextlib.contextmanager
def prepend_syspath(path):
"""Contect manager (with statement) that prepends path to sys.path"""
sys.path.insert(0, path)
yield
sys.path.pop(0)
class Unbuffered(object):
# used to turn off output buffering
# http://stackoverflow.com/questions/107705/python-output-buffering
def __init__(self, stream):
self.stream = stream
def write(self, data):
self.stream.write(data)
self.stream.flush()
def __getattr__(self, attr):
return getattr(self.stream, attr)
def format_timedelta(td_object):
"""Format a timedelta object for display to users
Returns
-------
str
"""
def get_total_seconds(td):
# timedelta.total_seconds not in py2.6
return (td.microseconds +
(td.seconds + td.days * 24 * 3600) * 1e6) / 1e6
seconds = int(get_total_seconds(td_object))
periods = [('year', 60*60*24*365),
('month', 60*60*24*30),
('day', 60*60*24),
('hour', 60*60),
('minute', 60),
('second', 1)]
strings = []
for period_name, period_seconds in periods:
if seconds > period_seconds:
period_value, seconds = divmod(seconds, period_seconds)
if period_value == 1:
strings.append("%s %s" % (period_value, period_name))
else:
strings.append("%s %ss" % (period_value, period_name))
return ", ".join(strings)
def current_pretty_time():
return datetime.now().strftime("%B %d, %Y %l:%M %p")
def _squeeze_time(t):
"""Remove .1s to the time under Windows: this is the time it take to
stat files. This is needed to make results similar to timings under
Unix, for tests
"""
if sys.platform.startswith('win'):
return max(0, t - .1)
else:
return t
def short_format_time(t):
t = _squeeze_time(t)
if t > 60:
return "%4.1fmin" % (t / 60.)
else:
return " %5.1fs" % (t)
def mock_module(name):
class MockModule(object):
        def __call__(self, *args, **kwargs):
raise ImportError('no module named %s' % name)
def __getattr__(self, *args, **kwargs):
raise ImportError('no module named %s' % name)
return MockModule()
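# Sketch of the intended use (assumed from the behaviour above): register the
# object as a stand-in so a missing optional dependency fails lazily on
# first attribute access rather than at import time:
#
#     sys.modules.setdefault('some_optional_dep', mock_module('some_optional_dep'))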
def join_quoted(values, quote="'"):
return ', '.join("%s%s%s" % (quote, e, quote) for e in values)
def expand_path(path, base='.'):
path = os.path.expanduser(path)
if not os.path.isabs(path):
path = os.path.join(base, path)
return path
def is_msmbuilder_estimator(estimator):
try:
import msmbuilder
except ImportError:
return False
msmbuilder_estimators = import_all_estimators(msmbuilder).values()
out = estimator.__class__ in msmbuilder_estimators
if isinstance(estimator, Pipeline):
out = any(step.__class__ in msmbuilder_estimators
for name, step in estimator.steps)
return out
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method
if (X.dtype.char in np.typecodes['AllFloat'] and
not np.isfinite(X.sum()) and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def _warn_if_not_finite(X):
"""UserWarning if array contains non-finite elements"""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method
if (X.dtype.char in np.typecodes['AllFloat'] and
not np.isfinite(X.sum()) and not np.isfinite(X).all()):
warnings.warn("Result contains NaN, infinity"
" or a value too large for %r." % X.dtype,
category=UserWarning)
def num_samples(x, is_nested=False):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if is_nested:
return sum(num_samples(xx, is_nested=False) for xx in x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
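# Examples (shapes chosen for illustration):
#     num_samples(np.zeros((3, 4)))               # -> 3
#     num_samples([[1, 2], [3]], is_nested=True)  # -> 3 (2 + 1)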
def check_arrays(*arrays, **options):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
By default lists and tuples are converted to numpy arrays.
    It is possible to enforce certain properties, such as dtype, contiguity
and sparse matrix format (if a sparse matrix is passed).
Converting lists to arrays can be disabled by setting ``allow_lists=True``.
Lists can then contain arbitrary objects and are not checked for dtype,
finiteness or anything else but length. Arrays are still checked
and possibly converted.
Parameters
----------
*arrays : sequence of arrays or scipy.sparse matrices with same shape[0]
Python lists or tuples occurring in arrays are converted to 1D numpy
arrays, unless allow_lists is specified.
sparse_format : 'csr', 'csc' or 'dense', None by default
If not None, any scipy.sparse matrix is converted to
Compressed Sparse Rows or Compressed Sparse Columns representations.
If 'dense', an error is raised when a sparse array is
passed.
copy : boolean, False by default
If copy is True, ensure that returned arrays are copies of the original
(if not already converted to another format earlier in the process).
check_ccontiguous : boolean, False by default
Check that the arrays are C contiguous
dtype : a numpy dtype instance, None by default
Enforce a specific dtype.
warn_nans : boolean, False by default
Prints warning if nans in the arrays
Disables allow_nans
replace_nans : boolean, False by default
Replace nans in the arrays with zeros
allow_lists : bool
        Allow lists of arbitrary objects as input, just check their length.
        Disables dtype, finiteness and other checks on list elements.
allow_nans : boolean, False by default
Allows nans in the arrays
allow_nd : boolean, False by default
Allows arrays of more than 2 dimensions.
"""
sparse_format = options.pop('sparse_format', None)
if sparse_format not in (None, 'csr', 'csc', 'dense'):
raise ValueError('Unexpected sparse format: %r' % sparse_format)
copy = options.pop('copy', False)
check_ccontiguous = options.pop('check_ccontiguous', False)
dtype = options.pop('dtype', None)
warn_nans = options.pop('warn_nans', False)
replace_nans = options.pop('replace_nans', False)
allow_lists = options.pop('allow_lists', False)
allow_nans = options.pop('allow_nans', False)
allow_nd = options.pop('allow_nd', False)
if options:
raise TypeError("Unexpected keyword arguments: %r" % options.keys())
if len(arrays) == 0:
return None
n_samples = num_samples(arrays[0])
checked_arrays = []
for array in arrays:
array_orig = array
if array is None:
# special case: ignore optional y=None kwarg pattern
checked_arrays.append(array)
continue
size = num_samples(array)
if size != n_samples:
raise ValueError("Found array with dim %d. Expected %d"
% (size, n_samples))
if not allow_lists or hasattr(array, "shape"):
if sp.issparse(array):
if sparse_format == 'csr':
array = array.tocsr()
elif sparse_format == 'csc':
array = array.tocsc()
elif sparse_format == 'dense':
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if check_ccontiguous:
array.data = np.ascontiguousarray(array.data, dtype=dtype)
elif hasattr(array, 'data'):
array.data = np.asarray(array.data, dtype=dtype)
elif array.dtype != dtype:
array = array.astype(dtype)
if not allow_nans:
if hasattr(array, 'data'):
_assert_all_finite(array.data)
else:
_assert_all_finite(array.values())
else:
if check_ccontiguous:
array = np.ascontiguousarray(array, dtype=dtype)
else:
array = np.asarray(array, dtype=dtype)
if warn_nans:
allow_nans = True
_warn_if_not_finite(array)
if replace_nans:
array = np.nan_to_num(array)
if not allow_nans:
_assert_all_finite(array)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. Expected <= 2" %
array.ndim)
if copy and array is array_orig:
array = array.copy()
checked_arrays.append(array)
return checked_arrays
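# Typical call (illustrative; X and y are assumed array-likes with equal
# first dimensions):
#     X, y = check_arrays(X, y, sparse_format='csr', replace_nans=True)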
def trials_to_dict(trials, columns):
for trial in trials:
d = {}
for i, item in enumerate(columns.items()):
key, val = item
new_val = trial[i]
if isinstance(val.type, JSONEncoded):
new_val = json.load(StringIO(new_val))
d[key] = new_val
yield d
|
{
"content_hash": "a0bb412952829496ecc9359e457162a4",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 79,
"avg_line_length": 33.244505494505496,
"alnum_prop": 0.588298487728287,
"repo_name": "pandegroup/osprey",
"id": "b4dee9cd80f241251bc65602a2484a714266f1b2",
"size": "12101",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "osprey/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "165828"
},
{
"name": "Shell",
"bytes": "3289"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('event', '0002_auto_20180131_1828'),
]
operations = [
migrations.AlterField(
model_name='dancefloorevent',
name='completed',
field=models.NullBooleanField(default=None),
),
]
|
{
"content_hash": "02f01c19706f047fe0d1c10c9d88d01c",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 56,
"avg_line_length": 22.0625,
"alnum_prop": 0.5920679886685553,
"repo_name": "bruecksen/notifhain",
"id": "c1d349dfeebf01c630da0d180dbb68b8a3483d4d",
"size": "402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notifhain/event/migrations/0003_auto_20180131_1832.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "584"
},
{
"name": "Python",
"bytes": "61554"
},
{
"name": "Shell",
"bytes": "94"
}
],
"symlink_target": ""
}
|
import mock
import testtools
import heatclient.v1.shell as shell
class TestHooks(testtools.TestCase):
def setUp(self):
super(TestHooks, self).setUp()
self.client = mock.Mock()
nested_stack = mock.Mock()
self.client.resources.get = mock.Mock(name='thingy',
return_value=nested_stack)
type(nested_stack).physical_resource_id = mock.PropertyMock(
return_value='nested_id')
self.args = mock.Mock()
stack_name_p = mock.PropertyMock(return_value="mystack")
type(self.args).name = stack_name_p
type(self.args).id = stack_name_p
shell.template_utils.get_template_contents = mock.Mock(
return_value=({}, ""))
shell.template_utils.process_multiple_environments_and_files = \
mock.Mock(return_value=({}, {}))
shell.utils.format_all_parameters = mock.Mock(return_value=[])
shell.do_stack_list = mock.Mock()
shell.logger = mock.Mock()
type(self.args).clear_parameter = mock.PropertyMock(return_value=[])
type(self.args).rollback = mock.PropertyMock(return_value=None)
type(self.args).pre_create = mock.PropertyMock(return_value=False)
type(self.args).pre_update = mock.PropertyMock(return_value=False)
def test_create_hooks_in_args(self):
type(self.args).pre_create = mock.PropertyMock(
return_value=['bp', 'another_bp'])
shell.do_stack_create(self.client, self.args)
self.client.stacks.create.assert_called_once()
expected_hooks = {
'bp': {'hooks': 'pre-create'},
'another_bp': {'hooks': 'pre-create'}
}
actual_hooks = self.client.stacks.create.call_args[1][
'environment']['resource_registry']['resources']
self.assertEqual(expected_hooks, actual_hooks)
def test_create_nested_hooks_in_args(self):
type(self.args).pre_create = mock.PropertyMock(
return_value=['nested/bp', 'super/nested/bp'])
shell.do_stack_create(self.client, self.args)
self.client.stacks.create.assert_called_once()
expected_hooks = {
'nested': {
'bp': {'hooks': 'pre-create'},
},
'super': {
'nested': {
'bp': {'hooks': 'pre-create'},
}
}
}
actual_hooks = self.client.stacks.create.call_args[1][
'environment']['resource_registry']['resources']
self.assertEqual(expected_hooks, actual_hooks)
def test_create_hooks_in_env_and_args(self):
type(self.args).pre_create = mock.PropertyMock(return_value=[
'nested_a/bp',
'bp_a',
'another_bp_a',
'super_a/nested/bp',
])
env = {
'resource_registry': {
'resources': {
'bp_e': {'hooks': 'pre-create'},
'another_bp_e': {'hooks': 'pre-create'},
'nested_e': {
'bp': {'hooks': 'pre-create'}
},
'super_e': {
'nested': {
'bp': {'hooks': 'pre-create'}
}
}
}
}
}
shell.template_utils.process_multiple_environments_and_files = \
mock.Mock(return_value=({}, env))
shell.do_stack_create(self.client, self.args)
self.client.stacks.create.assert_called_once()
actual_hooks = self.client.stacks.create.call_args[1][
'environment']['resource_registry']['resources']
expected_hooks = {
'bp_e': {'hooks': 'pre-create'},
'another_bp_e': {'hooks': 'pre-create'},
'nested_e': {
'bp': {'hooks': 'pre-create'}
},
'super_e': {
'nested': {
'bp': {'hooks': 'pre-create'}
}
},
'bp_a': {'hooks': 'pre-create'},
'another_bp_a': {'hooks': 'pre-create'},
'nested_a': {
'bp': {'hooks': 'pre-create'}
},
'super_a': {
'nested': {
'bp': {'hooks': 'pre-create'}
}
},
}
self.assertEqual(expected_hooks, actual_hooks)
def test_update_hooks_in_args(self):
type(self.args).pre_update = mock.PropertyMock(
return_value=['bp', 'another_bp'])
shell.do_stack_update(self.client, self.args)
self.client.stacks.update.assert_called_once()
expected_hooks = {
'bp': {'hooks': 'pre-update'},
'another_bp': {'hooks': 'pre-update'},
}
actual_hooks = self.client.stacks.update.call_args[1][
'environment']['resource_registry']['resources']
self.assertEqual(expected_hooks, actual_hooks)
def test_update_nested_hooks_in_args(self):
type(self.args).pre_update = mock.PropertyMock(
return_value=['nested/bp', 'super/nested/bp'])
shell.do_stack_update(self.client, self.args)
self.client.stacks.update.assert_called_once()
expected_hooks = {
'nested': {
'bp': {'hooks': 'pre-update'}
},
'super': {
'nested': {
'bp': {'hooks': 'pre-update'}
}
}
}
actual_hooks = self.client.stacks.update.call_args[1][
'environment']['resource_registry']['resources']
self.assertEqual(expected_hooks, actual_hooks)
def test_update_hooks_in_env_and_args(self):
type(self.args).pre_update = mock.PropertyMock(return_value=[
'nested_a/bp',
'bp_a',
'another_bp_a',
'super_a/nested/bp',
])
env = {
'resource_registry': {
'resources': {
'bp_e': {'hooks': 'pre-update'},
'another_bp_e': {'hooks': 'pre-update'},
'nested_e': {
'bp': {'hooks': 'pre-update'}
},
'super_e': {
'nested': {
'bp': {'hooks': 'pre-update'}
}
}
}
}
}
shell.template_utils.process_multiple_environments_and_files = \
mock.Mock(return_value=({}, env))
shell.do_stack_update(self.client, self.args)
self.client.stacks.update.assert_called_once()
actual_hooks = self.client.stacks.update.call_args[1][
'environment']['resource_registry']['resources']
expected_hooks = {
'bp_e': {'hooks': 'pre-update'},
'another_bp_e': {'hooks': 'pre-update'},
'nested_e': {
'bp': {'hooks': 'pre-update'}
},
'super_e': {
'nested': {
'bp': {'hooks': 'pre-update'}
}
},
'bp_a': {'hooks': 'pre-update'},
'another_bp_a': {'hooks': 'pre-update'},
'nested_a': {
'bp': {'hooks': 'pre-update'}
},
'super_a': {
'nested': {
'bp': {'hooks': 'pre-update'}
}
},
}
self.assertEqual(expected_hooks, actual_hooks)
def test_clear_all_hooks(self):
        shell._get_hook_type_via_status = \
            mock.Mock(return_value='pre-create')
type(self.args).hook = mock.PropertyMock(
return_value=['bp'])
type(self.args).pre_create = mock.PropertyMock(return_value=True)
bp = mock.Mock()
type(bp).resource_name = 'bp'
self.client.resources.list = mock.Mock(return_value=[bp])
shell.do_hook_clear(self.client, self.args)
self.assertEqual(1, self.client.resources.signal.call_count)
payload_pre_create = self.client.resources.signal.call_args_list[0][1]
self.assertEqual({'unset_hook': 'pre-create'},
payload_pre_create['data'])
self.assertEqual('bp', payload_pre_create['resource_name'])
self.assertEqual('mystack', payload_pre_create['stack_id'])
def test_clear_pre_create_hooks(self):
type(self.args).hook = mock.PropertyMock(
return_value=['bp'])
type(self.args).pre_create = mock.PropertyMock(return_value=True)
bp = mock.Mock()
type(bp).resource_name = 'bp'
self.client.resources.list = mock.Mock(return_value=[bp])
shell.do_hook_clear(self.client, self.args)
self.assertEqual(1, self.client.resources.signal.call_count)
payload = self.client.resources.signal.call_args_list[0][1]
self.assertEqual({'unset_hook': 'pre-create'}, payload['data'])
self.assertEqual('bp', payload['resource_name'])
self.assertEqual('mystack', payload['stack_id'])
def test_clear_pre_update_hooks(self):
type(self.args).hook = mock.PropertyMock(
return_value=['bp'])
type(self.args).pre_update = mock.PropertyMock(return_value=True)
bp = mock.Mock()
type(bp).resource_name = 'bp'
self.client.resources.list = mock.Mock(return_value=[bp])
shell.do_hook_clear(self.client, self.args)
self.assertEqual(1, self.client.resources.signal.call_count)
payload = self.client.resources.signal.call_args_list[0][1]
self.assertEqual({'unset_hook': 'pre-update'}, payload['data'])
self.assertEqual('bp', payload['resource_name'])
self.assertEqual('mystack', payload['stack_id'])
def test_clear_nested_hook(self):
type(self.args).hook = mock.PropertyMock(
return_value=['a/b/bp'])
type(self.args).pre_create = mock.PropertyMock(return_value=True)
a = mock.Mock()
type(a).resource_name = 'a'
b = mock.Mock()
type(b).resource_name = 'b'
bp = mock.Mock()
type(bp).resource_name = 'bp'
self.client.resources.list = mock.Mock(
side_effect=[[a], [b], [bp]])
m1 = mock.Mock()
m2 = mock.Mock()
type(m2).physical_resource_id = 'nested_id'
self.client.resources.get = mock.Mock(
side_effect=[m1, m2])
shell.do_hook_clear(self.client, self.args)
payload = self.client.resources.signal.call_args_list[0][1]
self.assertEqual({'unset_hook': 'pre-create'}, payload['data'])
self.assertEqual('bp', payload['resource_name'])
self.assertEqual('nested_id', payload['stack_id'])
def test_clear_wildcard_hooks(self):
type(self.args).hook = mock.PropertyMock(
return_value=['a/*b/bp*'])
type(self.args).pre_create = mock.PropertyMock(return_value=True)
a = mock.Mock()
type(a).resource_name = 'a'
b = mock.Mock()
        type(b).resource_name = 'matchthis_b'
bp = mock.Mock()
type(bp).resource_name = 'bp_matchthis'
self.client.resources.list = mock.Mock(
side_effect=[[a], [b], [bp]])
m1 = mock.Mock()
m2 = mock.Mock()
type(m2).physical_resource_id = 'nested_id'
self.client.resources.get = mock.Mock(
side_effect=[m1, m2])
shell.do_hook_clear(self.client, self.args)
payload = self.client.resources.signal.call_args_list[0][1]
self.assertEqual({'unset_hook': 'pre-create'},
payload['data'])
self.assertEqual('bp_matchthis', payload['resource_name'])
self.assertEqual('nested_id', payload['stack_id'])
|
{
"content_hash": "af4832c4e1086d4352d091c72819a2ff",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 78,
"avg_line_length": 38.921052631578945,
"alnum_prop": 0.5196078431372549,
"repo_name": "rdo-management/python-heatclient",
"id": "b797136acbb767ca87173eb239d45780d17cfbfa",
"size": "12378",
"binary": false,
"copies": "2",
"ref": "refs/heads/mgt-master",
"path": "heatclient/tests/unit/v1/test_hooks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "517520"
},
{
"name": "Shell",
"bytes": "3351"
}
],
"symlink_target": ""
}
|
"""
# Name: meas/wsgi.py
# Description:
# Created by: Auto
# Date Created: Oct 07 2016
# Last Modified: Oct 10 2016
# Modified by: Phuc Le-Sanh
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "meas_development.settings")
application = get_wsgi_application()
|
{
"content_hash": "98b764e51df301317a2fd5a453f8d5e6",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 76,
"avg_line_length": 24.571428571428573,
"alnum_prop": 0.6976744186046512,
"repo_name": "deka108/meas_deka",
"id": "0e89ada24d0d77e00ad9dc97e393fb251ce84c02",
"size": "344",
"binary": false,
"copies": "2",
"ref": "refs/heads/release-deka",
"path": "meas/wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "120893"
},
{
"name": "HTML",
"bytes": "500260"
},
{
"name": "JavaScript",
"bytes": "1112443"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "Python",
"bytes": "350763"
}
],
"symlink_target": ""
}
|
"""Backport of the new select module with epoll and kqueue interface
The select_backport extension is a backport of the new API functions of Python
2.7/SVN for Python 2.3 to 2.6. It contains object oriented wrappers for epoll
(Linux 2.6) and kqueue/kevent (BSD).
>>> try:
... import select_backport as select
... except ImportError:
... import select
>>> ep = select.epoll()
>>> kq = select.kqueue()
This release is based upon Python svn.
NOTE: I made this package because python2.5 and python2.6 lacked features
I'm using from select.kqueue and the select26 package isn't being maintained.
"""
import sys
try:
from setuptools import setup
from setuptools import Extension
except ImportError:
from distutils.core import setup
from distutils.core import Extension
MACROS = []
if "linux" in sys.platform:
MACROS.append(("HAVE_EPOLL", 1))
MACROS.append(("HAVE_SYS_EPOLL_H", 1))
elif "darwin" in sys.platform or "bsd" in sys.platform:
MACROS.append(("HAVE_KQUEUE", 1))
MACROS.append(("HAVE_SYS_EVENT_H", 1))
else:
raise ValueError("Platform '%s' is not supported" % sys.platform)
# Python 2.6 select doesn't work for our purposes.
#if sys.version_info >= (2,6):
# raise ValueError("select_backport is not required in Python 2.6+")
extensions = [
Extension("select_backport", ["select_backportmodule.c"],
define_macros = MACROS,
)
]
setup(
name = "select_backport",
version = "0.2",
description = __doc__[:__doc__.find('\n')].strip(),
    long_description = '\n'.join(__doc__.split('\n')[1:]),
author = "Christian Heimes",
author_email = "christian@cheimes.de",
maintainer = "Yesudeep Mangalapilly",
maintainer_email = "gora.khargosh@gmail.com",
download_url = "http://pypi.python.org/",
license = "MIT",
keywords = "select poll epoll kqueue",
ext_modules = extensions,
packages = ["tests"],
include_package_data = True,
platforms = ["Linux 2.6", "BSD", "Mac OS X"],
provides = ["select_backport"],
classifiers = (
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: C',
'Programming Language :: Python',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: BSD',
'Operating System :: POSIX :: Linux',
'Topic :: System :: Networking',
)
)
|
{
"content_hash": "fd1b12a5398b1292856d6b0d97884e69",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 78,
"avg_line_length": 31.195121951219512,
"alnum_prop": 0.63408913213448,
"repo_name": "gorakhargosh/select_backport",
"id": "0c40d88e248aedefb4a38cd468066a3d89efa331",
"size": "2583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "59787"
},
{
"name": "Python",
"bytes": "57786"
}
],
"symlink_target": ""
}
|
from django.conf.urls import include, url
from rest_framework.authtoken.views import obtain_auth_token
from blog.urls import router
urlpatterns = [
url(r'^api/token/', obtain_auth_token, name='api-token'),
url(r'^api/', include(router.urls)),
]
|
{
"content_hash": "59d2c79a66bd753f5eb46683c78c5932",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 61,
"avg_line_length": 28.333333333333332,
"alnum_prop": 0.7215686274509804,
"repo_name": "chatcaos-org/django_rest_framework",
"id": "5cb94df8e10aa22488a8e5004ce55e8f796583a8",
"size": "255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/hdson_rest/hdson_rest/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Puppet",
"bytes": "3926"
},
{
"name": "Python",
"bytes": "15158"
},
{
"name": "Shell",
"bytes": "331"
}
],
"symlink_target": ""
}
|
from copy import deepcopy
from unittest import TestCase
from django.test import TransactionTestCase
from django.core.files.storage import default_storage
from django.test import override_settings
from botocore.exceptions import NoCredentialsError
import datetime
from dateutil.parser import parse
from djconnectwise import models
from djconnectwise.utils import get_hash
from djconnectwise.sync import InvalidObjectException
from . import fixtures
from . import fixture_utils
from . import mocks
from .. import sync
from ..sync import log_sync_job
class AssertSyncMixin:
def assert_sync_job(self):
qset = \
models.SyncJob.objects.filter(
entity_name=self.model_class.__bases__[0].__name__
)
assert qset.exists()
class SynchronizerTestMixin(AssertSyncMixin):
synchronizer_class = None
model_class = None
fixture = None
def call_api(self, return_data):
raise NotImplementedError
def _assert_fields(self, instance, json_data):
raise NotImplementedError
def _sync(self, return_data):
_, get_patch = self.call_api(return_data)
self.synchronizer = self.synchronizer_class()
self.synchronizer.sync()
return _, get_patch
def _sync_with_results(self, return_data):
_, get_patch = self.call_api(return_data)
self.synchronizer = self.synchronizer_class()
self.synchronizer.sync()
return self.synchronizer.sync()
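    # Note: _sync_with_results() runs sync() twice, so the counts it returns
    # describe a repeat run; callers unpack the assumed 4-tuple as
    # (_, updated, skipped, _) and assert only the middle two values.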
def test_sync(self):
self._sync(self.fixture)
instance_dict = {c['id']: c for c in self.fixture}
for instance in self.model_class.objects.all():
json_data = instance_dict[instance.id]
self._assert_fields(instance, json_data)
self.assert_sync_job()
def test_sync_update(self):
self._sync(self.fixture)
json_data = self.fixture[0]
instance_id = json_data['id']
original = self.model_class.objects.get(id=instance_id)
name = 'Some New Name'
new_json = deepcopy(self.fixture[0])
new_json['name'] = name
new_json_list = [new_json]
self._sync(new_json_list)
changed = self.model_class.objects.get(id=instance_id)
self.assertNotEqual(original.name, name)
self._assert_fields(changed, new_json)
def test_sync_skips(self):
self._sync(self.fixture)
name = 'Some New Name'
new_json = deepcopy(self.fixture[0])
new_json['name'] = name
new_json_list = [new_json]
# Sync it twice to be sure that the data will be updated, then ignored
self._sync(new_json_list)
_, updated_count, skipped_count, _ = \
self._sync_with_results(new_json_list)
self.assertEqual(skipped_count, 1)
self.assertEqual(updated_count, 0)
class TestBatchConditionMixin(TestCase):
def test_get_optimal_size(self):
synchronizer = sync.BatchConditionMixin()
size = synchronizer.get_optimal_size([31, 35, 43, 52, 58])
self.assertEqual(size, 5)
sync.MAX_URL_LENGTH = 310
sync.MIN_URL_LENGTH = 305
size = synchronizer.get_optimal_size(
[1, 2, 3, 43434, 54562, 54568, 65643]
)
self.assertEqual(size, 3)
size = synchronizer.get_optimal_size(
[442434, 53462, 552468, 63443]
)
self.assertEqual(size, 1)
size = synchronizer.get_optimal_size([1])
self.assertEqual(size, 1)
size = synchronizer.get_optimal_size([])
self.assertIsNone(size)
class TestTerritorySynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.TerritorySynchronizer
model_class = models.TerritoryTracker
fixture = fixtures.API_SYSTEM_TERRITORY_LIST
def setUp(self):
super().setUp()
fixture_utils.init_territories()
def call_api(self, return_data):
return mocks.system_api_get_territories_call(return_data)
def test_sync_update(self):
self._sync(self.fixture)
json_data = self.fixture[0]
instance_id = json_data['id']
original = self.model_class.objects.get(id=instance_id)
name = 'A Different Territory'
new_json = deepcopy(json_data)
new_json['name'] = name
new_json_list = [new_json]
self._sync(new_json_list)
changed = self.model_class.objects.get(id=instance_id)
self.assertNotEqual(original.name, name)
self._assert_fields(changed, new_json)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
class TestCompanySynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.CompanySynchronizer
model_class = models.CompanyTracker
fixture = fixtures.API_COMPANY_LIST
def setUp(self):
fixture_utils.init_territories()
mocks.company_api_get_company_statuses_call(
fixtures.API_COMPANY_STATUS_LIST)
sync.CompanyStatusSynchronizer().sync()
fixture_utils.init_company_types()
def call_api(self, return_data):
return mocks.company_api_get_call(return_data)
def _assert_fields(self, company, api_company):
self.assertEqual(company.name, api_company['name'])
self.assertEqual(company.identifier, api_company['identifier'])
self.assertEqual(company.phone_number, api_company['phoneNumber'])
self.assertEqual(company.fax_number, api_company['faxNumber'])
self.assertEqual(company.address_line1, api_company['addressLine1'])
        self.assertEqual(company.address_line2, api_company['addressLine2'])
self.assertEqual(company.city, api_company['city'])
self.assertEqual(company.state_identifier, api_company['state'])
self.assertEqual(company.zip, api_company['zip'])
self.assertEqual(company.status.id, api_company['status']['id'])
self.assertEqual(
company.company_types.first().id, api_company['types'][0]['id'])
class TestCompanyStatusSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.CompanyStatusSynchronizer
model_class = models.CompanyStatusTracker
fixture = fixtures.API_COMPANY_STATUS_LIST
def call_api(self, return_data):
return mocks.company_api_get_company_statuses_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.default_flag, json_data['defaultFlag'])
self.assertEqual(instance.inactive_flag, json_data['inactiveFlag'])
self.assertEqual(instance.notify_flag, json_data['notifyFlag'])
self.assertEqual(instance.dissalow_saving_flag,
json_data['disallowSavingFlag'])
self.assertEqual(instance.notification_message,
json_data['notificationMessage'])
self.assertEqual(instance.custom_note_flag,
json_data['customNoteFlag'])
self.assertEqual(instance.cancel_open_tracks_flag,
json_data['cancelOpenTracksFlag'])
class TestTimeEntrySynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.TimeEntrySynchronizer
model_class = models.TimeEntryTracker
fixture = fixtures.API_TIME_ENTRY_LIST
def call_api(self, return_data):
return mocks.time_api_get_time_entries_call(return_data)
def test_sync_update(self):
self._sync(self.fixture)
json_data = self.fixture[0]
instance_id = json_data['id']
original = self.model_class.objects.get(id=instance_id)
start = '2003-10-06T14:48:18Z'
new_json = deepcopy(self.fixture[0])
new_json["timeStart"] = start
new_json_list = [new_json]
self._sync(new_json_list)
changed = self.model_class.objects.get(id=instance_id)
self.assertNotEqual(original.time_start,
start)
self._assert_fields(changed, new_json)
def test_sync_skips(self):
self._sync(self.fixture)
start = '2003-10-06T14:48:18Z'
new_json = deepcopy(self.fixture[0])
new_json["timeStart"] = start
new_json_list = [new_json]
# Sync it twice to be sure that the data will be updated, then ignored
self._sync(new_json_list)
_, updated_count, skipped_count, _ = self._sync_with_results(
new_json_list)
self.assertEqual(skipped_count, 1)
self.assertEqual(updated_count, 0)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.charge_to_id.id, json_data['chargeToId'])
self.assertEqual(instance.charge_to_type, json_data['chargeToType'])
self.assertEqual(instance.time_start, parse(json_data['timeStart']))
self.assertEqual(instance.time_end, parse(json_data['timeEnd']))
self.assertEqual(instance.actual_hours, json_data['actualHours'])
self.assertEqual(instance.billable_option, json_data['billableOption'])
self.assertEqual(instance.notes, json_data['notes'])
class TestCompanyTypeSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.CompanyTypeSynchronizer
model_class = models.CompanyTypeTracker
fixture = fixtures.API_COMPANY_TYPES_LIST
def call_api(self, return_data):
return mocks.company_api_get_company_types_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.vendor_flag, json_data['vendorFlag'])
class TestScheduleEntriesSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.ScheduleEntriesSynchronizer
model_class = models.ScheduleEntryTracker
fixture = fixtures.API_SCHEDULE_ENTRIES
def setUp(self):
super().setUp()
fixture_utils.init_boards()
fixture_utils.init_territories()
fixture_utils.init_contacts()
fixture_utils.init_project_statuses()
fixture_utils.init_projects()
fixture_utils.init_locations()
fixture_utils.init_priorities()
fixture_utils.init_members()
fixture_utils.init_opportunity_stages()
fixture_utils.init_opportunity_types()
fixture_utils.init_opportunities()
fixture_utils.init_board_statuses()
fixture_utils.init_schedule_statuses()
fixture_utils.init_schedule_types()
fixture_utils.init_teams()
fixture_utils.init_types()
fixture_utils.init_subtypes()
fixture_utils.init_items()
fixture_utils.init_tickets()
fixture_utils.init_activities()
def call_api(self, return_data):
return mocks.schedule_api_get_schedule_entries_call(return_data)
def test_sync_update(self):
self._sync(self.fixture)
json_data = self.fixture[0]
instance_id = json_data['id']
original = self.model_class.objects.get(id=instance_id)
name = 'Some New Name'
new_json = deepcopy(self.fixture[0])
new_json['name'] = name
new_json_list = [new_json]
self._sync(new_json_list)
changed = self.model_class.objects.get(id=instance_id)
self.assertNotEqual(original.name,
name)
self._assert_fields(changed, new_json)
def test_schedule_object_assignment(self):
self._sync(self.fixture)
json_data = self.fixture[0]
schedule_entry = self.model_class.objects.get(id=json_data['id'])
self.assertEqual(schedule_entry.schedule_type.identifier, "S")
json_data = self.fixture[1]
schedule_entry = self.model_class.objects.get(id=json_data['id'])
self.assertEqual(schedule_entry.schedule_type.identifier, "C")
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.done_flag, json_data['doneFlag'])
self.assertEqual(instance.date_start,
parse(json_data['dateStart']))
self.assertEqual(instance.date_end,
parse(json_data['dateEnd']))
# verify referenced objects
if instance.activity_object is not None:
self.assertEqual(instance.activity_object.id,
json_data['objectId'])
if instance.ticket_object is not None:
self.assertEqual(instance.ticket_object.id, json_data['objectId'])
self.assertEqual(instance.where.id, json_data['where']['id'])
self.assertEqual(instance.member.id, json_data['member']['id'])
self.assertEqual(instance.status.id, json_data['status']['id'])
self.assertEqual(instance.schedule_type.id, json_data['type']['id'])
class TestScheduleTypeSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.ScheduleTypeSynchronizer
model_class = models.ScheduleTypeTracker
fixture = fixtures.API_SCHEDULE_TYPE_LIST
def call_api(self, return_data):
return mocks.schedule_api_get_schedule_types_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
class TestScheduleStatusSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.ScheduleStatusSynchronizer
model_class = models.ScheduleStatusTracker
fixture = fixtures.API_SCHEDULE_STATUS_LIST
def call_api(self, return_data):
return mocks.schedule_api_get_schedule_statuses_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
class TestProjectStatusSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.ProjectStatusSynchronizer
model_class = models.ProjectStatusTracker
fixture = fixtures.API_PROJECT_STATUSES
def call_api(self, return_data):
return mocks.projects_api_get_project_statuses_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.default_flag, json_data['defaultFlag'])
self.assertEqual(instance.inactive_flag, json_data['inactiveFlag'])
self.assertEqual(instance.closed_flag, json_data['closedFlag'])
class TestProjectTypeSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.ProjectTypeSynchronizer
model_class = models.ProjectTypeTracker
fixture = fixtures.API_PROJECT_TYPES
def call_api(self, return_data):
return mocks.projects_api_get_project_types_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.default_flag, json_data['defaultFlag'])
self.assertEqual(instance.inactive_flag, json_data['inactiveFlag'])
class TestProjectPhaseSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.ProjectPhaseSynchronizer
model_class = models.ProjectPhaseTracker
fixture = fixtures.API_PROJECT_PHASE_LIST
def setUp(self):
super().setUp()
fixture_utils.init_project_statuses()
fixture_utils.init_project_types()
fixture_utils.init_boards()
fixture_utils.init_projects()
def call_api(self, return_data):
return mocks.projects_api_get_project_phases_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.description, json_data['description'])
self.assertEqual(instance.bill_time, json_data['billTime'])
self.assertEqual(instance.notes, json_data['notes'])
self.assertEqual(instance.scheduled_hours, json_data['scheduledHours'])
self.assertEqual(instance.actual_hours, json_data['actualHours'])
self.assertEqual(instance.budget_hours, json_data['budgetHours'])
self.assertEqual(instance.project_id, json_data['projectId'])
self.assertEqual(
instance.scheduled_start, parse(json_data['scheduledStart']).date()
)
self.assertEqual(
instance.scheduled_end, parse(json_data['scheduledEnd']).date()
)
self.assertEqual(
instance.actual_start, parse(json_data['actualStart']).date()
)
self.assertEqual(
instance.actual_end, parse(json_data['actualEnd']).date()
)
def test_sync_update(self):
self._sync(self.fixture)
json_data = self.fixture[0]
instance_id = json_data['id']
original = self.model_class.objects.get(id=instance_id)
description = 'Some New Description'
new_json = deepcopy(self.fixture[0])
new_json['description'] = description
new_json_list = [new_json]
self._sync(new_json_list)
changed = self.model_class.objects.get(id=instance_id)
self.assertNotEqual(original.description, description)
self._assert_fields(changed, new_json)
def test_sync_skips(self):
self._sync(self.fixture)
description = 'Some New Description'
new_json = deepcopy(self.fixture[0])
new_json['description'] = description
new_json_list = [new_json]
# Sync twice: the first pass updates, the second should skip it
self._sync(new_json_list)
_, updated_count, skipped_count, _ = \
self._sync_with_results(new_json_list)
self.assertEqual(skipped_count, 1)
self.assertEqual(updated_count, 0)
class TestProjectSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.ProjectSynchronizer
model_class = models.ProjectTracker
fixture = fixtures.API_PROJECT_LIST
def setUp(self):
super().setUp()
fixture_utils.init_project_statuses()
fixture_utils.init_project_types()
fixture_utils.init_boards()
def call_api(self, return_data):
return mocks.project_api_get_projects_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.manager_id, json_data['manager']['id'])
self.assertAlmostEqual(
float(instance.actual_hours),
json_data['actualHours']
)
self.assertAlmostEqual(
float(instance.budget_hours),
json_data['budgetHours']
)
self.assertAlmostEqual(
float(instance.scheduled_hours),
json_data['scheduledHours']
)
self.assertEqual(
instance.actual_start, parse(json_data['actualStart']).date()
)
self.assertEqual(
instance.actual_end, parse(json_data['actualEnd']).date()
)
self.assertEqual(
instance.estimated_start, parse(json_data['estimatedStart']).date()
)
self.assertEqual(
instance.estimated_end, parse(json_data['estimatedEnd']).date()
)
class TestTeamSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.TeamSynchronizer
model_class = models.TeamTracker
fixture = fixtures.API_SERVICE_TEAM_LIST
def call_api(self, return_data):
return mocks.service_api_get_teams_call(return_data)
def setUp(self):
fixture_utils.init_boards()
def _assert_fields(self, team, team_json):
ids = set([t.id for t in team.members.all()])
self.assertEqual(team.id, team_json['id'])
self.assertEqual(team.name, team_json['name'])
self.assertEqual(team.board.id, team_json['boardId'])
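# '<' is a proper-subset check: every synced member id must appear
# in the fixture's member list.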
self.assertTrue(ids < set(team_json['members']))
class TestPrioritySynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.PrioritySynchronizer
model_class = models.TicketPriorityTracker
fixture = fixtures.API_SERVICE_PRIORITY_LIST
def _assert_fields(self, priority, api_priority):
assert priority.name == api_priority['name']
assert priority.id == api_priority['id']
if 'color' in api_priority.keys():
assert priority.color == api_priority['color']
else:
assert priority.color in self.valid_prio_colors
if 'sortOrder' in api_priority.keys():
assert priority.sort == api_priority['sortOrder']
else:
assert priority.sort is None
def setUp(self):
self.synchronizer = sync.PrioritySynchronizer()
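# Priorities synced without an explicit color should fall back to
# one of the model's default colors (checked in _assert_fields).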
self.valid_prio_colors = \
list(models.TicketPriority.DEFAULT_COLORS.values()) + \
[models.TicketPriority.DEFAULT_COLOR]
def call_api(self, return_data):
return mocks.service_api_get_priorities_call(return_data)
class TestLocationSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.LocationSynchronizer
model_class = models.LocationTracker
fixture = fixtures.API_SERVICE_LOCATION_LIST
def _assert_fields(self, location, api_location):
self.assertEqual(location.name, api_location['name'])
self.assertEqual(location.id, api_location['id'])
self.assertEqual(location.where, api_location['where'])
def setUp(self):
self.synchronizer = sync.LocationSynchronizer()
def call_api(self, return_data):
return mocks.service_api_get_locations_call(return_data)
class TestBoardSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.BoardSynchronizer
model_class = models.ConnectWiseBoardTracker
fixture = fixtures.API_BOARD_LIST
def call_api(self, return_data):
return mocks.service_api_get_boards_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.inactive, json_data['inactiveFlag'])
self.assertEqual(instance.work_role.name,
json_data['workRole']['name'])
self.assertEqual(instance.work_type.name,
json_data['workType']['name'])
def setUp(self):
super().setUp()
fixture_utils.init_work_roles()
fixture_utils.init_work_types()
fixture_utils.init_boards()
class TestBoardStatusSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.BoardStatusSynchronizer
model_class = models.BoardStatusTracker
fixture = fixtures.API_BOARD_STATUS_LIST
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.sort_order, json_data['sortOrder'])
self.assertEqual(instance.display_on_board,
json_data['displayOnBoard'])
self.assertEqual(instance.inactive, json_data['inactive'])
self.assertEqual(instance.closed_status, json_data['closedStatus'])
def setUp(self):
fixture_utils.init_boards()
def call_api(self, return_data):
return mocks.service_api_get_statuses_call(return_data)
class TestServiceNoteSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.ServiceNoteSynchronizer
model_class = models.ServiceNoteTracker
fixture = fixtures.API_SERVICE_NOTE_LIST
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.ticket.id, json_data['ticketId'])
self.assertEqual(instance.text, json_data['text'])
self.assertEqual(instance.detail_description_flag,
json_data['detailDescriptionFlag'])
self.assertEqual(instance.internal_analysis_flag,
json_data['internalAnalysisFlag'])
self.assertEqual(instance.resolution_flag, json_data['resolutionFlag'])
self.assertEqual(instance.member.identifier,
json_data['member']['identifier'])
self.assertEqual(instance.date_created,
parse(json_data['dateCreated']))
self.assertEqual(instance.created_by, json_data['createdBy'])
self.assertEqual(instance.internal_flag, json_data['internalFlag'])
self.assertEqual(instance.external_flag, json_data['externalFlag'])
def call_api(self, return_data):
return mocks.service_api_get_notes_call(return_data)
def test_sync_update(self):
self._sync(self.fixture)
json_data = self.fixture[0]
instance_id = json_data['id']
original = self.model_class.objects.get(id=instance_id)
flag = False
new_json = deepcopy(self.fixture[0])
new_json['detailDescriptionFlag'] = flag
new_json_list = [new_json]
self._sync(new_json_list)
changed = self.model_class.objects.get(id=instance_id)
self.assertNotEqual(original.detail_description_flag, flag)
self._assert_fields(changed, new_json)
def test_sync_skips(self):
self._sync(self.fixture)
new_json = deepcopy(self.fixture[0])
new_json['detailDescriptionFlag'] = False
new_json_list = [new_json]
# Sync twice: the first pass updates, the second should skip it
self._sync(new_json_list)
_, updated_count, skipped_count, _ = \
self._sync_with_results(new_json_list)
self.assertEqual(skipped_count, 1)
self.assertEqual(updated_count, 0)
class TestOpportunityNoteSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.OpportunityNoteSynchronizer
model_class = models.OpportunityNoteTracker
fixture = fixtures.API_SALES_OPPORTUNITY_NOTE_LIST
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.text, json_data['text'])
self.assertEqual(instance.opportunity.id,
json_data['opportunityId'])
def setUp(self):
super().setUp()
fixture_utils.init_opportunity_notes()
fixture_utils.init_opportunity_stages()
fixture_utils.init_opportunities()
def call_api(self, return_data):
return mocks.sales_api_get_opportunity_notes_call(return_data)
def test_sync_update(self):
self._sync(self.fixture)
json_data = self.fixture[0]
instance_id = json_data['id']
original = self.model_class.objects.get(id=instance_id)
text = "Different Text, not the same text, but new, better text."
new_json = deepcopy(self.fixture[0])
new_json['text'] = text
new_json_list = [new_json]
self._sync(new_json_list)
changed = self.model_class.objects.get(id=instance_id)
self.assertNotEqual(original.text, text)
self._assert_fields(changed, new_json)
def test_sync_skips(self):
self._sync(self.fixture)
text = "Different Text, not the same text, but new, better text."
new_json = deepcopy(self.fixture[0])
new_json['text'] = text
new_json_list = [new_json]
# Sync twice: the first pass updates, the second should skip it
self._sync(new_json_list)
_, updated_count, skipped_count, _ = \
self._sync_with_results(new_json_list)
self.assertEqual(skipped_count, 1)
self.assertEqual(updated_count, 0)
class TestMemberSynchronization(TransactionTestCase, AssertSyncMixin):
model_class = models.MemberTracker
def setUp(self):
fixture_utils.init_work_roles()
self.identifier = 'User1'
mocks.system_api_get_members_call([fixtures.API_MEMBER])
self.synchronizer = sync.MemberSynchronizer()
mocks.system_api_get_member_image_by_photo_id_call(
(mocks.CW_MEMBER_IMAGE_FILENAME, mocks.get_member_avatar()))
def _assert_member_fields(self, local_member, api_member):
self.assertEqual(local_member.first_name, api_member['firstName'])
self.assertEqual(local_member.last_name, api_member['lastName'])
self.assertEqual(local_member.office_email, api_member['officeEmail'])
def test_sync_member_update(self):
member = models.Member()
member.id = 176
member.identifier = self.identifier
member.first_name = 'some stale first name'
member.last_name = 'some stale last name'
member.office_email = 'some@stale.com'
member.save()
self.synchronizer.sync()
local_member = models.Member.objects.get(identifier=self.identifier)
api_member = fixtures.API_MEMBER
self._assert_member_fields(local_member, api_member)
def test_sync_member_create(self):
self.synchronizer.sync()
local_member = models.Member.objects.all().first()
api_member = fixtures.API_MEMBER
self._assert_member_fields(local_member, api_member)
self.assert_sync_job()
def test_sync_member_with_no_photo(self):
member_without_photo = deepcopy(fixtures.API_MEMBER)
member_without_photo.pop('photo')
mocks.system_api_get_members_call([member_without_photo])
self.synchronizer = sync.MemberSynchronizer()
self.synchronizer.sync()
local_member = models.Member.objects.get(identifier=self.identifier)
self._assert_member_fields(local_member, member_without_photo)
local_avatar = local_member.avatar
self.assertFalse(local_avatar)
def test_sync_member_avatar_name_is_updated(self):
self.synchronizer = sync.MemberSynchronizer()
self.synchronizer.sync()
member = models.Member.objects.get(identifier=self.identifier)
old_avatar = member.avatar
member.avatar = 'new_image_name.png'
self.synchronizer.sync()
self.assertNotEqual(old_avatar, member.avatar)
def test_avatar_thumbnails_are_in_storage(self):
self.synchronizer = sync.MemberSynchronizer()
self.synchronizer.sync()
member = models.Member.objects.get(identifier=self.identifier)
attachment_filename = 'some_new_image.png'
avatar = mocks.get_member_avatar()
self.synchronizer._save_avatar(member, avatar, attachment_filename)
filename = '{}.{}'.format(get_hash(avatar), 'png')
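# Thumbnail keys are the hashed filename with the pixel size
# appended, e.g. '<hash>.png20x20.png'.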
micro_avatar_size = filename + '20x20.png'
avatar_size = filename + '80x80.png'
self.assertTrue(default_storage.exists(avatar_size))
self.assertTrue(default_storage.exists(micro_avatar_size))
@override_settings(DEFAULT_FILE_STORAGE='storages.backends.'
's3boto3.S3Boto3Storage',
AWS_STORAGE_BUCKET_NAME='somebucket')
def test_app_wont_crash_without_DO_credentials(self):
self.synchronizer = sync.MemberSynchronizer()
api_member = fixtures.API_MEMBER
# If this doesn't raise an exception, GREAT!
self.synchronizer.update_or_create_instance(api_member)
# Now we check to make sure that it WOULD have raised an exception
# if we didn't call it from its safe location. If no exceptions
# occur then there is still a problem.
self.synchronizer = sync.MemberSynchronizer()
self.synchronizer.sync()
member = models.Member.objects.get(identifier=self.identifier)
attachment_filename = 'some_new_image.png'
avatar = mocks.get_member_avatar()
with self.assertRaises(NoCredentialsError):
self.synchronizer._save_avatar(
member, avatar, attachment_filename)
class TestOpportunitySynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.OpportunitySynchronizer
model_class = models.OpportunityTracker
fixture = fixtures.API_SALES_OPPORTUNITIES
def setUp(self):
super().setUp()
self.synchronizer = self.synchronizer_class()
mocks.sales_api_get_opportunity_types_call(
fixtures.API_SALES_OPPORTUNITY_TYPES)
fixture_utils.init_activities()
fixture_utils.init_opportunity_statuses()
fixture_utils.init_opportunity_types()
fixture_utils.init_sales_probabilities()
fixture_utils.init_members()
fixture_utils.init_territories()
fixture_utils.init_companies()
def call_api(self, return_data):
return mocks.sales_api_get_opportunities_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.expected_close_date,
parse(json_data['expectedCloseDate']).date())
self.assertEqual(instance.pipeline_change_date,
parse(json_data['pipelineChangeDate']))
self.assertEqual(instance.date_became_lead,
parse(json_data['dateBecameLead']))
self.assertEqual(instance.closed_date,
parse(json_data['closedDate']))
self.assertEqual(instance.notes, json_data['notes'])
self.assertEqual(instance.source, json_data['source'])
self.assertEqual(instance.location_id, json_data['locationId'])
self.assertEqual(instance.business_unit_id,
json_data['businessUnitId'])
self.assertEqual(instance.customer_po,
json_data['customerPO'])
self.assertEqual(instance.priority_id,
json_data['priority']['id'])
self.assertEqual(instance.stage_id,
json_data['stage']['id'])
self.assertEqual(instance.opportunity_type_id,
json_data['type']['id'])
self.assertEqual(instance.status_id,
json_data['status']['id'])
self.assertEqual(instance.primary_sales_rep_id,
json_data['primarySalesRep']['id'])
self.assertEqual(instance.secondary_sales_rep_id,
json_data['secondarySalesRep']['id'])
self.assertEqual(instance.company_id,
json_data['company']['id'])
self.assertEqual(instance.closed_by_id,
json_data['closedBy']['id'])
def test_fetch_sync_by_id(self):
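# fetch_sync_by_id pulls a single record through the (mocked) API
# and syncs it into the local model.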
json_data = self.fixture[0]
_, patch = mocks.sales_api_by_id_call(json_data)
result = self.synchronizer.fetch_sync_by_id(json_data['id'])
self._assert_fields(result, json_data)
patch.stop()
# TODO This test does nothing, must be updated
# def test_fetch_delete_by_id(self):
# json_data = self.fixture[0]
# _, patch = mocks.sales_api_by_id_call(json_data)
# self.synchronizer.fetch_delete_by_id(json_data['id'])
# self.assertFalse(Opportunity.objects.filter(
# id=json_data['id']).exists())
# patch.stop()
class TestOpportunityStageSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.OpportunityStageSynchronizer
model_class = models.OpportunityStageTracker
fixture = fixtures.API_SALES_OPPORTUNITY_STAGES
def call_api(self, return_data):
return mocks.sales_api_get_opportunity_stages_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
class TestOpportunityStatusSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.OpportunityStatusSynchronizer
model_class = models.OpportunityStatusTracker
fixture = fixtures.API_SALES_OPPORTUNITY_STATUSES
def call_api(self, return_data):
return mocks.sales_api_get_opportunity_statuses_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.won_flag, json_data['wonFlag'])
self.assertEqual(instance.lost_flag, json_data['lostFlag'])
self.assertEqual(instance.closed_flag, json_data['closedFlag'])
self.assertEqual(instance.inactive_flag, json_data['inactiveFlag'])
class TestOpportunityTypeSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.OpportunityTypeSynchronizer
model_class = models.OpportunityTypeTracker
fixture = fixtures.API_SALES_OPPORTUNITY_TYPES
def call_api(self, return_data):
return mocks.sales_api_get_opportunity_types_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.description, json_data['description'])
self.assertEqual(instance.inactive_flag, json_data['inactiveFlag'])
def test_sync_update(self):
self._sync(self.fixture)
json_data = self.fixture[0]
instance_id = json_data['id']
original = self.model_class.objects.get(id=instance_id)
description = 'Some New Description'
new_json = deepcopy(self.fixture[0])
new_json['description'] = description
new_json_list = [new_json]
self._sync(new_json_list)
changed = self.model_class.objects.get(id=instance_id)
self.assertNotEqual(original.description, description)
self._assert_fields(changed, new_json)
def test_sync_skips(self):
self._sync(self.fixture)
description = 'Some New Description'
new_json = deepcopy(self.fixture[0])
new_json['description'] = description
new_json_list = [new_json]
# Sync twice: the first pass updates, the second should skip it
self._sync(new_json_list)
_, updated_count, skipped_count, _ = \
self._sync_with_results(new_json_list)
self.assertEqual(skipped_count, 1)
self.assertEqual(updated_count, 0)
class TestHolidaySynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.HolidaySynchronizer
model_class = models.HolidayTracker
fixture = fixtures.API_SCHEDULE_HOLIDAY_MODEL_LIST
def setUp(self):
fixture_utils.init_holiday_lists()
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.all_day_flag, json_data['allDayFlag'])
self.assertEqual(instance.date, parse(json_data['date']).date())
self.assertEqual(
instance.start_time, parse(json_data['timeStart']).time())
self.assertEqual(instance.end_time, parse(json_data['timeEnd']).time())
self.assertEqual(
instance.holiday_list.id, json_data['holidayList']['id'])
def call_api(self, return_data):
return mocks.schedule_api_get_holidays_call(return_data)
def test_sync_update(self):
self._sync(self.fixture)
json_data = self.fixture[0]
instance_id = json_data['id']
original = self.model_class.objects.get(id=instance_id)
new_json = deepcopy(self.fixture[0])
name = 'A new name'
new_json['name'] = name
new_json_list = [new_json]
self._sync(new_json_list)
changed = self.model_class.objects.get(id=instance_id)
self.assertNotEqual(original.name, name)
self._assert_fields(changed, new_json)
class TestHolidayListSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.HolidayListSynchronizer
model_class = models.HolidayListTracker
fixture = fixtures.API_SCHEDULE_HOLIDAY_LIST_LIST
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
def call_api(self, return_data):
return mocks.schedule_api_get_holiday_lists_call(return_data)
def test_sync_update(self):
self._sync(self.fixture)
json_data = self.fixture[0]
instance_id = json_data['id']
original = self.model_class.objects.get(id=instance_id)
new_json = deepcopy(self.fixture[0])
name = 'A new name'
new_json['name'] = name
new_json_list = [new_json]
self._sync(new_json_list)
changed = self.model_class.objects.get(id=instance_id)
self.assertNotEqual(original.name, name)
self._assert_fields(changed, new_json)
class TestCalendarSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.CalendarSynchronizer
model_class = models.CalendarTracker
fixture = fixtures.API_SCHEDULE_CALENDAR_LIST
def setUp(self):
fixture_utils.init_holiday_lists()
def call_api(self, return_data):
return mocks.schedule_api_get_calendars_call(return_data)
def test_sync_update(self):
self._sync(self.fixture)
json_data = self.fixture[0]
instance_id = json_data['id']
original = self.model_class.objects.get(id=instance_id)
name = 'A New Calendar'
new_json = deepcopy(json_data)
new_json['name'] = name
new_json_list = [new_json]
self._sync(new_json_list)
changed = self.model_class.objects.get(id=instance_id)
self.assertNotEqual(original.name, name)
self._assert_fields(changed, new_json)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(
instance.monday_start_time,
parse(json_data['mondayStartTime']).time()
)
self.assertEqual(
instance.monday_end_time,
parse(json_data['mondayEndTime']).time()
)
self.assertEqual(
instance.tuesday_start_time,
parse(json_data['tuesdayStartTime']).time()
)
self.assertEqual(
instance.tuesday_end_time,
parse(json_data['tuesdayEndTime']).time()
)
self.assertEqual(
instance.wednesday_start_time,
parse(json_data['wednesdayStartTime']).time()
)
self.assertEqual(
instance.wednesday_end_time,
parse(json_data['wednesdayEndTime']).time()
)
self.assertEqual(
instance.thursday_start_time,
parse(json_data['thursdayStartTime']).time()
)
self.assertEqual(
instance.thursday_end_time,
parse(json_data['thursdayEndTime']).time()
)
self.assertEqual(
instance.friday_start_time,
parse(json_data['fridayStartTime']).time()
)
self.assertEqual(
instance.friday_end_time,
parse(json_data['fridayEndTime']).time()
)
# Don't parse these; they are None in the fixtures
self.assertEqual(
instance.saturday_start_time,
json_data['saturdayStartTime']
)
self.assertEqual(
instance.saturday_end_time,
json_data['saturdayEndTime']
)
self.assertEqual(
instance.sunday_start_time,
json_data['sundayStartTime']
)
self.assertEqual(
instance.sunday_end_time,
json_data['sundayEndTime']
)
class TestMyCompanyOtherSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.MyCompanyOtherSynchronizer
model_class = models.MyCompanyOtherTracker
fixture = fixtures.API_SYSTEM_OTHER_LIST
def setUp(self):
fixture_utils.init_calendars()
fixture_utils.init_others()
def call_api(self, return_data):
return mocks.system_api_get_other_call(return_data)
def test_sync_update(self):
self._sync(self.fixture)
json_data = self.fixture[0]
instance_id = json_data['id']
original = self.model_class.objects.get(id=instance_id)
new_json = deepcopy(json_data)
new_json['defaultCalendar'] = None
new_json_list = [new_json]
self._sync(new_json_list)
changed = self.model_class.objects.get(id=instance_id)
self.assertNotEqual(
original.default_calendar, changed.default_calendar)
def test_sync_skips(self):
self._sync(self.fixture)
new_json = deepcopy(self.fixture[0])
new_json['defaultCalendar'] = None
new_json_list = [new_json]
# Sync twice: the first pass updates, the second should skip it
self._sync(new_json_list)
_, updated_count, skipped_count, _ = \
self._sync_with_results(new_json_list)
self.assertEqual(skipped_count, 1)
self.assertEqual(updated_count, 0)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
class TestSLASynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.SLASynchronizer
model_class = models.SlaTracker
fixture = fixtures.API_SERVICE_SLA_LIST
def setUp(self):
fixture_utils.init_calendars()
def call_api(self, return_data):
return mocks.service_api_get_slas_call(return_data)
def test_sync_update(self):
self._sync(self.fixture)
json_data = self.fixture[0]
instance_id = json_data['id']
original = self.model_class.objects.get(id=instance_id)
name = 'A Different SLA'
new_json = deepcopy(json_data)
new_json['name'] = name
new_json_list = [new_json]
self._sync(new_json_list)
changed = self.model_class.objects.get(id=instance_id)
self.assertNotEqual(original.name, name)
self._assert_fields(changed, new_json)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.default_flag, json_data['defaultFlag'])
self.assertEqual(instance.respond_hours, json_data['respondHours'])
self.assertEqual(instance.plan_within, json_data['planWithin'])
self.assertEqual(instance.resolution_hours,
json_data['resolutionHours'])
class TestSLAPrioritySynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.SLAPrioritySynchronizer
model_class = models.SlaPriorityTracker
fixture = fixtures.API_SERVICE_SLA_PRIORITY_LIST
def setUp(self):
fixture_utils.init_calendars()
fixture_utils.init_slas()
fixture_utils.init_priorities()
def call_api(self, return_data):
return mocks.service_api_get_sla_priorities_call(return_data)
def test_sync_update(self):
self._sync(self.fixture)
json_data = self.fixture[0]
instance_id = json_data['id']
original = self.model_class.objects.get(id=instance_id)
respond_hours = 500
new_json = deepcopy(json_data)
new_json['respondHours'] = respond_hours
new_json_list = [new_json]
self._sync(new_json_list)
changed = self.model_class.objects.get(id=instance_id)
self.assertNotEqual(original.respond_hours, respond_hours)
self._assert_fields(changed, new_json)
def test_sync_skips(self):
self._sync(self.fixture)
respond_hours = 500
new_json = deepcopy(self.fixture[0])
new_json['respondHours'] = respond_hours
new_json_list = [new_json]
# Sync twice: the first pass updates, the second should skip it
self._sync(new_json_list)
_, updated_count, skipped_count, _ = \
self._sync_with_results(new_json_list)
self.assertEqual(skipped_count, 1)
self.assertEqual(updated_count, 0)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.respond_hours, json_data['respondHours'])
self.assertEqual(instance.plan_within, json_data['planWithin'])
self.assertEqual(instance.resolution_hours,
json_data['resolutionHours'])
class TestTicketSynchronizerMixin(AssertSyncMixin):
model_class = models.TicketTracker
def setUp(self):
super().setUp()
mocks.system_api_get_members_call(fixtures.API_MEMBER_LIST)
mocks.system_api_get_member_image_by_photo_id_call(
(mocks.CW_MEMBER_IMAGE_FILENAME, mocks.get_member_avatar()))
def _clean(self):
models.Ticket.objects.all().delete()
def _init_data(self):
self._clean()
fixture_utils.init_holiday_lists()
fixture_utils.init_calendars()
fixture_utils.init_slas()
fixture_utils.init_board_statuses()
fixture_utils.init_teams()
fixture_utils.init_types()
fixture_utils.init_subtypes()
fixture_utils.init_items()
def test_sync_ticket(self):
"""
Test to ensure ticket synchronizer saves a CW Ticket instance
locally.
"""
synchronizer = self.sync_class()
synchronizer.sync()
self.assertGreater(models.Ticket.objects.all().count(), 0)
json_data = self.ticket_fixture
instance = models.Ticket.objects.get(id=json_data['id'])
self._assert_sync(instance, json_data)
self.assert_sync_job()
def test_sync_ticket_truncates_automatic_cc_field(self):
"""
Test to ensure ticket synchronizer truncates the automatic CC field
to 1000 characters.
"""
synchronizer = self.sync_class()
instance = models.Ticket()
field_data = "kanban@kanban.com;"
for i in range(6):
# Make some field data with length 1152
field_data = field_data + field_data
json_data = deepcopy(self.ticket_fixture)
json_data['automaticEmailCc'] = field_data
instance = synchronizer._assign_field_data(instance, json_data)
self.assertEqual(len(instance.automatic_email_cc), 1000)
def test_sync_child_tickets(self):
"""
Test to ensure that a ticket syncs its related objects:
in this case schedule entries, notes, and time entries.
"""
fixture_utils.init_schedule_entries()
fixture_utils.init_service_notes()
synchronizer = self.sync_class()
ticket = models.Ticket.objects.get(id=self.ticket_fixture['id'])
# Change some fields on all child objects
updated_fixture = deepcopy(fixtures.API_SERVICE_NOTE_LIST[0])
updated_fixture['ticketId'] = ticket.id
updated_fixture['text'] = 'Some new text'
fixture_list = [updated_fixture]
method_name = 'djconnectwise.api.ServiceAPIClient.get_notes'
mock_call, _patch = mocks.create_mock_call(method_name, fixture_list)
_, _task_patch = mocks.create_mock_call(
"djconnectwise.sync.TicketTaskSynchronizer.sync_tasks",
None
)
updated_fixture = deepcopy(fixtures.API_TIME_ENTRY)
updated_fixture['chargeToId'] = ticket.id
updated_fixture['text'] = 'Some new text'
updated_fixture['timeEnd'] = '2005-05-16T15:00:00Z'
fixture_list = [updated_fixture]
method_name = 'djconnectwise.api.TimeAPIClient.get_time_entries'
mock_call, _patch = mocks.create_mock_call(method_name, fixture_list)
updated_fixture = deepcopy(fixtures.API_SALES_ACTIVITY)
fixture_list = [updated_fixture]
method_name = 'djconnectwise.api.SalesAPIClient.get_activities'
mock_call, _patch = mocks.create_mock_call(method_name, fixture_list)
method_name = 'djconnectwise.api.TicketAPIMixin.get_ticket'
mock_call, _patch = \
mocks.create_mock_call(method_name, self.ticket_fixture)
# Trigger method called on callback
synchronizer.fetch_sync_by_id(ticket.id)
# Get the new Values from the db
updated_note = models.ServiceNote.objects.filter(ticket=ticket)[0]
updated_time = models.TimeEntry.objects.filter(charge_to_id=ticket)[0]
_task_patch.stop()
# Confirm that they have all been updated
self.assertEqual('Some new text', updated_note.text)
self.assertEqual(
datetime.datetime(
2005, 5, 16, 15, 0, tzinfo=datetime.timezone.utc),
updated_time.time_end
)
def test_sync_updated(self):
updated_ticket_fixture = deepcopy(self.ticket_fixture)
updated_ticket_fixture['summary'] = 'A new kind of summary'
fixture_list = [updated_ticket_fixture]
method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
mock_call, _patch = mocks.create_mock_call(method_name, fixture_list)
synchronizer = self.sync_class()
created_count, updated_count, _, _ = synchronizer.sync()
self.assertEqual(created_count, 0)
self.assertEqual(updated_count, len(fixture_list))
instance = models.Ticket.objects.get(id=updated_ticket_fixture['id'])
self._assert_sync(instance, updated_ticket_fixture)
def test_sync_skips(self):
# Update the ticket so we can confirm it is skipped the second time
updated_ticket_fixture = deepcopy(self.ticket_fixture)
updated_ticket_fixture['summary'] = 'A new kind of summary'
fixture_list = [updated_ticket_fixture]
method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
_, _patch = mocks.create_mock_call(method_name, fixture_list)
synchronizer = self.sync_class()
# The synchronizer is called twice; the second run should skip the
# record because no change is detected.
synchronizer.sync()
created_count, _, skipped_count, _ = synchronizer.sync()
self.assertEqual(created_count, 0)
self.assertEqual(skipped_count, len(fixture_list))
instance = models.Ticket.objects.get(id=updated_ticket_fixture['id'])
self._assert_sync(instance, updated_ticket_fixture)
def test_sync_multiple_status_batches(self):
sync.MAX_URL_LENGTH = 330
sync.MIN_URL_LENGTH = 320
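# With the URL length capped this low, the extended condition list
# below cannot fit into a single request, so the synchronizer must
# split it into two batches (asserted via call_count).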
self._init_data()
fixture_utils.init_tickets()
updated_ticket_fixture = deepcopy(self.ticket_fixture)
updated_ticket_fixture['summary'] = 'A new kind of summary'
fixture_list = [updated_ticket_fixture]
method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
mock_call, _patch = mocks.create_mock_call(method_name, fixture_list)
synchronizer = sync.ServiceTicketSynchronizer()
synchronizer.batch_condition_list.extend(
[234234, 345345, 234213, 2344523, 345645]
)
created_count, updated_count, _, _ = synchronizer.sync()
self.assertEqual(mock_call.call_count, 2)
def test_delete_stale_tickets(self):
"""Local ticket should be deleted if omitted from sync"""
ticket_id = self.ticket_fixture['id']
ticket_qset = models.Ticket.objects.filter(id=ticket_id)
self.assertEqual(ticket_qset.count(), 1)
method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
mock_call, _patch = mocks.create_mock_call(method_name, [])
synchronizer = self.sync_class(full=True)
synchronizer.sync()
self.assertEqual(ticket_qset.count(), 0)
_patch.stop()
def test_callback_sync_time_entry(self):
# Sync initial time entry
mocks.time_api_get_time_entries_call(fixtures.API_TIME_ENTRY_LIST)
time_entry_sync = sync.TimeEntrySynchronizer()
time_entry_sync.sync()
self.assertGreater(
models.SyncJob.objects.filter(entity_name='TimeEntry').count(), 0
)
# Mock the child class syncs
mocks.service_api_get_notes_call(fixtures.API_SERVICE_NOTE_LIST)
mocks.sales_api_get_activities_call(fixtures.API_SALES_ACTIVITIES)
_, _task_patch = mocks.create_mock_call(
"djconnectwise.sync.TicketTaskSynchronizer.sync_tasks",
None
)
method_name = 'djconnectwise.sync.TicketSynchronizerMixin.get_single'
mock_call, _patch = \
mocks.create_mock_call(method_name, self.ticket_fixture)
# Create new time entry to sync
new_time_entry = deepcopy(fixtures.API_TIME_ENTRY)
new_time_entry['id'] = 3
mocks.time_api_get_time_entries_call([new_time_entry,
fixtures.API_TIME_ENTRY])
ticket_id = self.ticket_fixture['id']
synchronizer = self.sync_class()
# Simulate ticket getting updated by a callback
synchronizer.fetch_sync_by_id(ticket_id)
# Verify that no time entries are removed,
# and that only one entry is added
last_sync_job = \
models.SyncJob.objects.filter(entity_name='TimeEntry').last()
_task_patch.stop()
self.assertEqual(last_sync_job.deleted, 0)
self.assertEqual(last_sync_job.updated, 0)
self.assertEqual(last_sync_job.added, 1)
self.assertEqual(last_sync_job.sync_type, 'partial')
class TestServiceTicketSynchronizer(TestTicketSynchronizerMixin, TestCase):
sync_class = sync.ServiceTicketSynchronizer
ticket_fixture = fixtures.API_SERVICE_TICKET
def setUp(self):
super().setUp()
self._init_data()
fixture_utils.init_tickets()
def _assert_sync(self, instance, json_data):
self.assertEqual(instance.summary, json_data['summary'])
self.assertEqual(instance.closed_flag, json_data.get('closedFlag'))
self.assertEqual(instance.entered_date_utc,
parse(json_data.get('_info').get('dateEntered')))
self.assertEqual(instance.last_updated_utc,
parse(json_data.get('_info').get('lastUpdated')))
self.assertEqual(instance.required_date_utc,
parse(json_data.get('requiredDate')))
self.assertEqual(instance.resources, json_data.get('resources'))
self.assertEqual(instance.budget_hours, json_data.get('budgetHours'))
self.assertEqual(instance.actual_hours, json_data.get('actualHours'))
self.assertEqual(instance.record_type, json_data.get('recordType'))
self.assertEqual(instance.parent_ticket_id,
json_data.get('parentTicketId'))
self.assertEqual(instance.has_child_ticket,
json_data.get('hasChildTicket'))
# verify assigned team
self.assertEqual(instance.team_id, json_data['team']['id'])
# verify assigned board
self.assertEqual(instance.board_id, json_data['board']['id'])
# verify assigned company
self.assertEqual(instance.company_id, json_data['company']['id'])
# verify assigned priority
self.assertEqual(instance.priority_id, json_data['priority']['id'])
# verify assigned location
self.assertEqual(instance.location_id,
json_data['serviceLocation']['id'])
# verify assigned status
self.assertEqual(instance.status_id,
json_data['status']['id'])
# verify assigned type
self.assertEqual(instance.type_id, json_data['type']['id'])
# verify assigned subtype
self.assertEqual(instance.sub_type_id, json_data['subType']['id'])
# verify assigned subtype item
self.assertEqual(instance.sub_type_item_id, json_data['item']['id'])
self.assertEqual(instance.bill_time, json_data['billTime'])
self.assertEqual(instance.automatic_email_cc_flag,
json_data['automaticEmailCcFlag'])
self.assertEqual(instance.automatic_email_contact_flag,
json_data['automaticEmailContactFlag'])
self.assertEqual(instance.automatic_email_resource_flag,
json_data['automaticEmailResourceFlag'])
self.assertEqual(instance.automatic_email_cc,
json_data['automaticEmailCc'])
self.assertEqual(instance.agreement, json_data['agreement'])
def test_project_tickets_not_deleted_during_sync(self):
"""
Verify that during a sync of service tickets, no project tickets are
removed.
"""
synchronizer = self.sync_class(full=True)
synchronizer.sync()
self.assertTrue(
models.Ticket.objects.get(id=self.ticket_fixture['id']))
project_ticket = models.Ticket.objects.create(
summary='Project ticket',
record_type='ProjectTicket'
)
project_ticket.save()
method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
mock_call, _patch = mocks.create_mock_call(method_name, [])
synchronizer = self.sync_class(full=True)
synchronizer.sync()
# Verify that the service ticket has been removed and project ticket
# still exists.
self.assertEqual(
models.Ticket.objects.get(id=project_ticket.id), project_ticket)
self.assertFalse(
models.Ticket.objects.filter(
id=self.ticket_fixture['id']).exists())
def test_callback_sync_service_note(self):
# Sync initial service note
mocks.service_api_get_notes_call(fixtures.API_SERVICE_NOTE_LIST)
service_note_sync = sync.ServiceNoteSynchronizer()
service_note_sync.sync()
self.assertGreater(
models.SyncJob.objects.filter(entity_name='ServiceNote').count(), 0
)
# Mock the child class syncs
mocks.time_api_get_time_entries_call(fixtures.API_TIME_ENTRY_LIST)
mocks.sales_api_get_activities_call(fixtures.API_SALES_ACTIVITIES)
_, _task_patch = mocks.create_mock_call(
"djconnectwise.sync.TicketTaskSynchronizer.sync_tasks",
None
)
method_name = 'djconnectwise.sync.TicketSynchronizerMixin.get_single'
mock_call, _patch = \
mocks.create_mock_call(method_name, self.ticket_fixture)
# Create new service note to sync
new_service_note = deepcopy(fixtures.API_SERVICE_NOTE_LIST[0])
new_service_note['id'] = self.ticket_fixture['id']
mocks.service_api_get_notes_call(
[new_service_note, fixtures.API_SERVICE_NOTE_LIST[0]]
)
ticket_id = self.ticket_fixture['id']
synchronizer = self.sync_class()
# Simulate ticket getting updated by a callback
synchronizer.fetch_sync_by_id(ticket_id)
# Verify that no notes are removed, and that only one note is added
last_sync_job = \
models.SyncJob.objects.filter(entity_name='ServiceNote').last()
_task_patch.stop()
self.assertEqual(last_sync_job.deleted, 0)
self.assertEqual(last_sync_job.updated, 0)
self.assertEqual(last_sync_job.added, 1)
self.assertEqual(last_sync_job.sync_type, 'partial')
class TestProjectTicketSynchronizer(TestTicketSynchronizerMixin, TestCase):
sync_class = sync.ProjectTicketSynchronizer
ticket_fixture = fixtures.API_PROJECT_TICKET
def setUp(self):
super().setUp()
mocks.project_api_tickets_call()
self._init_data()
fixture_utils.init_schedule_statuses()
fixture_utils.init_schedule_types()
fixture_utils.init_project_tickets()
def _assert_sync(self, instance, json_data):
self.assertEqual(instance.summary, json_data['summary'])
self.assertEqual(instance.closed_flag, json_data.get('closedFlag'))
self.assertEqual(instance.last_updated_utc,
parse(json_data.get('_info').get('lastUpdated')))
self.assertEqual(instance.required_date_utc,
parse(json_data.get('requiredDate')))
self.assertEqual(instance.resources, json_data.get('resources'))
self.assertEqual(instance.budget_hours, json_data.get('budgetHours'))
self.assertEqual(instance.actual_hours, json_data.get('actualHours'))
# verify assigned board
self.assertEqual(instance.board_id, json_data['board']['id'])
# verify assigned company
self.assertEqual(instance.company_id, json_data['company']['id'])
# verify assigned priority
self.assertEqual(instance.priority_id, json_data['priority']['id'])
# verify assigned location
self.assertEqual(instance.location_id,
json_data['serviceLocation']['id'])
# verify assigned project
self.assertEqual(instance.project_id,
json_data['project']['id'])
# verify assigned status
self.assertEqual(instance.status_id,
json_data['status']['id'])
# verify assigned type
self.assertEqual(instance.type_id, json_data['type']['id'])
# verify assigned subtype
self.assertEqual(instance.sub_type_id, json_data['subType']['id'])
# verify assigned subtype item
self.assertEqual(instance.sub_type_item_id, json_data['item']['id'])
self.assertEqual(instance.bill_time, json_data['billTime'])
self.assertEqual(instance.automatic_email_cc_flag,
json_data['automaticEmailCcFlag'])
self.assertEqual(instance.automatic_email_contact_flag,
json_data['automaticEmailContactFlag'])
self.assertEqual(instance.automatic_email_resource_flag,
json_data['automaticEmailResourceFlag'])
self.assertEqual(instance.automatic_email_cc,
json_data['automaticEmailCc'])
self.assertEqual(instance.agreement, json_data['agreement'])
def test_service_tickets_not_deleted_during_sync(self):
"""
Verify that during a sync of project tickets, no service tickets are
removed.
"""
synchronizer = self.sync_class(full=True)
synchronizer.sync()
self.assertTrue(
models.Ticket.objects.get(id=self.ticket_fixture['id']))
service_ticket = models.Ticket.objects.create(
summary='Service ticket',
record_type='ServiceTicket'
)
service_ticket.save()
method_name = 'djconnectwise.api.TicketAPIMixin.get_tickets'
mock_call, _patch = mocks.create_mock_call(method_name, [])
synchronizer = self.sync_class(full=True)
synchronizer.sync()
# Verify that the project ticket has been removed and service ticket
# still exists.
self.assertEqual(
models.Ticket.objects.get(id=service_ticket.id), service_ticket)
self.assertFalse(
models.Ticket.objects.filter(
id=self.ticket_fixture['id']).exists())
class TestActivityStatusSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.ActivityStatusSynchronizer
model_class = models.ActivityStatusTracker
fixture = fixtures.API_SALES_ACTIVITY_STATUSES
def call_api(self, return_data):
return mocks.sales_api_get_activities_statuses_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.default_flag, json_data['defaultFlag'])
self.assertEqual(instance.inactive_flag, json_data['inactiveFlag'])
self.assertEqual(
instance.spawn_followup_flag,
json_data.get('spawnFollowupFlag', False)
)
self.assertEqual(instance.closed_flag, json_data['closedFlag'])
class TestActivityTypeSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.ActivityTypeSynchronizer
model_class = models.ActivityTypeTracker
fixture = fixtures.API_SALES_ACTIVITY_TYPES
def call_api(self, return_data):
return mocks.sales_api_get_activities_types_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.default_flag, json_data['defaultFlag'])
self.assertEqual(instance.inactive_flag, json_data['inactiveFlag'])
self.assertEqual(instance.email_flag, json_data['emailFlag'])
self.assertEqual(instance.memo_flag, json_data['memoFlag'])
self.assertEqual(instance.history_flag, json_data['historyFlag'])
class TestActivitySynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.ActivitySynchronizer
model_class = models.ActivityTracker
fixture = fixtures.API_SALES_ACTIVITIES
def setUp(self):
super().setUp()
fixture_utils.init_work_roles()
fixture_utils.init_work_types()
mocks.system_api_get_member_image_by_photo_id_call(
(mocks.CW_MEMBER_IMAGE_FILENAME, mocks.get_member_avatar()))
fixture_utils.init_members()
fixture_utils.init_territories()
fixture_utils.init_company_statuses()
fixture_utils.init_company_types()
fixture_utils.init_companies()
fixture_utils.init_contacts()
fixture_utils.init_agreements()
fixture_utils.init_sales_probabilities()
fixture_utils.init_opportunity_types()
fixture_utils.init_opportunity_stages()
fixture_utils.init_opportunity_statuses()
fixture_utils.init_opportunities()
fixture_utils.init_agreements()
fixture_utils.init_activity_statuses()
fixture_utils.init_activity_types()
fixture_utils.init_activities()
def call_api(self, return_data):
return mocks.sales_api_get_activities_call(return_data)
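# Parse an ISO timestamp from the fixture; missing time components
# default to midnight UTC so the value compares cleanly against the
# stored datetime.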
def _get_datetime(self, instance, date_field):
date_field = instance.get(date_field)
if date_field:
date_field = parse(date_field, default=parse('00:00Z'))
return date_field
def _assert_fields(self, activity, api_activity):
self.assertEqual(activity.name, api_activity['name'])
self.assertEqual(activity.notes, api_activity['notes'])
self.assertEqual(activity.date_start,
self._get_datetime(api_activity, 'dateStart')
)
self.assertEqual(activity.date_end,
self._get_datetime(api_activity, 'dateEnd')
)
self.assertEqual(activity.assign_to_id, api_activity['assignTo']['id'])
self.assertEqual(activity.opportunity_id,
api_activity['opportunity']['id'])
if api_activity['ticket'] is not None:
self.assertEqual(activity.ticket_id, api_activity['ticket']['id'])
self.assertEqual(
activity.status_id, api_activity['status']['id']
)
self.assertEqual(
activity.type_id, api_activity['type']['id']
)
self.assertEqual(
activity.company_id, api_activity['company']['id']
)
self.assertEqual(
activity.agreement_id, api_activity['agreement']['id']
)
def test_sync_null_member_activity(self):
null_member_activity = deepcopy(fixtures.API_SALES_ACTIVITY)
null_member_activity['id'] = 999
null_member_activity['assignTo'] = {'id': 99999}  # member that does not exist
activity_list = [null_member_activity]
method_name = 'djconnectwise.api.SalesAPIClient.get_activities'
mock_call, _patch = \
mocks.create_mock_call(method_name, activity_list)
synchronizer = sync.ActivitySynchronizer(full=True)
created_count, updated_count, skipped_count, deleted_count = \
synchronizer.sync()
# The existing Activity (#47) should be deleted since it is not
# returned when the sync is run.
self.assertEqual(created_count, 0)
self.assertEqual(updated_count, 0)
self.assertEqual(deleted_count, 1)
def test_sync_activity_null_assign_to(self):
"""
Verify that an activity with a null 'assignTo' field is skipped.
"""
null_assign_to_activity = deepcopy(fixtures.API_SALES_ACTIVITY)
null_assign_to_activity['id'] = 888
null_assign_to_activity['assignTo'] = None
activity_list = [null_assign_to_activity]
method_name = 'djconnectwise.api.SalesAPIClient.get_activities'
mock_call, _patch = \
mocks.create_mock_call(method_name, activity_list)
synchronizer = sync.ActivitySynchronizer(full=True)
created_count, updated_count, skipped_count, deleted_count = \
synchronizer.sync()
# InvalidObjectException is raised and handled inside the
# synchronizer, so the skip is verified through the counts below.
self.assertEqual(created_count, 0)
self.assertEqual(updated_count, 0)
class TestSyncTicketTasks(TestCase):
def setUp(self):
self.ticket = models.Ticket()
self.ticket.save()
def tearDown(self):
self.ticket.delete()
def test_sync_tasks(self):
mocks.create_mock_call(
"djconnectwise.sync.ServiceTicketTaskSynchronizer.get",
[
{'closed_flag': False},
{'closed_flag': True},
{'closed_flag': False}
]
)
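# Three tasks are mocked, one of them closed, so sync_tasks should
# record 3 total and 1 completed.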
self.assertIsNone(self.ticket.tasks_total)
self.assertIsNone(self.ticket.tasks_completed)
synchronizer = sync.ServiceTicketTaskSynchronizer()
synchronizer.sync_tasks(self.ticket)
self.assertEqual(3, self.ticket.tasks_total)
self.assertEqual(1, self.ticket.tasks_completed)
class TestSyncSettings(TestCase):
def test_default_batch_size(self):
synchronizer = sync.BoardSynchronizer()
self.assertEqual(synchronizer.batch_size, 50)
def test_dynamic_batch_size(self):
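# Override the request settings so the synchronizer picks up a
# custom batch size instead of the default.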
method_name = 'djconnectwise.utils.DjconnectwiseSettings.get_settings'
request_settings = {
'batch_size': 10,
'timeout': 10.0,
}
_, _patch = mocks.create_mock_call(method_name, request_settings)
synchronizer = sync.BoardSynchronizer()
self.assertEqual(synchronizer.batch_size,
request_settings['batch_size'])
_patch.stop()
class MockSynchronizer:
error_message = 'One heck of an error'
model_class = models.TicketTracker
full = False
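# log_sync_job records each run's (created, updated, skipped,
# deleted) counts plus success/failure on a SyncJob row; these two
# stubs exercise the success and error paths.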
@log_sync_job
def sync(self):
return 1, 2, 3, 4
@log_sync_job
def sync_with_error(self):
raise ValueError(self.error_message)
class TestSyncJob(TestCase):
def setUp(self):
self.synchronizer = MockSynchronizer()
def assert_sync_job(self, created, updated, skipped, deleted, message,
success):
sync_job = models.SyncJob.objects.all().last()
self.assertEqual(created, sync_job.added)
self.assertEqual(updated, sync_job.updated)
self.assertEqual(skipped, sync_job.skipped)
self.assertEqual(deleted, sync_job.deleted)
self.assertEqual(self.synchronizer.model_class.__bases__[0].__name__,
sync_job.entity_name)
self.assertEqual(message, sync_job.message)
self.assertEqual(sync_job.success, success)
self.assertEqual(sync_job.sync_type, "partial")
def test_sync_successful(self):
created, updated, skipped, deleted = self.synchronizer.sync()
self.assert_sync_job(created, updated, skipped, deleted, None, True)
def test_sync_failed(self):
try:
self.synchronizer.sync_with_error()
except Exception:
pass
self.assert_sync_job(
0, 0, 0, 0, self.synchronizer.error_message, False)
class TestTypeSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.TypeSynchronizer
model_class = models.TypeTracker
fixture = fixtures.API_TYPE_LIST
def setUp(self):
super().setUp()
fixture_utils.init_types()
fixture_utils.init_boards()
def call_api(self, return_data):
return mocks.service_api_get_types_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.board.name, json_data['board']['name'])
class TestSubTypeSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.SubTypeSynchronizer
model_class = models.SubTypeTracker
fixture = fixtures.API_SUBTYPE_LIST
def setUp(self):
super().setUp()
fixture_utils.init_subtypes()
fixture_utils.init_boards()
def call_api(self, return_data):
return mocks.service_api_get_subtypes_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.board.name, json_data['board']['name'])
class TestItemSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.ItemSynchronizer
model_class = models.ItemTracker
fixture = fixtures.API_ITEM_LIST
def setUp(self):
super().setUp()
fixture_utils.init_items()
fixture_utils.init_boards()
def call_api(self, return_data):
return mocks.service_api_get_items_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.board.name, json_data['board']['name'])
class TestWorkTypeSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.WorkTypeSynchronizer
model_class = models.WorkTypeTracker
fixture = fixtures.API_WORK_TYPE_LIST
def setUp(self):
super().setUp()
fixture_utils.init_work_types()
def call_api(self, return_data):
return mocks.time_api_get_work_types_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.inactive_flag, json_data['inactiveFlag'])
class TestWorkRoleSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.WorkRoleSynchronizer
model_class = models.WorkRoleTracker
fixture = fixtures.API_WORK_ROLE_LIST
def setUp(self):
super().setUp()
fixture_utils.init_work_roles()
def call_api(self, return_data):
return mocks.time_api_get_work_roles_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.inactive_flag, json_data['inactiveFlag'])
class TestAgreementSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.AgreementSynchronizer
model_class = models.AgreementTracker
fixture = fixtures.API_AGREEMENT_LIST
def setUp(self):
super().setUp()
fixture_utils.init_work_roles()
fixture_utils.init_work_types()
fixture_utils.init_territories()
fixture_utils.init_company_statuses()
fixture_utils.init_company_types()
fixture_utils.init_companies()
def call_api(self, return_data):
return mocks.finance_api_get_agreements_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
self.assertEqual(instance.name, json_data['name'])
self.assertEqual(instance.bill_time, json_data['billTime'])
self.assertEqual(instance.agreement_type, json_data['type']['name'])
self.assertEqual(instance.cancelled_flag, json_data['cancelledFlag'])
self.assertEqual(
instance.work_role.name, json_data['workRole']['name'])
self.assertEqual(
instance.work_type.name, json_data['workType']['name'])
self.assertEqual(instance.company.name, json_data['company']['name'])
class TestProjectTeamMemberSynchronizer(TestCase, SynchronizerTestMixin):
synchronizer_class = sync.ProjectTeamMemberSynchronizer
model_class = models.ProjectTeamMemberTracker
fixture = fixtures.API_PROJECT_TEAM_MEMBER_LIST
def setUp(self):
super().setUp()
mocks.system_api_get_member_image_by_photo_id_call(
(mocks.CW_MEMBER_IMAGE_FILENAME, mocks.get_member_avatar()))
fixture_utils.init_members()
fixture_utils.init_work_roles()
fixture_utils.init_project_statuses()
fixture_utils.init_boards()
fixture_utils.init_projects()
fixture_utils.init_project_team_members()
def call_api(self, return_data):
return mocks.project_api_get_team_members_call(return_data)
def _assert_fields(self, instance, json_data):
self.assertEqual(instance.id, json_data['id'])
def test_sync_update(self):
self._sync(self.fixture)
json_data = self.fixture[0]
instance_id = json_data['id']
original = self.model_class.objects.get(id=instance_id)
end_date = '2020-10-15T08:00:00Z'
new_json = deepcopy(self.fixture[0])
new_json['endDate'] = end_date
new_json_list = [new_json]
self._sync(new_json_list)
changed = self.model_class.objects.get(id=instance_id)
self.assertNotEqual(original.end_date, end_date)
self._assert_fields(changed, new_json)
def test_sync_skips(self):
self._sync(self.fixture)
end_date = '2020-10-15T08:00:00Z'
new_json = deepcopy(self.fixture[0])
new_json['endDate'] = end_date
new_json_list = [new_json]
# Sync twice: the first pass updates, the second should skip it
self._sync(new_json_list)
_, updated_count, skipped_count, _ = self._sync_with_results(
new_json_list)
self.assertEqual(skipped_count, 1)
self.assertEqual(updated_count, 0)
|
{
"content_hash": "562c2e1593b28c5631654d7cea8b9723",
"timestamp": "",
"source": "github",
"line_count": 2238,
"max_line_length": 79,
"avg_line_length": 37.23547810545129,
"alnum_prop": 0.6457825831303325,
"repo_name": "KerkhoffTechnologies/django-connectwise",
"id": "8ab81a6c3bd613e18f720b5644c5d06ac80267db",
"size": "83333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djconnectwise/tests/test_sync.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "113"
},
{
"name": "Makefile",
"bytes": "944"
},
{
"name": "Python",
"bytes": "740231"
}
],
"symlink_target": ""
}
|
"""PhysicsRoller is for wheels, soccer balls, billiard balls, and other things that roll."""
|
{
"content_hash": "86bb2e4a92bb7d4941514c52a734f86e",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 92,
"avg_line_length": 47,
"alnum_prop": 0.7446808510638298,
"repo_name": "tobspr/panda3d",
"id": "c0510dfc69879e370fa22877ae6c0798060eef87",
"size": "94",
"binary": false,
"copies": "23",
"ref": "refs/heads/master",
"path": "direct/src/controls/PhysicsRoller.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4004"
},
{
"name": "C",
"bytes": "6724918"
},
{
"name": "C++",
"bytes": "25480688"
},
{
"name": "Emacs Lisp",
"bytes": "229264"
},
{
"name": "Groff",
"bytes": "3106"
},
{
"name": "HTML",
"bytes": "8081"
},
{
"name": "Java",
"bytes": "3113"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Logos",
"bytes": "5504"
},
{
"name": "MAXScript",
"bytes": "1745"
},
{
"name": "NSIS",
"bytes": "92320"
},
{
"name": "Nemerle",
"bytes": "4403"
},
{
"name": "Objective-C",
"bytes": "28865"
},
{
"name": "Objective-C++",
"bytes": "257446"
},
{
"name": "Perl",
"bytes": "206982"
},
{
"name": "Perl6",
"bytes": "30484"
},
{
"name": "Puppet",
"bytes": "2627"
},
{
"name": "Python",
"bytes": "5537773"
},
{
"name": "R",
"bytes": "421"
},
{
"name": "Shell",
"bytes": "55940"
},
{
"name": "Visual Basic",
"bytes": "136"
}
],
"symlink_target": ""
}
|
from importlib import import_module
from django.conf import settings
from orchestra.core.errors import InvalidSlugValue
from orchestra.core.errors import SlugUniquenessError
class Workflow():
"""
Workflows represent execution graphs of human and machine steps.
Attributes:
slug (str):
Unique identifier for the workflow.
name (str):
Human-readable name for the workflow.
description (str):
A longer description of the workflow.
steps (dict):
Steps comprising the workflow.
"""
def __init__(self,
**kwargs):
self.slug = kwargs.get('slug')
if len(self.slug) > 200:
            raise InvalidSlugValue('Slug value must be at most 200 chars')
self.name = kwargs.get('name')
self.description = kwargs.get('description')
self.steps = {}
def add_step(self, step):
"""
Add `step` to the workflow.
Args:
step (orchestra.workflow.Step):
The step to be added.
Returns:
None
Raises:
orchestra.core.errors.InvalidSlugValue:
Step slug should have fewer than 200 characters.
orchestra.core.errors.SlugUniquenessError:
Step slug has already been used in this workflow.
"""
if len(step.slug) > 200:
            raise InvalidSlugValue('Slug value must be at most 200 chars')
if step.slug in self.steps:
raise SlugUniquenessError('Slug value already taken')
self.steps[step.slug] = step
def get_steps(self):
"""
Return all steps for the workflow.
Args:
None
Returns:
steps ([orchestra.workflow.Step]):
List of steps for the workflow.
"""
return self.steps.values()
def get_step_slugs(self):
"""
Return all step slugs for the workflow.
Args:
None
Returns:
slugs ([str]):
List of step slugs for the workflow.
"""
return self.steps.keys()
def get_step(self, slug):
"""
Return the specified step from the workflow.
Args:
slug (str):
The slug of the desired step.
Returns:
step (orchestra.workflow.Step):
The specified step from the workflow.
"""
return self.steps[slug]
def get_human_steps(self):
"""
Return steps from the workflow with a human `worker_type`.
Args:
None
Returns:
steps ([orchestra.workflow.Step]):
                Steps from the workflow with a human `worker_type`.
"""
return [step for slug, step in self.steps.items()
if step.worker_type == Step.WorkerType.HUMAN]
def __str__(self):
return self.slug
def __unicode__(self):
return self.slug
class Step():
"""
Steps represent nodes on a workflow execution graph.
Attributes:
slug (str):
Unique identifier for the step.
name (str):
Human-readable name for the step.
description (str):
A longer description of the step.
worker_type (orchestra.workflow.Step.WorkerType):
Indicates whether the policy is for a human or machine.
creation_depends_on ([str]):
Slugs for steps on which this step's creation depends.
submission_depends_on ([str]):
Slugs for steps on which this step's submission depends.
function (function):
Function to execute during step. Should be present only for
machine tasks
required_certifications ([str]):
Slugs for certifications required for a worker to pick up
tasks based on this step.
"""
class WorkerType:
"""Specifies whether step is performed by human or machine"""
HUMAN = 0
MACHINE = 1
def __init__(self,
**kwargs):
self.slug = kwargs.get('slug')
self.name = kwargs.get('name')
self.description = kwargs.get('description')
self.worker_type = kwargs.get('worker_type')
self.creation_depends_on = kwargs.get('creation_depends_on') or []
self.submission_depends_on = kwargs.get('submission_depends_on') or []
self.function = kwargs.get('function')
self.required_certifications = kwargs.get(
'required_certifications') or []
# Example: {'policy': 'previously_completed_steps', 'step': ['design']}
self.assignment_policy = (kwargs.get('assignment_policy')
or get_default_policy(self.worker_type,
'assignment_policy'))
# Example: {'policy': 'sampled_review', 'rate': .25, 'max_reviews': 2}
self.review_policy = (kwargs.get('review_policy')
or get_default_policy(self.worker_type,
'review_policy'))
# Example: {'html_blob': 'http://some_url',
# 'javascript_includes': [url1, url2],
# 'css_includes': [url1, url2]}
self.user_interface = kwargs.get('user_interface') or {}
def __str__(self):
return self.slug
def __unicode__(self):
return self.slug
def get_workflows():
"""
Return all stored workflows.
Args:
None
Returns:
        workflows (dict):
            A dict of all orchestra.workflow.Workflow objects keyed by slug.
"""
workflows = {}
for backend_module, variable in settings.ORCHESTRA_PATHS:
backend_module = import_module(backend_module)
workflow = getattr(backend_module, variable)
if workflow.slug in workflows:
raise SlugUniquenessError('Repeated slug value for workflows.')
workflows[workflow.slug] = workflow
return workflows
def get_workflow_by_slug(slug):
"""
Return the workflow specified by `slug`.
Args:
slug (str):
The slug of the desired workflow.
Returns:
workflow (orchestra.workflow.Workflow):
The corresponding workflow object.
"""
return get_workflows()[slug]
def get_workflow_choices():
"""
Return workflow data formatted as `choices` for a model field.
Args:
None
Returns:
workflow_choices (tuple):
A tuple of tuples containing each workflow slug and
human-readable name.
"""
workflows = get_workflows()
choices = []
for slug, workflow in workflows.items():
choices.append((slug, workflow.name))
return tuple(choices)
def get_step_choices():
"""
Return step data formatted as `choices` for a model field.
Args:
None
Returns:
step_choices (tuple):
A tuple of tuples containing each step slug and
human-readable name.
"""
choices = []
for slug, workflow in iter(get_workflows().items()):
for step in workflow.get_steps():
choices.append((step.slug, step.name))
return tuple(choices)
def get_default_policy(worker_type, policy_name):
"""
Return the default value for a specified policy.
Args:
worker_type (orchestra.workflow.Step.WorkerType):
Indicates whether the policy is for a human or machine.
policy_name (str):
The specified policy identifier.
Returns:
default_policy (dict):
A dict containing the default policy for the worker type and
policy name specified.
"""
default_policies = {
'assignment_policy': {'policy': 'anyone_certified'},
'review_policy': {'policy': 'sampled_review',
'rate': 1,
'max_reviews': 1}
}
if worker_type == Step.WorkerType.HUMAN:
return default_policies[policy_name]
else:
return {}
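# Example usage -- a minimal sketch; the slugs, names, and step wiring below
# are hypothetical and exist only to illustrate the API defined above:
if __name__ == '__main__':
    design = Step(slug='design', name='Design',
                  description='Draft the design',
                  worker_type=Step.WorkerType.HUMAN)
    review = Step(slug='review', name='Review',
                  description='Review the design',
                  worker_type=Step.WorkerType.HUMAN,
                  creation_depends_on=['design'])
    workflow = Workflow(slug='doc-pipeline', name='Document pipeline',
                        description='Two-step authoring workflow')
    workflow.add_step(design)
    workflow.add_step(review)
    assert workflow.get_step('design') is design
    assert set(workflow.get_step_slugs()) == {'design', 'review'}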
|
{
"content_hash": "fc6a5d062612850bd2cbad51f6f2c183",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 79,
"avg_line_length": 29.067857142857143,
"alnum_prop": 0.5666543801449809,
"repo_name": "Sonblind/orchestra",
"id": "f233d8ad02a67af81312753cdaf905a85f5a3971",
"size": "8139",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orchestra/workflow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "76414"
},
{
"name": "HTML",
"bytes": "57292"
},
{
"name": "JavaScript",
"bytes": "234470"
},
{
"name": "Makefile",
"bytes": "826"
},
{
"name": "Python",
"bytes": "310246"
}
],
"symlink_target": ""
}
|
import pytz
from datetime import datetime
def empty_str(in_str):
"""
Simple helper to return True if the passed
string reference is None or '' or all whitespace
"""
if in_str is not None and not isinstance(in_str, basestring):
raise TypeError('Arg must be None or a string type')
return in_str is None or \
len(in_str.strip()) == 0
def to_naive_utc_dt(dt):
"""
Converts a datetime to a naive datetime (no tzinfo)
as follows:
if inbound dt is already naive, it just returns it
if inbound is timezone aware, converts it to UTC,
then strips the tzinfo
"""
if not isinstance(dt, datetime):
raise TypeError('Arg must be type datetime')
if dt.tzinfo is None:
return dt
return dt.astimezone(pytz.utc).replace(tzinfo=None)
def to_aware_utc_dt(dt):
"""
Convert an inbound datetime into a timezone
aware datetime in UTC as follows:
if inbound is naive, uses 'tzinfo.localize' to
add utc tzinfo. NOTE: Timevalues are not changed,
only difference in tzinfo is added to identify this
as a UTC tz aware object.
if inbound is aware, uses 'datetime.astimezone'
to convert timevalues to UTC and set tzinfo to
utc.
"""
if not isinstance(dt, datetime):
raise TypeError('Arg must be type datetime')
if dt.tzinfo is None:
return pytz.utc.localize(dt)
return dt.astimezone(pytz.utc)
def timedelta_as_minutes(td):
"""
Returns the value of the entire timedelta as
integer minutes, rounded down
"""
return timedelta_as_seconds(td) / 60
def timedelta_as_seconds(td):
'''
Returns the value of the entire timedelta as
integer seconds, rounded down
'''
return td.days * 86400 + td.seconds
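# A minimal sketch exercising the helpers above (values are hypothetical;
# written in the same Python 2 style as this module):
if __name__ == '__main__':
    from datetime import timedelta
    aware = to_aware_utc_dt(datetime(2014, 7, 1, 12, 30))  # naive -> UTC-aware
    naive = to_naive_utc_dt(aware)                         # aware -> naive UTC
    assert naive == datetime(2014, 7, 1, 12, 30)
    assert empty_str('   ') and not empty_str('x')
    assert timedelta_as_seconds(timedelta(days=1, seconds=30)) == 86430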
|
{
"content_hash": "e33670012c30c69b6605f58ccabe75ce",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 65,
"avg_line_length": 23.272727272727273,
"alnum_prop": 0.6590401785714286,
"repo_name": "eHealthAfrica/rapidsms",
"id": "38fea37365eb6c6638090e6a326a0b0debb603e3",
"size": "1859",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "rapidsms/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "27100"
},
{
"name": "JavaScript",
"bytes": "16887"
},
{
"name": "Python",
"bytes": "350060"
},
{
"name": "Shell",
"bytes": "5100"
}
],
"symlink_target": ""
}
|
"""
__init__.py
:copyright: (c) 2015 by Fulfil.IO Inc.
:license: see LICENSE for details.
"""
from trytond.pool import Pool
from party import Address, PaymentProfile, Party
from transaction import PaymentGatewayStripe, PaymentTransactionStripe, \
AddPaymentProfile
def register():
Pool.register(
Address,
PaymentProfile,
PaymentGatewayStripe,
PaymentTransactionStripe,
Party,
module='payment_gateway_stripe', type_='model'
)
Pool.register(
AddPaymentProfile,
module='payment_gateway_stripe', type_='wizard'
)
|
{
"content_hash": "0a3819f54593bce56084cf51a11fdc02",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 73,
"avg_line_length": 24.32,
"alnum_prop": 0.6595394736842105,
"repo_name": "prakashpp/trytond-payment-gateway-stripe",
"id": "582652e8e6df9b45e64130bc55586689dfc3650b",
"size": "632",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "45389"
}
],
"symlink_target": ""
}
|
import mock
from neutron.agent.common import base_polling
from neutron.agent.linux import polling
from neutron.tests import base
class TestGetPollingManager(base.BaseTestCase):
def test_return_always_poll_by_default(self):
with polling.get_polling_manager() as pm:
self.assertEqual(pm.__class__, base_polling.AlwaysPoll)
def test_manage_polling_minimizer(self):
mock_target = 'neutron.agent.linux.polling.InterfacePollingMinimizer'
with mock.patch('%s.start' % mock_target) as mock_start:
with mock.patch('%s.stop' % mock_target) as mock_stop:
with polling.get_polling_manager(minimize_polling=True) as pm:
self.assertEqual(pm.__class__,
polling.InterfacePollingMinimizer)
mock_stop.assert_has_calls([mock.call()])
mock_start.assert_has_calls([mock.call()])
class TestInterfacePollingMinimizer(base.BaseTestCase):
def setUp(self):
super(TestInterfacePollingMinimizer, self).setUp()
self.pm = polling.InterfacePollingMinimizer()
def test_start_calls_monitor_start(self):
with mock.patch.object(self.pm._monitor, 'start') as mock_start:
self.pm.start()
mock_start.assert_called_with(block=True)
def test_stop_calls_monitor_stop(self):
with mock.patch.object(self.pm._monitor, 'stop') as mock_stop:
self.pm.stop()
mock_stop.assert_called_with()
def mock_has_updates(self, return_value):
target = ('neutron.agent.linux.ovsdb_monitor.SimpleInterfaceMonitor'
'.has_updates')
return mock.patch(
target,
new_callable=mock.PropertyMock(return_value=return_value),
)
def test__is_polling_required_returns_when_updates_are_present(self):
with self.mock_has_updates(True):
self.assertTrue(self.pm._is_polling_required())
|
{
"content_hash": "f992d0f02031c50f44d5bb5a5cc407c8",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 78,
"avg_line_length": 38.35294117647059,
"alnum_prop": 0.647239263803681,
"repo_name": "igor-toga/local-snat",
"id": "8ae677e20541ce56b96f4c3a37e78378aa59e562",
"size": "2562",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/agent/linux/test_polling.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "9636936"
},
{
"name": "Shell",
"bytes": "14072"
}
],
"symlink_target": ""
}
|
"""
Multiple SNMP engines
+++++++++++++++++++++
Send multiple SNMP GET requests to multiple peers using multiple
independent SNMP engines. Deal with peers asynchronously. SNMP options
are:
* with SNMPv1, community 'public' and
with SNMPv2c, community 'public' and
with SNMPv3, user 'usr-md5-des', MD5 auth and DES privacy
* over IPv4/UDP and
over IPv6/UDP
* to an Agent at demo.snmplabs.com:161 and
to an Agent at [::1]:161
* for instances of SNMPv2-MIB::sysDescr.0 and
SNMPv2-MIB::sysLocation.0 MIB objects
Within this script we have a single asynchronous TransportDispatcher
and a single UDP-based transport serving two independent SNMP engines.
We use a single instance of AsyncCommandGenerator with each of the
SNMP engines to communicate GET command requests to remote systems.
When we receive a [response] message from a remote system we use
a custom message router to choose which of the two SNMP engines the
data packet should be handed over to. The selection criterion we
employ here is based on the peer's UDP port number. Other selection
criteria are also possible.
"""#
from pysnmp.hlapi.v3arch.asyncore import *
from pysnmp.carrier.asyncore.dispatch import AsyncoreDispatcher
# List of targets in the following format:
# ( ( authData, transportTarget, varNames ), ... )
TARGETS = (
# 1-st target (SNMPv1 over IPv4/UDP)
(CommunityData('public', mpModel=0),
UdpTransportTarget(('demo.snmplabs.com', 161)),
(ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)),
ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysLocation', 0)))),
# 2-nd target (SNMPv2c over IPv4/UDP)
(CommunityData('public'),
UdpTransportTarget(('demo.snmplabs.com', 1161)),
(ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)),
ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysLocation', 0)))),
    # 3-rd target (SNMPv3 over IPv4/UDP)
(UsmUserData('usr-md5-des', 'authkey1', 'privkey1'),
UdpTransportTarget(('demo.snmplabs.com', 2161)),
(ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysDescr', 0)),
ObjectType(ObjectIdentity('SNMPv2-MIB', 'sysLocation', 0))))
# N-th target
# ...
)
# Wait for responses or errors
# noinspection PyUnusedLocal
def cbFun(snmpEngine, sendRequestHandle, errorIndication,
errorStatus, errorIndex, varBinds, cbCtx):
authData, transportTarget = cbCtx
print('snmpEngine %s: %s via %s' % (snmpEngine.snmpEngineID.prettyPrint(), authData, transportTarget))
if errorIndication:
print(errorIndication)
return True
elif errorStatus:
print('%s at %s' % (errorStatus.prettyPrint(),
errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))
return True
else:
for varBind in varBinds:
print(' = '.join([x.prettyPrint() for x in varBind]))
# Instantiate the single transport dispatcher object
transportDispatcher = AsyncoreDispatcher()
# Setup a custom data routing function to select snmpEngine by transportDomain
transportDispatcher.registerRoutingCbFun(
lambda td, ta, d: ta[1] % 3 and 'A' or 'B'
)
snmpEngineA = SnmpEngine()
snmpEngineA.registerTransportDispatcher(transportDispatcher, 'A')
snmpEngineB = SnmpEngine()
snmpEngineB.registerTransportDispatcher(transportDispatcher, 'B')
for authData, transportTarget, varBinds in TARGETS:
snmpEngine = (transportTarget.getTransportInfo()[1][1] % 3 and
snmpEngineA or snmpEngineB)
getCmd(snmpEngine, authData, transportTarget, ContextData(), *varBinds,
cbFun=cbFun, cbCtx=(authData, transportTarget))
transportDispatcher.runDispatcher()
|
{
"content_hash": "abf3c5feabe156fdc493b6291974490c",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 106,
"avg_line_length": 35.25242718446602,
"alnum_prop": 0.7135775268521068,
"repo_name": "etingof/pysnmp",
"id": "fc2143d488c5172628a06452a4627928ed2dd33e",
"size": "3631",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/hlapi/v3arch/asyncore/manager/cmdgen/query-multiple-snmp-engines-over-ipv4-and-ipv6.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "1453555"
},
{
"name": "Shell",
"bytes": "1312"
}
],
"symlink_target": ""
}
|
from functools import total_ordering
import itertools
import re
all_modules = []
@total_ordering
class Module(object):
"""
A module is the basic abstraction in our test runner script. Each module consists of a set of
source files, a set of test commands, and a set of dependencies on other modules. We use modules
    to define a dependency graph that lets us determine which tests to run based on which files have
changed.
"""
def __init__(self, name, dependencies, source_file_regexes, build_profile_flags=(), environ={},
sbt_test_goals=(), python_test_goals=(), blacklisted_python_implementations=(),
test_tags=(), should_run_r_tests=False, should_run_build_tests=False):
"""
Define a new module.
:param name: A short module name, for display in logging and error messages.
:param dependencies: A set of dependencies for this module. This should only include direct
dependencies; transitive dependencies are resolved automatically.
:param source_file_regexes: a set of regexes that match source files belonging to this
module. These regexes are applied by attempting to match at the beginning of the
filename strings.
:param build_profile_flags: A set of profile flags that should be passed to Maven or SBT in
order to build and test this module (e.g. '-PprofileName').
:param environ: A dict of environment variables that should be set when files in this
module are changed.
:param sbt_test_goals: A set of SBT test goals for testing this module.
:param python_test_goals: A set of Python test goals for testing this module.
:param blacklisted_python_implementations: A set of Python implementations that are not
supported by this module's Python components. The values in this set should match
strings returned by Python's `platform.python_implementation()`.
        :param test_tags: A set of tags that will be excluded when running unit tests if the module
is not explicitly changed.
:param should_run_r_tests: If true, changes in this module will trigger all R tests.
:param should_run_build_tests: If true, changes in this module will trigger build tests.
"""
self.name = name
self.dependencies = dependencies
self.source_file_prefixes = source_file_regexes
self.sbt_test_goals = sbt_test_goals
self.build_profile_flags = build_profile_flags
self.environ = environ
self.python_test_goals = python_test_goals
self.blacklisted_python_implementations = blacklisted_python_implementations
self.test_tags = test_tags
self.should_run_r_tests = should_run_r_tests
self.should_run_build_tests = should_run_build_tests
self.dependent_modules = set()
for dep in dependencies:
dep.dependent_modules.add(self)
all_modules.append(self)
def contains_file(self, filename):
return any(re.match(p, filename) for p in self.source_file_prefixes)
def __repr__(self):
return "Module<%s>" % self.name
def __lt__(self, other):
return self.name < other.name
def __eq__(self, other):
return self.name == other.name
def __ne__(self, other):
return not (self.name == other.name)
def __hash__(self):
return hash(self.name)
tags = Module(
name="tags",
dependencies=[],
source_file_regexes=[
"common/tags/",
]
)
catalyst = Module(
name="catalyst",
dependencies=[tags],
source_file_regexes=[
"sql/catalyst/",
],
sbt_test_goals=[
"catalyst/test",
],
)
sql = Module(
name="sql",
dependencies=[catalyst],
source_file_regexes=[
"sql/core/",
],
sbt_test_goals=[
"sql/test",
],
)
hive = Module(
name="hive",
dependencies=[sql],
source_file_regexes=[
"sql/hive/",
"bin/spark-sql",
],
build_profile_flags=[
"-Phive",
],
sbt_test_goals=[
"hive/test",
],
test_tags=[
"org.apache.spark.tags.ExtendedHiveTest"
]
)
hive_thriftserver = Module(
name="hive-thriftserver",
dependencies=[hive],
source_file_regexes=[
"sql/hive-thriftserver",
"sbin/start-thriftserver.sh",
],
build_profile_flags=[
"-Phive-thriftserver",
],
sbt_test_goals=[
"hive-thriftserver/test",
]
)
sql_kafka = Module(
name="sql-kafka-0-10",
dependencies=[sql],
source_file_regexes=[
"external/kafka-0-10-sql",
],
sbt_test_goals=[
"sql-kafka-0-10/test",
]
)
sketch = Module(
name="sketch",
dependencies=[tags],
source_file_regexes=[
"common/sketch/",
],
sbt_test_goals=[
"sketch/test"
]
)
graphx = Module(
name="graphx",
dependencies=[tags],
source_file_regexes=[
"graphx/",
],
sbt_test_goals=[
"graphx/test"
]
)
streaming = Module(
name="streaming",
dependencies=[tags],
source_file_regexes=[
"streaming",
],
sbt_test_goals=[
"streaming/test",
]
)
# Don't set the dependencies because changes in other modules should not trigger Kinesis tests.
# Kinesis tests depend on the external Amazon Kinesis service. We should run these tests only when
# files in streaming_kinesis_asl are changed, so that if Kinesis experiences an outage, we don't
# fail other PRs.
streaming_kinesis_asl = Module(
name="streaming-kinesis-asl",
dependencies=[tags],
source_file_regexes=[
"external/kinesis-asl/",
"external/kinesis-asl-assembly/",
],
build_profile_flags=[
"-Pkinesis-asl",
],
environ={
"ENABLE_KINESIS_TESTS": "1"
},
sbt_test_goals=[
"streaming-kinesis-asl/test",
]
)
streaming_kafka = Module(
name="streaming-kafka-0-8",
dependencies=[streaming],
source_file_regexes=[
"external/kafka-0-8",
"external/kafka-0-8-assembly",
],
sbt_test_goals=[
"streaming-kafka-0-8/test",
]
)
streaming_kafka_0_10 = Module(
name="streaming-kafka-0-10",
dependencies=[streaming],
source_file_regexes=[
        # The ending "/" is necessary; otherwise it would also match the "sql-kafka" code
"external/kafka-0-10/",
"external/kafka-0-10-assembly",
],
sbt_test_goals=[
"streaming-kafka-0-10/test",
]
)
streaming_flume_sink = Module(
name="streaming-flume-sink",
dependencies=[streaming],
source_file_regexes=[
"external/flume-sink",
],
sbt_test_goals=[
"streaming-flume-sink/test",
]
)
streaming_flume = Module(
name="streaming-flume",
dependencies=[streaming],
source_file_regexes=[
"external/flume",
],
sbt_test_goals=[
"streaming-flume/test",
]
)
streaming_flume_assembly = Module(
name="streaming-flume-assembly",
dependencies=[streaming_flume, streaming_flume_sink],
source_file_regexes=[
"external/flume-assembly",
]
)
mllib_local = Module(
name="mllib-local",
dependencies=[tags],
source_file_regexes=[
"mllib-local",
],
sbt_test_goals=[
"mllib-local/test",
]
)
mllib = Module(
name="mllib",
dependencies=[mllib_local, streaming, sql],
source_file_regexes=[
"data/mllib/",
"mllib/",
],
sbt_test_goals=[
"mllib/test",
]
)
examples = Module(
name="examples",
dependencies=[graphx, mllib, streaming, hive],
source_file_regexes=[
"examples/",
],
sbt_test_goals=[
"examples/test",
]
)
pyspark_core = Module(
name="pyspark-core",
dependencies=[],
source_file_regexes=[
"python/(?!pyspark/(ml|mllib|sql|streaming))"
],
python_test_goals=[
"pyspark.rdd",
"pyspark.context",
"pyspark.conf",
"pyspark.broadcast",
"pyspark.accumulators",
"pyspark.serializers",
"pyspark.profiler",
"pyspark.shuffle",
"pyspark.tests",
]
)
pyspark_sql = Module(
name="pyspark-sql",
dependencies=[pyspark_core, hive],
source_file_regexes=[
"python/pyspark/sql"
],
python_test_goals=[
"pyspark.sql.types",
"pyspark.sql.context",
"pyspark.sql.session",
"pyspark.sql.conf",
"pyspark.sql.catalog",
"pyspark.sql.column",
"pyspark.sql.dataframe",
"pyspark.sql.group",
"pyspark.sql.functions",
"pyspark.sql.readwriter",
"pyspark.sql.streaming",
"pyspark.sql.window",
"pyspark.sql.tests",
]
)
pyspark_streaming = Module(
name="pyspark-streaming",
dependencies=[
pyspark_core,
streaming,
streaming_kafka,
streaming_flume_assembly,
streaming_kinesis_asl
],
source_file_regexes=[
"python/pyspark/streaming"
],
python_test_goals=[
"pyspark.streaming.util",
"pyspark.streaming.tests",
]
)
pyspark_mllib = Module(
name="pyspark-mllib",
dependencies=[pyspark_core, pyspark_streaming, pyspark_sql, mllib],
source_file_regexes=[
"python/pyspark/mllib"
],
python_test_goals=[
"pyspark.mllib.classification",
"pyspark.mllib.clustering",
"pyspark.mllib.evaluation",
"pyspark.mllib.feature",
"pyspark.mllib.fpm",
"pyspark.mllib.linalg.__init__",
"pyspark.mllib.linalg.distributed",
"pyspark.mllib.random",
"pyspark.mllib.recommendation",
"pyspark.mllib.regression",
"pyspark.mllib.stat._statistics",
"pyspark.mllib.stat.KernelDensity",
"pyspark.mllib.tree",
"pyspark.mllib.util",
"pyspark.mllib.tests",
],
blacklisted_python_implementations=[
"PyPy" # Skip these tests under PyPy since they require numpy and it isn't available there
]
)
pyspark_ml = Module(
name="pyspark-ml",
dependencies=[pyspark_core, pyspark_mllib],
source_file_regexes=[
"python/pyspark/ml/"
],
python_test_goals=[
"pyspark.ml.feature",
"pyspark.ml.classification",
"pyspark.ml.clustering",
"pyspark.ml.linalg.__init__",
"pyspark.ml.recommendation",
"pyspark.ml.regression",
"pyspark.ml.tuning",
"pyspark.ml.tests",
"pyspark.ml.evaluation",
],
blacklisted_python_implementations=[
"PyPy" # Skip these tests under PyPy since they require numpy and it isn't available there
]
)
sparkr = Module(
name="sparkr",
dependencies=[hive, mllib],
source_file_regexes=[
"R/",
],
should_run_r_tests=True
)
docs = Module(
name="docs",
dependencies=[],
source_file_regexes=[
"docs/",
]
)
build = Module(
name="build",
dependencies=[],
source_file_regexes=[
".*pom.xml",
"dev/test-dependencies.sh",
],
should_run_build_tests=True
)
yarn = Module(
name="yarn",
dependencies=[],
source_file_regexes=[
"resource-managers/yarn/",
"common/network-yarn/",
],
build_profile_flags=["-Pyarn"],
sbt_test_goals=[
"yarn/test",
"network-yarn/test",
],
test_tags=[
"org.apache.spark.tags.ExtendedYarnTest"
]
)
mesos = Module(
name="mesos",
dependencies=[],
source_file_regexes=["resource-managers/mesos/"],
build_profile_flags=["-Pmesos"],
sbt_test_goals=["mesos/test"]
)
# The root module is a dummy module which is used to run all of the tests.
# No other modules should directly depend on this module.
root = Module(
name="root",
dependencies=[build], # Changes to build should trigger all tests.
source_file_regexes=[],
# In order to run all of the tests, enable every test profile:
build_profile_flags=list(set(
itertools.chain.from_iterable(m.build_profile_flags for m in all_modules))),
sbt_test_goals=[
"test",
],
python_test_goals=list(itertools.chain.from_iterable(m.python_test_goals for m in all_modules)),
should_run_r_tests=True,
should_run_build_tests=True
)
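# A minimal sketch of how this graph can be queried to pick tests for a set of
# changed files (hypothetical paths; this mirrors, but is not, the selection
# logic that the test runner builds on top of these modules):
if __name__ == '__main__':
    changed_files = ["sql/catalyst/src/Foo.scala", "python/pyspark/rdd.py"]
    changed = set(m for m in all_modules
                  for f in changed_files if m.contains_file(f))
    # Walk downstream: anything depending on a changed module is also tested.
    to_test, frontier = set(changed), list(changed)
    while frontier:
        for dep in frontier.pop().dependent_modules:
            if dep not in to_test:
                to_test.add(dep)
                frontier.append(dep)
    print(sorted(m.name for m in to_test))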
|
{
"content_hash": "7fdbcef951d791d861a61cdc6158b692",
"timestamp": "",
"source": "github",
"line_count": 492,
"max_line_length": 100,
"avg_line_length": 25.107723577235774,
"alnum_prop": 0.6060066380636283,
"repo_name": "sachintyagi22/spark",
"id": "10ad1fe3aa2c67610ac6ad743584703c70720d8b",
"size": "13138",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dev/sparktestsupport/modules.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "32169"
},
{
"name": "Batchfile",
"bytes": "24063"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "23922"
},
{
"name": "HTML",
"bytes": "8781"
},
{
"name": "Java",
"bytes": "2827518"
},
{
"name": "JavaScript",
"bytes": "134354"
},
{
"name": "Makefile",
"bytes": "7774"
},
{
"name": "PLpgSQL",
"bytes": "3817"
},
{
"name": "PowerShell",
"bytes": "3751"
},
{
"name": "Python",
"bytes": "2139302"
},
{
"name": "R",
"bytes": "939531"
},
{
"name": "Roff",
"bytes": "14302"
},
{
"name": "SQLPL",
"bytes": "6233"
},
{
"name": "Scala",
"bytes": "21978635"
},
{
"name": "Shell",
"bytes": "154403"
},
{
"name": "Thrift",
"bytes": "33605"
}
],
"symlink_target": ""
}
|
import logging
from distutils.util import strtobool
from configobj import ConfigObj
from fastapp.models import Base, Setting, Apy
from fastapp.utils import Connection
from StringIO import StringIO
logger = logging.getLogger(__name__)
def _read_config(app_config):
# read app.config
#app_config_file = StringIO()
#app_config_file.write(app_config)
appconfig = ConfigObj(app_config)
#app_config_file.close()
return appconfig
def _handle_settings(settings, base_obj, override_public=False, override_private=False):
"""
dict with settings (k, v)
"""
    # log the incoming settings for debugging
    logger.debug(settings.items())
for k, v in settings.items():
setting_obj, created = Setting.objects.get_or_create(base=base_obj, key=k)
# set if empty
if not setting_obj.value:
setting_obj.value = v['value']
# override_public
if setting_obj.public and override_public:
setting_obj.value = v['value']
# override_private
if not setting_obj.public and override_private:
setting_obj.value = v['value']
setting_obj.public = strtobool(v['public'])
setting_obj.save()
def _handle_apy(filename, content, base_obj, appconfig):
name = filename.replace(".py", "")
apy, created = Apy.objects.get_or_create(base=base_obj, name=name)
apy.module = content
description = appconfig['modules'][name]['description']
if description:
apy.description = description
public = appconfig['modules'][name].get('public', None)
if public:
apy.public = strtobool(public)
apy.save()
def import_base(zf, user_obj, name, override_public, override_private):
base, created = Base.objects.get_or_create(user=user_obj, name=name)
if not created:
        logger.info("base '%s' already exists" % name)
base.save()
# Dropbox connection
try:
dropbox_connection = Connection(base.auth_token)
    except Exception:
        # keep going without Dropbox; static files simply won't be uploaded
        dropbox_connection = None
    # parse app.config from the archive
    appconfig = _read_config(zf.open("app.config"))
# Apy
for apy in appconfig['modules'].keys():
filename = apy+".py"
apy_content = zf.open(filename).read()
_handle_apy(filename, apy_content, base, appconfig)
# settings
settings = appconfig['settings']
_handle_settings(settings, base)
filelist = zf.namelist()
for filename in filelist:
# ignore files starting with '__'
if filename.startswith('__'):
continue
# static
logger.info("staticfile: "+filename)
content = zf.open(filename).read()
if filename == "index.html":
base.content = content
base.save()
if "static" in filename:
file = "/%s/%s" % (base.name, filename)
dropbox_connection.put_file(filename, content)
return base
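# A minimal sketch of driving import_base (the archive name and user are
# hypothetical; assumes a configured Django environment):
if __name__ == '__main__':
    import zipfile
    from django.contrib.auth.models import User
    user = User.objects.get(username='admin')
    with zipfile.ZipFile('exported_base.zip') as zf:
        base = import_base(zf, user, 'mybase',
                           override_public=False, override_private=False)
        print base.name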
|
{
"content_hash": "56daef90a3daba14608ee5498eb4ced2",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 88,
"avg_line_length": 30.46315789473684,
"alnum_prop": 0.6285418106427091,
"repo_name": "sahlinet/fastapp",
"id": "dc4f4ea9c8104c794ac554c4b5bf139847fc7c1b",
"size": "2894",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fastapp/importer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20437"
},
{
"name": "HTML",
"bytes": "56635"
},
{
"name": "JavaScript",
"bytes": "662602"
},
{
"name": "Python",
"bytes": "500209"
},
{
"name": "Shell",
"bytes": "78"
}
],
"symlink_target": ""
}
|
import os
import numpy as np
from ..models import model_from_config, save_json
from ..hyperopt import Node, CMAES
from ..model_selection import CV
from .fit import fit
def obj_fn(s, x, y):
try:
model = model_from_config(config=s)
result = CV(method='split', test_size=0.2).run(
model=model,
x=x,
y=y,
scoring='rmse')['rmse']['mean']
    except Exception:
        # treat any failing configuration as an invalid (NaN) objective value
        result = np.nan
return result
def optimization(search_space: Node,
x, y,
x0: list = None,
cmaes_params: dict = None,
output_dir: str = '',
refit_best_model: bool = True,
best_model_fit_kwargs: dict = None,
save_to_json: bool = False,
features: list = None,
max_threads: int = 1):
"""
Hyperparameters optimization and/or feature selection
:param search_space: search space variables definition
:param x: input data for optimization process
:param y: target values for optimization process
:param x0: optional list of initial guess of minimum solution. Values from 0 to 1.
Default value is [1] * search_space.size
:param cmaes_params: optional parameters for CMAES algorithm
:param output_dir: output directory for the output files
    :param refit_best_model: if True, refit the best model on (x, y) after the search
    :param best_model_fit_kwargs: optional kwargs forwarded to fit() when refitting the best model
    :param save_to_json: if True, save the result dict as '<model_name>_opt.json' in output_dir
    :param features: optional list of feature names, filtered by the best model's support mask
    :param max_threads: maximum number of threads used to evaluate candidate solutions
:return: best model configuration
"""
if search_space.size == 0:
raise ValueError('empty search space')
if x0 is not None:
if len(x0) != search_space.size:
raise ValueError('invalid x0. It must contains {} elements'.format(search_space.size))
opt = CMAES(
verb_filenameprefix=os.path.join(output_dir, 'cmaes/out_'),
**(cmaes_params or {})
)
result = opt.fmin(
search_space=search_space,
x0=x0,
obj_func=obj_fn,
args=(x, y),
max_threads=max_threads
)
s = search_space.get_value(result[0])
model = model_from_config(config=s)
model.name += '_best'
if refit_best_model:
fit(model=model, x=x, y=y, **(best_model_fit_kwargs or {}))
if features and hasattr(model, 'get_support'):
sup = getattr(model, 'get_support')()
        # get_support() usually returns a boolean numpy mask whose truth value
        # is ambiguous; test it explicitly instead
        if sup is not None and len(sup) > 0:
            features = list(np.array(features)[sup])
res = {
'model': {
'class_name': model.__class__.__name__,
'config': model.get_config()
},
'best_fit_func': result[1],
'best_x': list(result[0]),
'best_features': features
}
if save_to_json:
save_json(res, os.path.join(output_dir, '{}_opt.json'.format(model.name)))
return res, model
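# A minimal sketch of the entry point above (comments only, since building the
# `search_space` Node and the CMAES options depend on the model being tuned;
# all names below are assumptions for illustration):
#
#   x, y = load_training_data()            # hypothetical helper
#   result, best_model = optimization(
#       search_space=search_space,         # a pre-built hyperopt Node
#       x=x, y=y,
#       output_dir='out/',
#       save_to_json=True)
#   print(result['best_fit_func'], result['best_x'])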
|
{
"content_hash": "e542be0cf5a8026aa5e4a93248f755f1",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 98,
"avg_line_length": 27.792079207920793,
"alnum_prop": 0.571072319201995,
"repo_name": "rafaeltg/pydl",
"id": "c0dda605f90df7e3c6bf5b950c759c856beb1a03",
"size": "2807",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pydl/scripts/optimization.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "84942"
}
],
"symlink_target": ""
}
|
import RPi.GPIO as GPIO, feedparser, time
DEBUG = 1
USERNAME = "tachijuan" # just the part before the @ sign, add yours here
PASSWORD = ""
NEWMAIL_OFFSET = 1 # my unread messages never go to zero, yours might
MAIL_CHECK_FREQ = 60 # check mail every 60 seconds
GPIO.setmode(GPIO.BOARD)
GREEN_LED = 7
RED_LED = 22
GPIO.setup(GREEN_LED, GPIO.OUT)
GPIO.setup(RED_LED, GPIO.OUT)
while True:
newmails = int(feedparser.parse("https://" + USERNAME + ":" + PASSWORD +"@mail.google.com/gmail/feed/atom")["feed"]["fullcount"])
if DEBUG:
print "You have", newmails, "new emails!"
if newmails > NEWMAIL_OFFSET:
GPIO.output(GREEN_LED, True)
GPIO.output(RED_LED, False)
else:
GPIO.output(GREEN_LED, False)
GPIO.output(RED_LED, True)
time.sleep(MAIL_CHECK_FREQ)
|
{
"content_hash": "197facf3a9b1981ccc35e1b8d85dd6c1",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 137,
"avg_line_length": 28.967741935483872,
"alnum_prop": 0.6035634743875279,
"repo_name": "tachijuan/python",
"id": "35b6f317e2e3176dd60929563c440f8ee7fedb03",
"size": "921",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mail.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1088"
},
{
"name": "Python",
"bytes": "65208"
}
],
"symlink_target": ""
}
|
import py, os, cffi, re
import _cffi_backend
def getlines():
try:
f = open(os.path.join(os.path.dirname(cffi.__file__),
'..', 'c', 'commontypes.c'))
except IOError:
py.test.skip("cannot find ../c/commontypes.c")
lines = [line for line in f.readlines() if line.strip().startswith('EQ(')]
f.close()
return lines
def test_alphabetical_order():
lines = getlines()
assert lines == sorted(lines)
def test_dependencies():
r = re.compile(r'EQ[(]"([^"]+)",(?:\s*"([A-Z0-9_]+)\s*[*]*"[)])?')
lines = getlines()
d = {}
for line in lines:
match = r.search(line)
if match is not None:
d[match.group(1)] = match.group(2)
for value in d.values():
if value:
assert value in d
def test_get_common_types():
d = {}
_cffi_backend._get_common_types(d)
assert d["bool"] == "_Bool"
|
{
"content_hash": "1c88ff65ce7d34450b0d3a0ea9d630c0",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 28,
"alnum_prop": 0.5157563025210085,
"repo_name": "hipnusleo/laserjet",
"id": "923568d4daf2ea6e7dbc24bb2cfba1afcb1e2567",
"size": "952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resource/pypi/cffi-1.9.1/testing/cffi1/test_commontypes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3096"
},
{
"name": "Batchfile",
"bytes": "13184"
},
{
"name": "C",
"bytes": "672858"
},
{
"name": "C++",
"bytes": "9678"
},
{
"name": "Go",
"bytes": "6671"
},
{
"name": "HTML",
"bytes": "850945"
},
{
"name": "Java",
"bytes": "14456"
},
{
"name": "Makefile",
"bytes": "14373"
},
{
"name": "Python",
"bytes": "5156663"
}
],
"symlink_target": ""
}
|
from fabric.api import run, settings, env
from cloudferrylib.base.action import action
from cloudferrylib.utils import forward_agent
from cloudferrylib.utils import utils as utl
INSTANCES = 'instances'
class ConvertFile(action.Action):
def run(self, info=None, **kwargs):
cfg = self.cloud.cloud_config.cloud
image_res = self.cloud.resources[utl.IMAGE_RESOURCE]
if image_res.config.image.convert_to_raw:
return {}
for instance_id, instance in info[utl.INSTANCES_TYPE].iteritems():
image_id = \
info[INSTANCES][instance_id][utl.INSTANCE_BODY]['image_id']
images = image_res.get_image_by_id_converted(image_id=image_id)
image = images[utl.IMAGES_TYPE][image_id]
disk_format = image[utl.IMAGE_BODY]['disk_format']
base_file = "%s/%s" % (cfg.temp, "temp%s_base" % instance_id)
if disk_format.lower() != utl.RAW:
self.convert_file_to_raw(cfg.host, disk_format, base_file)
return {}
@staticmethod
def convert_file_to_raw(host, disk_format, filepath):
with settings(host_string=host,
connection_attempts=env.connection_attempts):
with forward_agent(env.key_filename):
run("qemu-img convert -f %s -O raw %s %s.tmp" %
(disk_format, filepath, filepath))
run("mv -f %s.tmp %s" % (filepath, filepath))
|
{
"content_hash": "d2ab2c7237e14596a4d2529314b1c60c",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 75,
"avg_line_length": 44.09090909090909,
"alnum_prop": 0.6109965635738832,
"repo_name": "japaniel/CloudFerry",
"id": "f86582dd81b004c774f3dd8deb3a4a7a4f8853d1",
"size": "1455",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cloudferrylib/os/actions/convert_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2615"
},
{
"name": "Python",
"bytes": "971902"
},
{
"name": "Ruby",
"bytes": "2695"
},
{
"name": "Shell",
"bytes": "25415"
}
],
"symlink_target": ""
}
|
"""
mistune
~~~~~~~
    The fastest markdown parser in pure Python with renderer features.
:copyright: (c) 2014 - 2017 by Hsiaoming Yang.
"""
import re
import inspect
__version__ = '0.7.4'
__author__ = 'Hsiaoming Yang <me@lepture.com>'
__all__ = [
'BlockGrammar', 'BlockLexer',
'InlineGrammar', 'InlineLexer',
'Renderer', 'Markdown',
'markdown', 'escape',
]
_key_pattern = re.compile(r'\s+')
_nonalpha_pattern = re.compile(r'\W')
_escape_pattern = re.compile(r'&(?!#?\w+;)')
_newline_pattern = re.compile(r'\r\n|\r')
_block_quote_leading_pattern = re.compile(r'^ *> ?', flags=re.M)
_block_code_leading_pattern = re.compile(r'^ {4}', re.M)
_inline_tags = [
'a', 'em', 'strong', 'small', 's', 'cite', 'q', 'dfn', 'abbr', 'data',
'time', 'code', 'var', 'samp', 'kbd', 'sub', 'sup', 'i', 'b', 'u', 'mark',
'ruby', 'rt', 'rp', 'bdi', 'bdo', 'span', 'br', 'wbr', 'ins', 'del',
'img', 'font',
]
_pre_tags = ['pre', 'script', 'style']
_valid_end = r'(?!:/|[^\w\s@]*@)\b'
_valid_attr = r'''\s*[a-zA-Z\-](?:\=(?:"[^"]*"|'[^']*'|[^\s'">]+))?'''
_block_tag = r'(?!(?:%s)\b)\w+%s' % ('|'.join(_inline_tags), _valid_end)
_scheme_blacklist = ('javascript:', 'vbscript:')
def _pure_pattern(regex):
pattern = regex.pattern
if pattern.startswith('^'):
pattern = pattern[1:]
return pattern
def _keyify(key):
return _key_pattern.sub(' ', key.lower())
def escape(text, quote=False, smart_amp=True):
"""Replace special characters "&", "<" and ">" to HTML-safe sequences.
The original cgi.escape will always escape "&", but you can control
this one for a smart escape amp.
:param quote: if set to True, " and ' will be escaped.
:param smart_amp: if set to False, & will always be escaped.
"""
if smart_amp:
text = _escape_pattern.sub('&', text)
else:
text = text.replace('&', '&')
text = text.replace('<', '<')
text = text.replace('>', '>')
if quote:
text = text.replace('"', '"')
text = text.replace("'", ''')
return text
def escape_link(url):
"""Remove dangerous URL schemes like javascript: and escape afterwards."""
lower_url = url.lower().strip('\x00\x1a \n\r\t')
for scheme in _scheme_blacklist:
if lower_url.startswith(scheme):
return ''
return escape(url, quote=True, smart_amp=False)
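# For example, the helpers above behave as follows (expected outputs shown):
#
#   escape('<b>&"</b>')                 # -> '&lt;b&gt;&amp;"&lt;/b&gt;'
#   escape('<b>"</b>', quote=True)      # -> '&lt;b&gt;&quot;&lt;/b&gt;'
#   escape_link('javascript:alert(1)')  # -> ''  (blacklisted scheme)
#   escape_link('http://a.com/?q=1')    # -> 'http://a.com/?q=1'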
def preprocessing(text, tab=4):
text = _newline_pattern.sub('\n', text)
text = text.expandtabs(tab)
text = text.replace('\u00a0', ' ')
text = text.replace('\u2424', '\n')
pattern = re.compile(r'^ +$', re.M)
return pattern.sub('', text)
class BlockGrammar(object):
"""Grammars for block level tokens."""
def_links = re.compile(
r'^ *\[([^^\]]+)\]: *' # [key]:
r'<?([^\s>]+)>?' # <link> or link
r'(?: +["(]([^\n]+)[")])? *(?:\n+|$)'
)
def_footnotes = re.compile(
r'^\[\^([^\]]+)\]: *('
r'[^\n]*(?:\n+|$)' # [^key]:
r'(?: {1,}[^\n]*(?:\n+|$))*'
r')'
)
newline = re.compile(r'^\n+')
block_code = re.compile(r'^( {4}[^\n]+\n*)+')
fences = re.compile(
r'^ *(`{3,}|~{3,}) *(\S+)? *\n' # ```lang
r'([\s\S]+?)\s*'
r'\1 *(?:\n+|$)' # ```
)
hrule = re.compile(r'^ {0,3}[-*_](?: *[-*_]){2,} *(?:\n+|$)')
heading = re.compile(r'^ *(#{1,6}) *([^\n]+?) *#* *(?:\n+|$)')
lheading = re.compile(r'^([^\n]+)\n *(=|-)+ *(?:\n+|$)')
block_quote = re.compile(r'^( *>[^\n]+(\n[^\n]+)*\n*)+')
list_block = re.compile(
r'^( *)([*+-]|\d+\.) [\s\S]+?'
r'(?:'
r'\n+(?=\1?(?:[-*_] *){3,}(?:\n+|$))' # hrule
r'|\n+(?=%s)' # def links
r'|\n+(?=%s)' # def footnotes
r'|\n{2,}'
r'(?! )'
r'(?!\1(?:[*+-]|\d+\.) )\n*'
r'|'
r'\s*$)' % (
_pure_pattern(def_links),
_pure_pattern(def_footnotes),
)
)
list_item = re.compile(
r'^(( *)(?:[*+-]|\d+\.) [^\n]*'
r'(?:\n(?!\2(?:[*+-]|\d+\.) )[^\n]*)*)',
flags=re.M
)
list_bullet = re.compile(r'^ *(?:[*+-]|\d+\.) +')
paragraph = re.compile(
r'^((?:[^\n]+\n?(?!'
r'%s|%s|%s|%s|%s|%s|%s|%s|%s'
r'))+)\n*' % (
_pure_pattern(fences).replace(r'\1', r'\2'),
_pure_pattern(list_block).replace(r'\1', r'\3'),
_pure_pattern(hrule),
_pure_pattern(heading),
_pure_pattern(lheading),
_pure_pattern(block_quote),
_pure_pattern(def_links),
_pure_pattern(def_footnotes),
'<' + _block_tag,
)
)
block_html = re.compile(
r'^ *(?:%s|%s|%s) *(?:\n{2,}|\s*$)' % (
r'<!--[\s\S]*?-->',
r'<(%s)((?:%s)*?)>([\s\S]*?)<\/\1>' % (_block_tag, _valid_attr),
r'<%s(?:%s)*?\s*\/?>' % (_block_tag, _valid_attr),
)
)
table = re.compile(
r'^ *\|(.+)\n *\|( *[-:]+[-| :]*)\n((?: *\|.*(?:\n|$))*)\n*'
)
nptable = re.compile(
r'^ *(\S.*\|.*)\n *([-:]+ *\|[-| :]*)\n((?:.*\|.*(?:\n|$))*)\n*'
)
text = re.compile(r'^[^\n]+')
class BlockLexer(object):
"""Block level lexer for block grammars."""
grammar_class = BlockGrammar
default_rules = [
'newline', 'hrule', 'block_code', 'fences', 'heading',
'nptable', 'lheading', 'block_quote',
'list_block', 'block_html', 'def_links',
'def_footnotes', 'table', 'paragraph', 'text'
]
list_rules = (
'newline', 'block_code', 'fences', 'lheading', 'hrule',
'block_quote', 'list_block', 'block_html', 'text',
)
footnote_rules = (
'newline', 'block_code', 'fences', 'heading',
'nptable', 'lheading', 'hrule', 'block_quote',
'list_block', 'block_html', 'table', 'paragraph', 'text'
)
def __init__(self, rules=None, **kwargs):
self.tokens = []
self.def_links = {}
self.def_footnotes = {}
if not rules:
rules = self.grammar_class()
self.rules = rules
def __call__(self, text, rules=None):
return self.parse(text, rules)
def parse(self, text, rules=None):
text = text.rstrip('\n')
if not rules:
rules = self.default_rules
def manipulate(text):
for key in rules:
rule = getattr(self.rules, key)
m = rule.match(text)
if not m:
continue
getattr(self, 'parse_%s' % key)(m)
return m
return False # pragma: no cover
while text:
m = manipulate(text)
if m is not False:
text = text[len(m.group(0)):]
continue
if text: # pragma: no cover
raise RuntimeError('Infinite loop at: %s' % text)
return self.tokens
def parse_newline(self, m):
length = len(m.group(0))
if length > 1:
self.tokens.append({'type': 'newline'})
def parse_block_code(self, m):
# clean leading whitespace
code = _block_code_leading_pattern.sub('', m.group(0))
self.tokens.append({
'type': 'code',
'lang': None,
'text': code,
})
def parse_fences(self, m):
self.tokens.append({
'type': 'code',
'lang': m.group(2),
'text': m.group(3),
})
def parse_heading(self, m):
self.tokens.append({
'type': 'heading',
'level': len(m.group(1)),
'text': m.group(2),
})
def parse_lheading(self, m):
"""Parse setext heading."""
self.tokens.append({
'type': 'heading',
'level': 1 if m.group(2) == '=' else 2,
'text': m.group(1),
})
def parse_hrule(self, m):
self.tokens.append({'type': 'hrule'})
def parse_list_block(self, m):
bull = m.group(2)
self.tokens.append({
'type': 'list_start',
'ordered': '.' in bull,
})
cap = m.group(0)
self._process_list_item(cap, bull)
self.tokens.append({'type': 'list_end'})
def _process_list_item(self, cap, bull):
cap = self.rules.list_item.findall(cap)
_next = False
length = len(cap)
for i in range(length):
item = cap[i][0]
# remove the bullet
space = len(item)
item = self.rules.list_bullet.sub('', item)
# outdent
if '\n ' in item:
space = space - len(item)
pattern = re.compile(r'^ {1,%d}' % space, flags=re.M)
item = pattern.sub('', item)
# determine whether item is loose or not
loose = _next
if not loose and re.search(r'\n\n(?!\s*$)', item):
loose = True
rest = len(item)
if i != length - 1 and rest:
_next = item[rest-1] == '\n'
if not loose:
loose = _next
if loose:
t = 'loose_item_start'
else:
t = 'list_item_start'
self.tokens.append({'type': t})
# recurse
self.parse(item, self.list_rules)
self.tokens.append({'type': 'list_item_end'})
def parse_block_quote(self, m):
self.tokens.append({'type': 'block_quote_start'})
# clean leading >
cap = _block_quote_leading_pattern.sub('', m.group(0))
self.parse(cap)
self.tokens.append({'type': 'block_quote_end'})
def parse_def_links(self, m):
key = _keyify(m.group(1))
self.def_links[key] = {
'link': m.group(2),
'title': m.group(3),
}
def parse_def_footnotes(self, m):
key = _keyify(m.group(1))
if key in self.def_footnotes:
# footnote is already defined
return
self.def_footnotes[key] = 0
self.tokens.append({
'type': 'footnote_start',
'key': key,
})
text = m.group(2)
if '\n' in text:
lines = text.split('\n')
whitespace = None
for line in lines[1:]:
space = len(line) - len(line.lstrip())
if space and (not whitespace or space < whitespace):
whitespace = space
newlines = [lines[0]]
for line in lines[1:]:
newlines.append(line[whitespace:])
text = '\n'.join(newlines)
self.parse(text, self.footnote_rules)
self.tokens.append({
'type': 'footnote_end',
'key': key,
})
def parse_table(self, m):
item = self._process_table(m)
cells = re.sub(r'(?: *\| *)?\n$', '', m.group(3))
cells = cells.split('\n')
for i, v in enumerate(cells):
v = re.sub(r'^ *\| *| *\| *$', '', v)
cells[i] = re.split(r' *\| *', v)
item['cells'] = cells
self.tokens.append(item)
def parse_nptable(self, m):
item = self._process_table(m)
cells = re.sub(r'\n$', '', m.group(3))
cells = cells.split('\n')
for i, v in enumerate(cells):
cells[i] = re.split(r' *\| *', v)
item['cells'] = cells
self.tokens.append(item)
def _process_table(self, m):
header = re.sub(r'^ *| *\| *$', '', m.group(1))
header = re.split(r' *\| *', header)
align = re.sub(r' *|\| *$', '', m.group(2))
align = re.split(r' *\| *', align)
for i, v in enumerate(align):
if re.search(r'^ *-+: *$', v):
align[i] = 'right'
elif re.search(r'^ *:-+: *$', v):
align[i] = 'center'
elif re.search(r'^ *:-+ *$', v):
align[i] = 'left'
else:
align[i] = None
item = {
'type': 'table',
'header': header,
'align': align,
}
return item
def parse_block_html(self, m):
tag = m.group(1)
if not tag:
text = m.group(0)
self.tokens.append({
'type': 'close_html',
'text': text
})
else:
attr = m.group(2)
text = m.group(3)
self.tokens.append({
'type': 'open_html',
'tag': tag,
'extra': attr,
'text': text
})
def parse_paragraph(self, m):
text = m.group(1).rstrip('\n')
self.tokens.append({'type': 'paragraph', 'text': text})
def parse_text(self, m):
text = m.group(0)
self.tokens.append({'type': 'text', 'text': text})
class InlineGrammar(object):
"""Grammars for inline level tokens."""
escape = re.compile(r'^\\([\\`*{}\[\]()#+\-.!_>~|])') # \* \+ \! ....
inline_html = re.compile(
r'^(?:%s|%s|%s)' % (
r'<!--[\s\S]*?-->',
r'<(\w+%s)((?:%s)*?)\s*>([\s\S]*?)<\/\1>' % (_valid_end, _valid_attr),
r'<\w+%s(?:%s)*?\s*\/?>' % (_valid_end, _valid_attr),
)
)
autolink = re.compile(r'^<([^ >]+(@|:)[^ >]+)>')
link = re.compile(
r'^!?\[('
r'(?:\[[^^\]]*\]|[^\[\]]|\](?=[^\[]*\]))*'
r')\]\('
r'''\s*(<)?([\s\S]*?)(?(2)>)(?:\s+['"]([\s\S]*?)['"])?\s*'''
r'\)'
)
reflink = re.compile(
r'^!?\[('
r'(?:\[[^^\]]*\]|[^\[\]]|\](?=[^\[]*\]))*'
r')\]\s*\[([^^\]]*)\]'
)
nolink = re.compile(r'^!?\[((?:\[[^\]]*\]|[^\[\]])*)\]')
url = re.compile(r'''^(https?:\/\/[^\s<]+[^<.,:;"')\]\s])''')
double_emphasis = re.compile(
r'^_{2}([\s\S]+?)_{2}(?!_)' # __word__
r'|'
r'^\*{2}([\s\S]+?)\*{2}(?!\*)' # **word**
)
emphasis = re.compile(
r'^\b_((?:__|[^_])+?)_\b' # _word_
r'|'
r'^\*((?:\*\*|[^\*])+?)\*(?!\*)' # *word*
)
code = re.compile(r'^(`+)\s*([\s\S]*?[^`])\s*\1(?!`)') # `code`
linebreak = re.compile(r'^ {2,}\n(?!\s*$)')
strikethrough = re.compile(r'^~~(?=\S)([\s\S]*?\S)~~') # ~~word~~
footnote = re.compile(r'^\[\^([^\]]+)\]')
text = re.compile(r'^[\s\S]+?(?=[\\<!\[_*`~]|https?://| {2,}\n|$)')
def hard_wrap(self):
"""Grammar for hard wrap linebreak. You don't need to add two
spaces at the end of a line.
"""
self.linebreak = re.compile(r'^ *\n(?!\s*$)')
self.text = re.compile(
r'^[\s\S]+?(?=[\\<!\[_*`~]|https?://| *\n|$)'
)
class InlineLexer(object):
"""Inline level lexer for inline grammars."""
grammar_class = InlineGrammar
default_rules = [
'escape', 'inline_html', 'autolink', 'url',
'footnote', 'link', 'reflink', 'nolink',
'double_emphasis', 'emphasis', 'code',
'linebreak', 'strikethrough', 'text',
]
inline_html_rules = [
'escape', 'autolink', 'url', 'link', 'reflink',
'nolink', 'double_emphasis', 'emphasis', 'code',
'linebreak', 'strikethrough', 'text',
]
def __init__(self, renderer, rules=None, **kwargs):
self.renderer = renderer
self.links = {}
self.footnotes = {}
self.footnote_index = 0
if not rules:
rules = self.grammar_class()
kwargs.update(self.renderer.options)
if kwargs.get('hard_wrap'):
rules.hard_wrap()
self.rules = rules
self._in_link = False
self._in_footnote = False
self._parse_inline_html = kwargs.get('parse_inline_html')
def __call__(self, text, rules=None):
return self.output(text, rules)
def setup(self, links, footnotes):
self.footnote_index = 0
self.links = links or {}
self.footnotes = footnotes or {}
def output(self, text, rules=None):
text = text.rstrip('\n')
if not rules:
rules = list(self.default_rules)
if self._in_footnote and 'footnote' in rules:
rules.remove('footnote')
output = self.renderer.placeholder()
def manipulate(text):
for key in rules:
pattern = getattr(self.rules, key)
m = pattern.match(text)
if not m:
continue
self.line_match = m
out = getattr(self, 'output_%s' % key)(m)
if out is not None:
return m, out
return False # pragma: no cover
while text:
ret = manipulate(text)
if ret is not False:
m, out = ret
output += out
text = text[len(m.group(0)):]
continue
if text: # pragma: no cover
raise RuntimeError('Infinite loop at: %s' % text)
return output
def output_escape(self, m):
text = m.group(1)
return self.renderer.escape(text)
def output_autolink(self, m):
link = m.group(1)
if m.group(2) == '@':
is_email = True
else:
is_email = False
return self.renderer.autolink(link, is_email)
def output_url(self, m):
link = m.group(1)
if self._in_link:
return self.renderer.text(link)
return self.renderer.autolink(link, False)
def output_inline_html(self, m):
tag = m.group(1)
if self._parse_inline_html and tag in _inline_tags:
text = m.group(3)
if tag == 'a':
self._in_link = True
text = self.output(text, rules=self.inline_html_rules)
self._in_link = False
else:
text = self.output(text, rules=self.inline_html_rules)
extra = m.group(2) or ''
html = '<%s%s>%s</%s>' % (tag, extra, text, tag)
else:
html = m.group(0)
return self.renderer.inline_html(html)
def output_footnote(self, m):
key = _keyify(m.group(1))
if key not in self.footnotes:
return None
if self.footnotes[key]:
return None
self.footnote_index += 1
self.footnotes[key] = self.footnote_index
return self.renderer.footnote_ref(key, self.footnote_index)
def output_link(self, m):
return self._process_link(m, m.group(3), m.group(4))
def output_reflink(self, m):
key = _keyify(m.group(2) or m.group(1))
if key not in self.links:
return None
ret = self.links[key]
return self._process_link(m, ret['link'], ret['title'])
def output_nolink(self, m):
key = _keyify(m.group(1))
if key not in self.links:
return None
ret = self.links[key]
return self._process_link(m, ret['link'], ret['title'])
def _process_link(self, m, link, title=None):
line = m.group(0)
text = m.group(1)
if line[0] == '!':
return self.renderer.image(link, title, text)
self._in_link = True
text = self.output(text)
self._in_link = False
return self.renderer.link(link, title, text)
def output_double_emphasis(self, m):
text = m.group(2) or m.group(1)
text = self.output(text)
return self.renderer.double_emphasis(text)
def output_emphasis(self, m):
text = m.group(2) or m.group(1)
text = self.output(text)
return self.renderer.emphasis(text)
def output_code(self, m):
text = m.group(2)
return self.renderer.codespan(text)
def output_linebreak(self, m):
return self.renderer.linebreak()
def output_strikethrough(self, m):
text = self.output(m.group(1))
return self.renderer.strikethrough(text)
def output_text(self, m):
text = m.group(0)
return self.renderer.text(text)
class Renderer(object):
"""The default HTML renderer for rendering Markdown.
"""
def __init__(self, **kwargs):
self.options = kwargs
def placeholder(self):
"""Returns the default, empty output value for the renderer.
All renderer methods use the '+=' operator to append to this value.
Default is a string so rendering HTML can build up a result string with
the rendered Markdown.
Can be overridden by Renderer subclasses to be types like an empty
list, allowing the renderer to create a tree-like structure to
represent the document (which can then be reprocessed later into a
separate format like docx or pdf).
"""
return ''
def block_code(self, code, lang=None):
"""Rendering block level code. ``pre > code``.
:param code: text content of the code block.
:param lang: language of the given code.
"""
code = code.rstrip('\n')
if not lang:
code = escape(code, smart_amp=False)
return '<pre><code>%s\n</code></pre>\n' % code
code = escape(code, quote=True, smart_amp=False)
return '<pre><code class="lang-%s">%s\n</code></pre>\n' % (lang, code)
def block_quote(self, text):
"""Rendering <blockquote> with the given text.
:param text: text content of the blockquote.
"""
return '<blockquote>%s\n</blockquote>\n' % text.rstrip('\n')
def block_html(self, html):
"""Rendering block level pure html content.
:param html: text content of the html snippet.
"""
if self.options.get('skip_style') and \
html.lower().startswith('<style'):
return ''
if self.options.get('escape'):
return escape(html)
return html
def header(self, text, level, raw=None):
"""Rendering header/heading tags like ``<h1>`` ``<h2>``.
:param text: rendered text content for the header.
:param level: a number for the header level, for example: 1.
:param raw: raw text content of the header.
"""
return '<h%d>%s</h%d>\n' % (level, text, level)
def hrule(self):
"""Rendering method for ``<hr>`` tag."""
if self.options.get('use_xhtml'):
return '<hr />\n'
return '<hr>\n'
def list(self, body, ordered=True):
"""Rendering list tags like ``<ul>`` and ``<ol>``.
:param body: body contents of the list.
:param ordered: whether this list is ordered or not.
"""
tag = 'ul'
if ordered:
tag = 'ol'
return '<%s>\n%s</%s>\n' % (tag, body, tag)
def list_item(self, text):
"""Rendering list item snippet. Like ``<li>``."""
return '<li>%s</li>\n' % text
def paragraph(self, text):
"""Rendering paragraph tags. Like ``<p>``."""
return '<p>%s</p>\n' % text.strip(' ')
def table(self, header, body):
"""Rendering table element. Wrap header and body in it.
:param header: header part of the table.
:param body: body part of the table.
"""
return (
'<table>\n<thead>%s</thead>\n'
'<tbody>\n%s</tbody>\n</table>\n'
) % (header, body)
def table_row(self, content):
"""Rendering a table row. Like ``<tr>``.
:param content: content of current table row.
"""
return '<tr>\n%s</tr>\n' % content
def table_cell(self, content, **flags):
"""Rendering a table cell. Like ``<th>`` ``<td>``.
:param content: content of current table cell.
:param header: whether this is header or not.
:param align: align of current table cell.
"""
if flags['header']:
tag = 'th'
else:
tag = 'td'
align = flags['align']
if not align:
return '<%s>%s</%s>\n' % (tag, content, tag)
return '<%s style="text-align:%s">%s</%s>\n' % (
tag, align, content, tag
)
def double_emphasis(self, text):
"""Rendering **strong** text.
:param text: text content for emphasis.
"""
return '<strong>%s</strong>' % text
def emphasis(self, text):
"""Rendering *emphasis* text.
:param text: text content for emphasis.
"""
return '<em>%s</em>' % text
def codespan(self, text):
"""Rendering inline `code` text.
:param text: text content for inline code.
"""
text = escape(text.rstrip(), smart_amp=False)
return '<code>%s</code>' % text
def linebreak(self):
"""Rendering line break like ``<br>``."""
if self.options.get('use_xhtml'):
return '<br />\n'
return '<br>\n'
def strikethrough(self, text):
"""Rendering ~~strikethrough~~ text.
:param text: text content for strikethrough.
"""
return '<del>%s</del>' % text
def text(self, text):
"""Rendering unformatted text.
:param text: text content.
"""
if self.options.get('parse_block_html'):
return text
return escape(text)
def escape(self, text):
"""Rendering escape sequence.
:param text: text content.
"""
return escape(text)
def autolink(self, link, is_email=False):
"""Rendering a given link or email address.
:param link: link content or email address.
:param is_email: whether this is an email or not.
"""
text = link = escape(link)
if is_email:
link = 'mailto:%s' % link
return '<a href="%s">%s</a>' % (link, text)
def link(self, link, title, text):
"""Rendering a given link with content and title.
:param link: href link for ``<a>`` tag.
:param title: title content for `title` attribute.
:param text: text content for description.
"""
link = escape_link(link)
if not title:
return '<a href="%s">%s</a>' % (link, text)
title = escape(title, quote=True)
return '<a href="%s" title="%s">%s</a>' % (link, title, text)
def image(self, src, title, text):
"""Rendering a image with title and text.
:param src: source link of the image.
:param title: title text of the image.
:param text: alt text of the image.
"""
src = escape_link(src)
text = escape(text, quote=True)
if title:
title = escape(title, quote=True)
html = '<img src="%s" alt="%s" title="%s"' % (src, text, title)
else:
html = '<img src="%s" alt="%s"' % (src, text)
if self.options.get('use_xhtml'):
return '%s />' % html
return '%s>' % html
def inline_html(self, html):
"""Rendering span level pure html content.
:param html: text content of the html snippet.
"""
if self.options.get('escape'):
return escape(html)
return html
def newline(self):
"""Rendering newline element."""
return ''
def footnote_ref(self, key, index):
"""Rendering the ref anchor of a footnote.
:param key: identity key for the footnote.
:param index: the index count of current footnote.
"""
html = (
'<sup class="footnote-ref" id="fnref-%s">'
'<a href="#fn-%s" rel="footnote">%d</a></sup>'
) % (escape(key), escape(key), index)
return html
def footnote_item(self, key, text):
"""Rendering a footnote item.
:param key: identity key for the footnote.
:param text: text content of the footnote.
"""
back = (
'<a href="#fnref-%s" rev="footnote">↩</a>'
) % escape(key)
text = text.rstrip()
if text.endswith('</p>'):
text = re.sub(r'<\/p>$', r'%s</p>' % back, text)
else:
text = '%s<p>%s</p>' % (text, back)
html = '<li id="fn-%s">%s</li>\n' % (escape(key), text)
return html
def footnotes(self, text):
"""Wrapper for all footnotes.
:param text: contents of all footnotes.
"""
html = '<div class="footnotes">\n%s<ol>%s</ol>\n</div>\n'
return html % (self.hrule(), text)
class Markdown(object):
"""The Markdown parser.
:param renderer: An instance of ``Renderer``.
:param inline: An inline lexer class or instance.
:param block: A block lexer class or instance.
"""
def __init__(self, renderer=None, inline=None, block=None, **kwargs):
if not renderer:
renderer = Renderer(**kwargs)
else:
kwargs.update(renderer.options)
self.renderer = renderer
if inline and inspect.isclass(inline):
inline = inline(renderer, **kwargs)
if block and inspect.isclass(block):
block = block(**kwargs)
if inline:
self.inline = inline
else:
self.inline = InlineLexer(renderer, **kwargs)
self.block = block or BlockLexer(BlockGrammar())
self.footnotes = []
self.tokens = []
# detect if it should parse text in block html
self._parse_block_html = kwargs.get('parse_block_html')
def __call__(self, text):
return self.parse(text)
def render(self, text):
"""Render the Markdown text.
:param text: markdown formatted text content.
"""
return self.parse(text)
def parse(self, text):
out = self.output(preprocessing(text))
keys = self.block.def_footnotes
# reset block
self.block.def_links = {}
self.block.def_footnotes = {}
# reset inline
self.inline.links = {}
self.inline.footnotes = {}
if not self.footnotes:
return out
footnotes = filter(lambda o: keys.get(o['key']), self.footnotes)
self.footnotes = sorted(
footnotes, key=lambda o: keys.get(o['key']), reverse=True
)
body = self.renderer.placeholder()
while self.footnotes:
note = self.footnotes.pop()
body += self.renderer.footnote_item(
note['key'], note['text']
)
out += self.renderer.footnotes(body)
return out
def pop(self):
if not self.tokens:
return None
self.token = self.tokens.pop()
return self.token
def peek(self):
if self.tokens:
return self.tokens[-1]
return None # pragma: no cover
def output(self, text, rules=None):
self.tokens = self.block(text, rules)
self.tokens.reverse()
self.inline.setup(self.block.def_links, self.block.def_footnotes)
out = self.renderer.placeholder()
while self.pop():
out += self.tok()
return out
def tok(self):
t = self.token['type']
        # special cases
if t.endswith('_start'):
t = t[:-6]
return getattr(self, 'output_%s' % t)()
def tok_text(self):
text = self.token['text']
while self.peek()['type'] == 'text':
text += '\n' + self.pop()['text']
return self.inline(text)
def output_newline(self):
return self.renderer.newline()
def output_hrule(self):
return self.renderer.hrule()
def output_heading(self):
return self.renderer.header(
self.inline(self.token['text']),
self.token['level'],
self.token['text'],
)
def output_code(self):
return self.renderer.block_code(
self.token['text'], self.token['lang']
)
def output_table(self):
aligns = self.token['align']
aligns_length = len(aligns)
cell = self.renderer.placeholder()
# header part
header = self.renderer.placeholder()
for i, value in enumerate(self.token['header']):
align = aligns[i] if i < aligns_length else None
flags = {'header': True, 'align': align}
cell += self.renderer.table_cell(self.inline(value), **flags)
header += self.renderer.table_row(cell)
# body part
body = self.renderer.placeholder()
for i, row in enumerate(self.token['cells']):
cell = self.renderer.placeholder()
for j, value in enumerate(row):
align = aligns[j] if j < aligns_length else None
flags = {'header': False, 'align': align}
cell += self.renderer.table_cell(self.inline(value), **flags)
body += self.renderer.table_row(cell)
return self.renderer.table(header, body)
def output_block_quote(self):
body = self.renderer.placeholder()
while self.pop()['type'] != 'block_quote_end':
body += self.tok()
return self.renderer.block_quote(body)
def output_list(self):
ordered = self.token['ordered']
body = self.renderer.placeholder()
while self.pop()['type'] != 'list_end':
body += self.tok()
return self.renderer.list(body, ordered)
def output_list_item(self):
body = self.renderer.placeholder()
while self.pop()['type'] != 'list_item_end':
if self.token['type'] == 'text':
body += self.tok_text()
else:
body += self.tok()
return self.renderer.list_item(body)
def output_loose_item(self):
body = self.renderer.placeholder()
while self.pop()['type'] != 'list_item_end':
body += self.tok()
return self.renderer.list_item(body)
def output_footnote(self):
self.inline._in_footnote = True
body = self.renderer.placeholder()
key = self.token['key']
while self.pop()['type'] != 'footnote_end':
body += self.tok()
self.footnotes.append({'key': key, 'text': body})
self.inline._in_footnote = False
return self.renderer.placeholder()
def output_close_html(self):
text = self.token['text']
return self.renderer.block_html(text)
def output_open_html(self):
text = self.token['text']
tag = self.token['tag']
if self._parse_block_html and tag not in _pre_tags:
text = self.inline(text, rules=self.inline.inline_html_rules)
extra = self.token.get('extra') or ''
html = '<%s%s>%s</%s>' % (tag, extra, text, tag)
return self.renderer.block_html(html)
def output_paragraph(self):
return self.renderer.paragraph(self.inline(self.token['text']))
def output_text(self):
return self.renderer.paragraph(self.tok_text())
def markdown(text, escape=True, **kwargs):
"""Render markdown formatted text to html.
:param text: markdown formatted text content.
    :param escape: if set to False, HTML tags will not be escaped.
:param use_xhtml: output with xhtml tags.
:param hard_wrap: if set to True, it will use the GFM line breaks feature.
:param parse_block_html: parse text only in block level html.
:param parse_inline_html: parse text only in inline level html.
"""
return Markdown(escape=escape, **kwargs)(text)
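# Hedged usage sketch (not part of the library itself): subclassing Renderer
# to override a single output method. The sample input string is invented.
if __name__ == '__main__':
    class UnderlineEmphasisRenderer(Renderer):
        def emphasis(self, text):
            # render *emphasis* as <u> instead of the default <em>
            return '<u>%s</u>' % text

    md = Markdown(renderer=UnderlineEmphasisRenderer())
    print(md('*hello* **world**'))
    # expected: <p><u>hello</u> <strong>world</strong></p>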
|
{
"content_hash": "39aec537b34808e5d5e7a4316542d2d5",
"timestamp": "",
"source": "github",
"line_count": 1157,
"max_line_length": 82,
"avg_line_length": 30.60933448573898,
"alnum_prop": 0.4996470422137512,
"repo_name": "ammarkhann/FinalSeniorCode",
"id": "ee09aff1774db8a4d82a1e9ff6d830c4b40d954a",
"size": "35431",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/mistune.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "229289"
},
{
"name": "C++",
"bytes": "171536"
},
{
"name": "CSS",
"bytes": "928345"
},
{
"name": "Fortran",
"bytes": "14107"
},
{
"name": "HTML",
"bytes": "853239"
},
{
"name": "JavaScript",
"bytes": "4838516"
},
{
"name": "Jupyter Notebook",
"bytes": "518186"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "81804894"
},
{
"name": "Roff",
"bytes": "6673"
},
{
"name": "Shell",
"bytes": "3409"
},
{
"name": "Smarty",
"bytes": "28408"
},
{
"name": "TeX",
"bytes": "1527"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1Role(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, api_version=None, kind=None, metadata=None, rules=None):
"""
V1beta1Role - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'rules': 'list[V1beta1PolicyRule]'
}
self.attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'rules': 'rules'
}
self._api_version = api_version
self._kind = kind
self._metadata = metadata
self._rules = rules
@property
def api_version(self):
"""
Gets the api_version of this V1beta1Role.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:return: The api_version of this V1beta1Role.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1beta1Role.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#resources
:param api_version: The api_version of this V1beta1Role.
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""
Gets the kind of this V1beta1Role.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:return: The kind of this V1beta1Role.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1beta1Role.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1beta1Role.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1beta1Role.
Standard object's metadata.
:return: The metadata of this V1beta1Role.
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1beta1Role.
Standard object's metadata.
:param metadata: The metadata of this V1beta1Role.
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def rules(self):
"""
Gets the rules of this V1beta1Role.
Rules holds all the PolicyRules for this Role
:return: The rules of this V1beta1Role.
:rtype: list[V1beta1PolicyRule]
"""
return self._rules
@rules.setter
def rules(self, rules):
"""
Sets the rules of this V1beta1Role.
Rules holds all the PolicyRules for this Role
:param rules: The rules of this V1beta1Role.
:type: list[V1beta1PolicyRule]
"""
if rules is None:
raise ValueError("Invalid value for `rules`, must not be `None`")
self._rules = rules
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
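# Illustrative usage sketch (not part of the generated client). The policy
# rule below is a plain dict standing in for a V1beta1PolicyRule instance,
# which is defined elsewhere in the client package.
if __name__ == '__main__':
    role = V1beta1Role(
        api_version='rbac.authorization.k8s.io/v1beta1',
        kind='Role',
        rules=[{'apiGroups': [''], 'resources': ['pods'], 'verbs': ['get', 'list']}],
    )
    print(role.to_dict())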
|
{
"content_hash": "bd79355c51dad7decb089b9d6a45e7fb",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 272,
"avg_line_length": 30.96875,
"alnum_prop": 0.5797174571140262,
"repo_name": "skuda/client-python",
"id": "e294646514d7073ed90916e363962c69e0891c3e",
"size": "5963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1beta1_role.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5907789"
},
{
"name": "Shell",
"bytes": "8195"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Weapon()
result.template = "object/weapon/melee/sword/crafted_saber/shared_sword_lightsaber_one_handed_s11_gen1.iff"
result.attribute_template_id = 10
result.stfName("weapon_name","sword_lightsaber_type11")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "31f885dcd4c5fa2e01193dba4a912b20",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 108,
"avg_line_length": 27.076923076923077,
"alnum_prop": 0.71875,
"repo_name": "anhstudios/swganh",
"id": "cbf5d9ac799d5afb016e8066bcd5d946655cbcba",
"size": "497",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/weapon/melee/sword/crafted_saber/shared_sword_lightsaber_one_handed_s11_gen1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
from test_helper import failed, passed, get_file_output, test_is_not_empty
def test_output():
output = get_file_output()
answers = ["('a', 'apple')", "('b', 'banana')", "('c', 'cherry')",
"('d', 'durian')", "('g', 'guava')", "('m', 'melon')"]
if all(kv in output for kv in answers):
passed()
else:
failed('Incorrect output. ' +
'Convert into a KV by its first letter and itself.')
if __name__ == '__main__':
test_is_not_empty()
test_output()
|
{
"content_hash": "6edff676eb5468965eda8e937c6efa78",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 74,
"avg_line_length": 27.263157894736842,
"alnum_prop": 0.5212355212355212,
"repo_name": "iemejia/incubator-beam",
"id": "7b37e800894f55bc1ffee1c3c73b15173f89025e",
"size": "1316",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "learning/katas/python/Common Transforms/WithKeys/WithKeys/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "22216"
},
{
"name": "Java",
"bytes": "9687045"
},
{
"name": "Protocol Buffer",
"bytes": "1407"
},
{
"name": "Shell",
"bytes": "10104"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models
from catalog.models import Product
class CartItem(models.Model):
"""
Corresponds to one type of item in the Cart
"""
date_added = models.DateTimeField(auto_now_add=True)
quantity = models.IntegerField(default=1)
product = models.ForeignKey('catalog.Product', unique=False)
cart_session = models.CharField(default='', max_length=50)
class Meta:
db_table = 'cart_items'
ordering = ['date_added']
def total(self):
""" Total returns the total value of the item in the Cart
"""
return self.quantity*self.product.price
def name(self):
""" Name returns the name of the item """
return self.product.name
def price(self):
""" Price returns the price of this particular item """
return self.product.price
def augment_quantity(self, quantity):
""" Adds one to the quantity of products """
self.quantity = self.quantity+int(quantity)
self.save()
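# Usage sketch (comments only; requires a configured Django project and a
# hypothetical catalog.Product instance `some_product`):
#
#   item = CartItem.objects.create(product=some_product, cart_session='abc123')
#   item.augment_quantity(2)   # quantity goes from the default 1 to 3
#   item.total()               # 3 * some_product.price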
|
{
"content_hash": "75cfcd8fd8e25ec543cf28e61795d313",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 65,
"avg_line_length": 32.84375,
"alnum_prop": 0.6460513796384396,
"repo_name": "meiordac/ecommerce",
"id": "5ad7fb954ca13a90129836bb3df6f5d467c29511",
"size": "1051",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cart/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3408"
},
{
"name": "HTML",
"bytes": "26192"
},
{
"name": "JavaScript",
"bytes": "1919"
},
{
"name": "Python",
"bytes": "31513"
}
],
"symlink_target": ""
}
|
from abc import ABCMeta, abstractmethod
class Preprocess:
__metaclass__ = ABCMeta
def __init__(self, parent_preprocess=None):
self._parent_preprocess = parent_preprocess
def __call__(self, dset):
return self.apply_preprocess(dset)
def apply_preprocess(self, dset):
try:
dset = self._parent_preprocess.apply_preprocess(dset)
except AttributeError:
pass
return self._apply_preprocessing(dset)
def reverse_preprocessing(self, dset):
try:
dset = self._reverse_preprocessing(dset)
except NotImplementedError:
raise NotImplementedError(type(self).__name__ + " preprocessing can't be reversed.")
try:
            dset = self._parent_preprocess.reverse_preprocessing(dset)
except AttributeError:
pass
return dset
@staticmethod
def _dataset_copy(dataset, inputs=None, targets=None):
inp = inputs if inputs is not None else dataset.inputs
tar = targets if targets is not None else dataset.targets
return dataset.create_linked_dataset(inp, tar, dataset.name, dataset.keep_on_cpu)
@abstractmethod
def _reverse_preprocessing(self, dset):
raise NotImplementedError("Subclass of 'Preprocess' must implement '_reverse_preprocess'.")
@abstractmethod
def _apply_preprocessing(self, dset):
raise NotImplementedError("Subclass of 'Preprocess' must implement '_apply_preprocess'.")
|
{
"content_hash": "8397e5b6c59b9dd4425222d05050b55c",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 99,
"avg_line_length": 34.674418604651166,
"alnum_prop": 0.6586183769282361,
"repo_name": "ASalvail/smartlearner",
"id": "812d9b048b2e120af79500b6bcfb7280778838e4",
"size": "1491",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "smartlearner/interfaces/preprocess.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "72470"
}
],
"symlink_target": ""
}
|
"""Tests for `strategy.reduce`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import strategy_combinations
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
class StrategyReduceTest(test.TestCase, parameterized.TestCase):
@combinations.generate(
combinations.combine(
distribution=strategy_combinations.all_strategies,
mode=["eager"]
))
def test_reduce_with_axis(self, distribution):
@def_function.function
def fn():
return constant_op.constant([1., 2.])
x = distribution.run(fn)
x_m = distribution.reduce(reduce_util.ReduceOp.MEAN, x, axis=0)
self.assertEqual(1.5, self.evaluate(x_m))
x_s = distribution.reduce(reduce_util.ReduceOp.SUM, x, axis=0)
self.assertEqual(3 * distribution.num_replicas_in_sync, self.evaluate(x_s))
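    # Arithmetic behind the assertions (sketch): each replica's fn returns
    # [1., 2.]; reduce(MEAN, ..., axis=0) averages across replicas and along
    # axis 0, giving 1.5, while reduce(SUM, ..., axis=0) adds 1. + 2. on each
    # replica, giving 3 * num_replicas_in_sync.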
if __name__ == "__main__":
test.main()
|
{
"content_hash": "4eddb6f9bcde8516f3e8db60b0000d40",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 79,
"avg_line_length": 31.157894736842106,
"alnum_prop": 0.7331081081081081,
"repo_name": "davidzchen/tensorflow",
"id": "a87cce2f0b8f10ec03bccaee5a01f3885f641135",
"size": "1873",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/distribute/strategy_reduce_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "32240"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "887514"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "81865221"
},
{
"name": "CMake",
"bytes": "6500"
},
{
"name": "Dockerfile",
"bytes": "112853"
},
{
"name": "Go",
"bytes": "1867241"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "971474"
},
{
"name": "Jupyter Notebook",
"bytes": "549437"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1921657"
},
{
"name": "Makefile",
"bytes": "65901"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "316967"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "19963"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37285698"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "8992"
},
{
"name": "Shell",
"bytes": "700629"
},
{
"name": "Smarty",
"bytes": "35540"
},
{
"name": "Starlark",
"bytes": "3604653"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
"""main module for the interface"""
import tkinter as tk
import os
from model.Request import Request
from gui.GUIApplication import GUIApplication
if __name__ == "__main__":
app = GUIApplication(master=tk.Tk())
app.mainloop()
|
{
"content_hash": "49a9e3dd1aa3dc2fecf717c0836f72c8",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 45,
"avg_line_length": 21.454545454545453,
"alnum_prop": 0.711864406779661,
"repo_name": "rmulton/lawen",
"id": "2099bbc2758beecbcd35aec226636da66be9d67b",
"size": "236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31647"
}
],
"symlink_target": ""
}
|
from . import ExplorationTechnique
import logging
l = logging.getLogger("angr.exploration_techniques.explorer")
class Explorer(ExplorationTechnique):
"""
Search for up to "num_find" paths that satisfy condition "find", avoiding condition "avoid". Stashes found paths into "find_stash' and avoided paths into "avoid_stash".
The "find" and "avoid" parameters may be any of:
- An address to find
- A set or list of addresses to find
- A function that takes a path and returns whether or not it matches.
If an angr CFG is passed in as the "cfg" parameter and "find" is either a number or a list or a set, then
any paths which cannot possibly reach a success state without going through a failure state will be
preemptively avoided.
If either the "find" or "avoid" parameter is a function returning a boolean, and a path triggers both conditions, it will be added to the find stash, unless "avoid_priority" is set to True.
"""
def __init__(self, find=None, avoid=None, find_stash='found', avoid_stash='avoid', cfg=None, num_find=1, avoid_priority=False):
super(Explorer, self).__init__()
self.find = self._condition_to_lambda(find)
self.avoid = self._condition_to_lambda(avoid)
self.find_stash = find_stash
self.avoid_stash = avoid_stash
self.cfg = cfg
self.ok_blocks = set()
self.num_find = num_find
self.avoid_priority = avoid_priority
self._project = None
# TODO: This is a hack for while CFGFast doesn't handle procedure continuations
from .. import analyses
if isinstance(cfg, analyses.CFGFast):
l.error("CFGFast is currently inappropriate for use with Explorer.")
l.error("Usage of the CFG has been disabled for this explorer.")
self.cfg = None
if self.cfg is not None:
if isinstance(avoid, (int, long)):
avoid = (avoid,)
elif isinstance(avoid, set):
avoid = list(avoid)
elif not isinstance(avoid, (list, tuple)):
avoid = ()
if isinstance(find, (int, long)):
find = (find,)
elif isinstance(find, set):
find = list(find)
elif not isinstance(find, (list, tuple)):
l.error("You must provide at least one 'find' address as a number, set, list, or tuple if you provide a CFG.")
l.error("Usage of the CFG has been disabled for this explorer.")
self.cfg = None
return
for a in avoid:
if cfg.get_any_node(a) is None:
l.warning("'Avoid' address %#x not present in CFG...", a)
# not a queue but a stack... it's just a worklist!
queue = []
for f in find:
nodes = cfg.get_all_nodes(f)
if len(nodes) == 0:
l.warning("'Find' address %#x not present in CFG...", f)
else:
queue.extend(nodes)
seen_nodes = set()
while len(queue) > 0:
n = queue.pop()
if id(n) in seen_nodes:
continue
if n.addr in avoid:
continue
self.ok_blocks.add(n.addr)
seen_nodes.add(id(n))
queue.extend(n.predecessors)
if len(self.ok_blocks) == 0:
l.error("No addresses could be validated by the provided CFG!")
l.error("Usage of the CFG has been disabled for this explorer.")
self.cfg = None
return
l.warning("Please be sure that the CFG you have passed in is complete.")
l.warning("Providng an incomplete CFG can cause viable paths to be discarded!")
def setup(self, pg):
self._project = pg._project
if not self.find_stash in pg.stashes: pg.stashes[self.find_stash] = []
if not self.avoid_stash in pg.stashes: pg.stashes[self.avoid_stash] = []
def filter(self, state):
rFind = self.find(state)
if rFind:
if not state.history.reachable:
return 'unsat'
rAvoid = self.avoid(state)
if rAvoid:
# if there is a conflict
if self.avoid_priority & ((type(rFind) is not set) | (type(rAvoid) is not set)):
# with avoid_priority and one of the conditions is not a set
return self.avoid_stash
if type(rAvoid) is not set:
# rAvoid is False or self.avoid_priority is False
# Setting rAvoid to {} simplifies the rest of the code
rAvoid = {}
if type(rFind) is set:
while state.addr not in rFind:
if state.addr in rAvoid:
return self.avoid_stash
state = self._project.factory.successors(state, num_inst=1).successors[0]
if self.avoid_priority & (state.addr in rAvoid):
# Only occurs if the intersection of rAvoid and rFind is not empty
# Why would anyone want that?
return self.avoid_stash
return (self.find_stash, state)
if self.avoid(state): return self.avoid_stash
if self.cfg is not None and self.cfg.get_any_node(state.addr) is not None:
if state.addr not in self.ok_blocks: return self.avoid_stash
return None
def complete(self, pg):
return len(pg.stashes[self.find_stash]) >= self.num_find
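# Hedged usage sketch (comments only; the addresses are invented and `proj`
# is assumed to be an angr Project -- the simulation-manager spelling varies
# across angr versions):
#
#   simgr = proj.factory.simulation_manager()
#   simgr.use_technique(Explorer(find=0x400a00, avoid={0x400d00}))
#   simgr.run()
#   # states that reached a 'find' address land in simgr.found,
#   # avoided states in simgr.avoid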
|
{
"content_hash": "87c5255386bfa4106033128d494f87c2",
"timestamp": "",
"source": "github",
"line_count": 128,
"max_line_length": 193,
"avg_line_length": 44.1953125,
"alnum_prop": 0.5663779388368393,
"repo_name": "Ruide/angr-dev",
"id": "23420e838c9d774aba86dc69755ef5699212d6e2",
"size": "5657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angr/angr/exploration_techniques/explorer.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "2962"
},
{
"name": "Batchfile",
"bytes": "4542"
},
{
"name": "C",
"bytes": "18511978"
},
{
"name": "C++",
"bytes": "295194"
},
{
"name": "Haskell",
"bytes": "2192"
},
{
"name": "Makefile",
"bytes": "12558"
},
{
"name": "OpenEdge ABL",
"bytes": "2415"
},
{
"name": "Perl",
"bytes": "9974"
},
{
"name": "Python",
"bytes": "5611416"
},
{
"name": "Shell",
"bytes": "41791"
}
],
"symlink_target": ""
}
|
import unittest
import re
import xss
RE_DOMXSS_SOURCES = re.compile(r'(location\s*[\[.])|([.\[]\s*["\']?\s*(arguments|dialogArguments|innerHTML|write(ln)?|open(Dialog)?|showModalDialog|cookie|URL|documentURI|baseURI|referrer|name|opener|parent|top|content|self|frames)\W)|(localStorage|sessionStorage|Database)')
RE_DOMXSS_SINKS = re.compile(r'((src|href|data|location|code|value|action)\s*["\'\]]*\s*\+?\s*=)|((replace|assign|navigate|getResponseHeader|open(Dialog)?|showModalDialog|eval|evaluate|execCommand|execScript|setTimeout|setInterval)\s*["\'\]]*\s*\()')
RE_DOMXSS_SINKS_JQUERY = re.compile(r'/after\(|\.append\(|\.before\(|\.html\(|\.prepend\(|\.replaceWith\(|\.wrap\(|\.wrapAll\(|\$\(|\.globalEval\(|\.add\(|jQUery\(|\$\(|\.parseHTML\(/')
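# Illustrative matches for the patterns above (hedged, not exhaustive):
# RE_DOMXSS_SOURCES matches source reads such as "location.href" or
# ".cookie;"; RE_DOMXSS_SINKS matches sink writes/calls such as "src =" or
# "eval("; RE_DOMXSS_SINKS_JQUERY matches jQuery sinks such as ".append(".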
class XssTest(unittest.TestCase):
    u'''Tests for xss.py'''
def test_obtener_payloads(self):
        u'''Test that the payloads are loaded correctly from the file'''
lista = ['''"\\"><imG/sRc=l oNerrOr=(prompt)() x>",''',
'''"<!--<iMg sRc=--><img src=x oNERror=(prompt)`` x>",''',
u'''"<deTails oNToggle=confi\\u0072m()>",''']
self.assertEqual(xss.obtener_payloads(),
lista
)
def test_buscar_vulnerabilidad_xss_en_url(self):
        u'''Test that an XSS vulnerability is found in the URL'''
self.assertTrue(xss.buscar_vulnerabilidad_xss_en_url('https://google-gruyere.appspot.com/201813828985/snippets.gtl?uid=brie',
{'uid':['brie']},
u'''"<deTails oNToggle=confi\u0072m()>",'''
)
)
def test_script_analizado_previamente(self):
        u'''Test that the same script found at different URLs
        is not scanned twice'''
self.assertFalse(xss.script_analizado_previamente('''<script>alert(1)</script>'''))
self.assertTrue(xss.script_analizado_previamente('''<script>alert(1)</script>'''))
def test_buscar_vulnerabilidades_dom_xss(self):
        u'''Test searching for DOM-based vulnerabilities in a script'''
script = """document.write('<a href="https://www.gambling.com/bJuo_GA7331V2"
title="21 Grand Casino" target="_blank"><img width="300" height="250" border="0" alt="21 Grand Casino"
src="https://www.gambling-affiliation.com/uploads/ads/22759.gif"></a>');"""
self.assertEqual(xss.buscar_vulnerabilidades_dom_xss(script),
{1:['.write(', 'href='], 3:['src=']}
)
self.assertEqual(xss.buscar_vulnerabilidades_dom_xss('''ga('create', 'UA-71981724-1', 'auto');ga('send', 'pageview');'''),
{}
)
def test_obtener_vulnerabilidades_dom_xss(self):
        u'''Test searching for DOM-based vulnerabilities in a single line of a script'''
script = """parent.frames[target].location.href = href;"""
self.assertEqual(xss.obtener_vulnerabilidades_dom_xss(script, RE_DOMXSS_SOURCES),
['location.']
)
self.assertEqual(xss.obtener_vulnerabilidades_dom_xss(script, RE_DOMXSS_SINKS),
['href =']
)
self.assertEqual(xss.obtener_vulnerabilidades_dom_xss(script, RE_DOMXSS_SINKS_JQUERY),
[]
)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "65843176d40e3ecf800c551bee4cf8af",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 275,
"avg_line_length": 51.5,
"alnum_prop": 0.5589459084604715,
"repo_name": "leapalazzolo/XSS",
"id": "cec1c1c4231031ab883c7cf615b041a9480cd2e2",
"size": "3649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_xss.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "45203"
}
],
"symlink_target": ""
}
|
from RGT.XML.SVG.baseContainerNode import BaseContainerNode
from types import StringType
class BaseGlyph(BaseContainerNode):
ATTRIBUTE_D = 'd'
ATTRIBUTE_HORIZ_ADV_X = 'horiz-adv-x'
ATTRIBUTE_VERT_ORIGIN_X = 'vert-origin-x'
ATTRIBUTE_VERT_ORIGIN_Y = 'vert-origin-y'
ATTRIBUTE_VERT_ADV_Y = 'vert-adv-y'
def __init__(self, ownerDoc, tagName):
BaseContainerNode.__init__(self, ownerDoc, tagName)
#add the groups
self._allowedSvgChildNodes.update(self.SVG_GROUP_ANIMATION_ELEMENTS, self.SVG_GROUP_DESCRIPTIVE_ELEMENTS,
self.SVG_GROUP_SHAPE_ELEMENTS, self.SVG_GROUP_STRUCTURAL_ELEMENTS,
self.SVG_GROUP_GRADIENT_ELEMENTS)
#add the individual nodes
self._allowedSvgChildNodes.update(
{self.SVG_A_NODE, self.SVG_ALT_GLYPH_DEF_NODE, self.SVG_CLIP_PATH_NODE, self.SVG_COLOR_PROFILE_NODE,
self.SVG_CURSOR_NODE, self.SVG_FILTER_NODE,
self.SVG_FONT_NODE, self.SVG_FONT_FACE_NODE, self.SVG_FOREIGN_OBJECT_NODE, self.SVG_IMAGE_NODE,
self.SVG_MARKER_NODE, self.SVG_MASK_NODE, self.SVG_PATTERN_NODE,
self.SVG_SCRIPT_NODE, self.SVG_STYLE_NODE, self.SVG_SWITCH_NODE, self.SVG_TEXT_NODE, self.SVG_VIEW_NODE})
def setD(self, data):
if data is not None:
if type(data) is not StringType:
data = str(data)
self._setNodeAttribute(self.ATTRIBUTE_D, data)
def setHorizAdvX(self, data):
if data is not None:
if type(data) is not StringType:
data = str(data)
self._setNodeAttribute(self.ATTRIBUTE_HORIZ_ADV_X, data)
def setVertOriginX(self, data):
if data is not None:
if type(data) is not StringType:
data = str(data)
self._setNodeAttribute(self.ATTRIBUTE_VERT_ORIGIN_X, data)
def setVertOriginY(self, data):
if data is not None:
if type(data) is not StringType:
data = str(data)
self._setNodeAttribute(self.ATTRIBUTE_VERT_ORIGIN_Y, data)
def setVertAdvY(self, data):
if data is not None:
if type(data) is not StringType:
data = str(data)
self._setNodeAttribute(self.ATTRIBUTE_VERT_ADV_Y, data)
def getD(self):
node = self._getNodeAttribute(self.ATTRIBUTE_D)
if node is not None:
return node.nodeValue
return None
def getHorizAdvX(self):
node = self._getNodeAttribute(self.ATTRIBUTE_HORIZ_ADV_X)
if node is not None:
return node.nodeValue
return None
def getVertOriginX(self):
node = self._getNodeAttribute(self.ATTRIBUTE_VERT_ORIGIN_X)
if node is not None:
return node.nodeValue
return None
def getVertOriginY(self):
node = self._getNodeAttribute(self.ATTRIBUTE_VERT_ORIGIN_Y)
if node is not None:
return node.nodeValue
return None
def getVertAdvY(self):
node = self._getNodeAttribute(self.ATTRIBUTE_VERT_ADV_Y)
if node is not None:
return node.nodeValue
return None
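# Usage sketch (comments only; `GlyphNode` is a hypothetical concrete
# subclass supplying the tag name, `doc` a hypothetical owner document):
#
#   glyph = GlyphNode(doc)
#   glyph.setD('M 0 0 L 10 10')
#   glyph.setHorizAdvX(512)      # non-string values are str()-converted
#   glyph.getD()                 # -> 'M 0 0 L 10 10'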
|
{
"content_hash": "773e25cd053e6866f9d6dd9867bcfd71",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 118,
"avg_line_length": 38.917647058823526,
"alnum_prop": 0.597037484885127,
"repo_name": "danrg/RGT-tool",
"id": "6f4ff2e0b6a557f9095593e93cef315e8e0c13e0",
"size": "3308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/RGT/XML/SVG/baseGlyph.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "83200"
},
{
"name": "HTML",
"bytes": "93970"
},
{
"name": "JavaScript",
"bytes": "111380"
},
{
"name": "Python",
"bytes": "788710"
},
{
"name": "SQLPL",
"bytes": "722"
}
],
"symlink_target": ""
}
|
import pymongo
from sklearn.cluster import DBSCAN
import matplotlib.pyplot as plt
import numpy as np
import os
import urllib
import matplotlib.cbook as cbook
from sklearn.datasets.samples_generator import make_blobs
from matplotlib.patches import Ellipse
from copy import deepcopy
__author__ = 'greghines'
client = pymongo.MongoClient()
db = client['penguins']
collection = db["penguin_classifications"]
penguinsX = []
penguinsY = []
i = 0
pen = 0
total = 0
for r in collection.find():
if not("user_name" in r):
continue
#if r["user_name"] != "camallen":
# continue
for a in r["annotations"]:
if ('value' in a) and not(a["value"] in ["penguin", "adult", "no", "yes", "finished", "unfinished", "cant_tell", "", "chick", "eggs", "other"]):
numAnnotations = int(max(a["value"].keys(), key = lambda x:int(x)))
for index in range(numAnnotations):
penguinsX.append(float(a["value"][str(index)]['x']))
penguinsY.append(float(a["value"][str(index)]['y']))
heatmap, xedges, yedges = np.histogram2d(penguinsX, penguinsY, bins=100)
extent = [xedges[0], xedges[-1], yedges[0], yedges[-1]]
plt.clf()
plt.imshow(heatmap, extent=extent)
plt.colorbar()
plt.show()
|
{
"content_hash": "3fe44854b2fe351f30b76882c86e0704",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 153,
"avg_line_length": 27.88888888888889,
"alnum_prop": 0.650996015936255,
"repo_name": "zooniverse/aggregation",
"id": "d739c308c239093776a133e5acce0479ea96994f",
"size": "1277",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "experimental/penguins/bias.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "723"
},
{
"name": "Python",
"bytes": "2184451"
},
{
"name": "Scala",
"bytes": "629"
},
{
"name": "Shell",
"bytes": "190"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import unittest
from zope.interface._compat import PYTHON2 as PY2
from zope.interface.common import builtins
from . import VerifyClassMixin
from . import VerifyObjectMixin
from . import add_verify_tests
class TestVerifyClass(VerifyClassMixin,
unittest.TestCase):
pass
add_verify_tests(TestVerifyClass, (
(builtins.IList, (list,)),
(builtins.ITuple, (tuple,)),
(builtins.ITextString, (type(u'abc'),)),
(builtins.IByteString, (bytes,)),
(builtins.INativeString, (str,)),
(builtins.IBool, (bool,)),
(builtins.IDict, (dict,)),
(builtins.IFile, (file,) if PY2 else ()),
))
class TestVerifyObject(VerifyObjectMixin,
TestVerifyClass):
CONSTRUCTORS = {
builtins.IFile: lambda: open(__file__)
}
|
{
"content_hash": "a157bd705ff84354d9bd7c9a6edd092d",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 49,
"avg_line_length": 24.441176470588236,
"alnum_prop": 0.6582430806257521,
"repo_name": "kimjinyong/i2nsf-framework",
"id": "1f0d33833e0df7f18c9d979ae1c5b12e6983375c",
"size": "1463",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Hackathon-112/analyzer/.local/lib/python3.5/site-packages/zope/interface/common/tests/test_builtins.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4396520"
},
{
"name": "C++",
"bytes": "9389"
},
{
"name": "CSS",
"bytes": "51736"
},
{
"name": "Dockerfile",
"bytes": "3839"
},
{
"name": "Emacs Lisp",
"bytes": "24812"
},
{
"name": "Erlang",
"bytes": "1364078"
},
{
"name": "HTML",
"bytes": "42486541"
},
{
"name": "Hack",
"bytes": "6349"
},
{
"name": "Java",
"bytes": "7976"
},
{
"name": "JavaScript",
"bytes": "533000"
},
{
"name": "Makefile",
"bytes": "401170"
},
{
"name": "PHP",
"bytes": "164007"
},
{
"name": "Perl",
"bytes": "2188"
},
{
"name": "Python",
"bytes": "3004949"
},
{
"name": "QMake",
"bytes": "360"
},
{
"name": "Roff",
"bytes": "3906372"
},
{
"name": "Shell",
"bytes": "83872"
},
{
"name": "XSLT",
"bytes": "167018"
}
],
"symlink_target": ""
}
|
"""
Machine arithmetics - determine the parameters of the
floating-point arithmetic system
Author: Pearu Peterson, September 2003
"""
from __future__ import division, absolute_import, print_function
__all__ = ['MachAr']
from numpy.core.fromnumeric import any
from numpy.core.numeric import errstate
# Need to speed this up...especially for longfloat
class MachAr(object):
"""
Diagnosing machine parameters.
Attributes
----------
ibeta : int
Radix in which numbers are represented.
it : int
Number of base-`ibeta` digits in the floating point mantissa M.
machep : int
Exponent of the smallest (most negative) power of `ibeta` that,
added to 1.0, gives something different from 1.0
eps : float
Floating-point number ``beta**machep`` (floating point precision)
negep : int
        Exponent of the smallest power of `ibeta` that, subtracted
from 1.0, gives something different from 1.0.
epsneg : float
Floating-point number ``beta**negep``.
iexp : int
Number of bits in the exponent (including its sign and bias).
minexp : int
Smallest (most negative) power of `ibeta` consistent with there
being no leading zeros in the mantissa.
xmin : float
Floating point number ``beta**minexp`` (the smallest [in
magnitude] usable floating value).
maxexp : int
Smallest (positive) power of `ibeta` that causes overflow.
xmax : float
``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
usable floating value).
irnd : int
In ``range(6)``, information on what kind of rounding is done
in addition, and on how underflow is handled.
ngrd : int
Number of 'guard digits' used when truncating the product
of two mantissas to fit the representation.
epsilon : float
Same as `eps`.
tiny : float
Same as `xmin`.
huge : float
Same as `xmax`.
precision : float
``- int(-log10(eps))``
resolution : float
``- 10**(-precision)``
Parameters
----------
float_conv : function, optional
Function that converts an integer or integer array to a float
or float array. Default is `float`.
int_conv : function, optional
Function that converts a float or float array to an integer or
integer array. Default is `int`.
float_to_float : function, optional
Function that converts a float array to float. Default is `float`.
Note that this does not seem to do anything useful in the current
implementation.
float_to_str : function, optional
Function that converts a single float to a string. Default is
``lambda v:'%24.16e' %v``.
title : str, optional
Title that is printed in the string representation of `MachAr`.
See Also
--------
finfo : Machine limits for floating point types.
iinfo : Machine limits for integer types.
References
----------
.. [1] Press, Teukolsky, Vetterling and Flannery,
"Numerical Recipes in C++," 2nd ed,
Cambridge University Press, 2002, p. 31.
"""
def __init__(self, float_conv=float, int_conv=int,
float_to_float=float,
float_to_str=lambda v: '%24.16e' % v,
title='Python floating point number'):
"""
float_conv - convert integer to float (array)
int_conv - convert float (array) to integer
float_to_float - convert float array to float
float_to_str - convert array float to str
title - description of used floating point numbers
"""
# We ignore all errors here because we are purposely triggering
        # underflow to detect the properties of the running arch.
with errstate(under='ignore'):
self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)
def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
max_iterN = 10000
msg = "Did not converge after %d tries with %s"
one = float_conv(1)
two = one + one
zero = one - one
# Do we really need to do this? Aren't they 2 and 2.0?
# Determine ibeta and beta
a = one
for _ in range(max_iterN):
a = a + a
temp = a + one
temp1 = temp - a
if any(temp1 - one != zero):
break
else:
raise RuntimeError(msg % (_, one.dtype))
b = one
for _ in range(max_iterN):
b = b + b
temp = a + b
itemp = int_conv(temp - a)
if any(itemp != 0):
break
else:
raise RuntimeError(msg % (_, one.dtype))
ibeta = itemp
beta = float_conv(ibeta)
# Determine it and irnd
it = -1
b = one
for _ in range(max_iterN):
it = it + 1
b = b * beta
temp = b + one
temp1 = temp - b
if any(temp1 - one != zero):
break
else:
raise RuntimeError(msg % (_, one.dtype))
betah = beta / two
a = one
for _ in range(max_iterN):
a = a + a
temp = a + one
temp1 = temp - a
if any(temp1 - one != zero):
break
else:
raise RuntimeError(msg % (_, one.dtype))
temp = a + betah
irnd = 0
if any(temp - a != zero):
irnd = 1
tempa = a + beta
temp = tempa + betah
if irnd == 0 and any(temp - tempa != zero):
irnd = 2
# Determine negep and epsneg
negep = it + 3
betain = one / beta
a = one
for i in range(negep):
a = a * betain
b = a
for _ in range(max_iterN):
temp = one - a
if any(temp - one != zero):
break
a = a * beta
negep = negep - 1
# Prevent infinite loop on PPC with gcc 4.0:
if negep < 0:
raise RuntimeError("could not determine machine tolerance "
"for 'negep', locals() -> %s" % (locals()))
else:
raise RuntimeError(msg % (_, one.dtype))
negep = -negep
epsneg = a
# Determine machep and eps
machep = - it - 3
a = b
for _ in range(max_iterN):
temp = one + a
if any(temp - one != zero):
break
a = a * beta
machep = machep + 1
else:
raise RuntimeError(msg % (_, one.dtype))
eps = a
# Determine ngrd
ngrd = 0
temp = one + eps
if irnd == 0 and any(temp * one - one != zero):
ngrd = 1
# Determine iexp
i = 0
k = 1
z = betain
t = one + eps
nxres = 0
for _ in range(max_iterN):
y = z
z = y * y
a = z * one # Check here for underflow
temp = z * t
if any(a + a == zero) or any(abs(z) >= y):
break
temp1 = temp * betain
if any(temp1 * beta == z):
break
i = i + 1
k = k + k
else:
raise RuntimeError(msg % (_, one.dtype))
if ibeta != 10:
iexp = i + 1
mx = k + k
else:
iexp = 2
iz = ibeta
while k >= iz:
iz = iz * ibeta
iexp = iexp + 1
mx = iz + iz - 1
# Determine minexp and xmin
for _ in range(max_iterN):
xmin = y
y = y * betain
a = y * one
temp = y * t
if any((a + a) != zero) and any(abs(y) < xmin):
k = k + 1
temp1 = temp * betain
if any(temp1 * beta == y) and any(temp != y):
nxres = 3
xmin = y
break
else:
break
else:
raise RuntimeError(msg % (_, one.dtype))
minexp = -k
# Determine maxexp, xmax
if mx <= k + k - 3 and ibeta != 10:
mx = mx + mx
iexp = iexp + 1
maxexp = mx + minexp
irnd = irnd + nxres
if irnd >= 2:
maxexp = maxexp - 2
i = maxexp + minexp
if ibeta == 2 and not i:
maxexp = maxexp - 1
if i > 20:
maxexp = maxexp - 1
if any(a != y):
maxexp = maxexp - 2
xmax = one - epsneg
if any(xmax * one != xmax):
xmax = one - beta * epsneg
xmax = xmax / (xmin * beta * beta * beta)
i = maxexp + minexp + 3
for j in range(i):
if ibeta == 2:
xmax = xmax + xmax
else:
xmax = xmax * beta
self.ibeta = ibeta
self.it = it
self.negep = negep
self.epsneg = float_to_float(epsneg)
self._str_epsneg = float_to_str(epsneg)
self.machep = machep
self.eps = float_to_float(eps)
self._str_eps = float_to_str(eps)
self.ngrd = ngrd
self.iexp = iexp
self.minexp = minexp
self.xmin = float_to_float(xmin)
self._str_xmin = float_to_str(xmin)
self.maxexp = maxexp
self.xmax = float_to_float(xmax)
self._str_xmax = float_to_str(xmax)
self.irnd = irnd
self.title = title
# Commonly used parameters
self.epsilon = self.eps
self.tiny = self.xmin
self.huge = self.xmax
import math
self.precision = int(-math.log10(float_to_float(self.eps)))
ten = two + two + two + two + two
resolution = ten ** (-self.precision)
self.resolution = float_to_float(resolution)
self._str_resolution = float_to_str(resolution)
def __str__(self):
fmt = (
'Machine parameters for %(title)s\n'
'---------------------------------------------------------------------\n'
'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
'---------------------------------------------------------------------\n'
)
return fmt % self.__dict__
if __name__ == '__main__':
print(MachAr())
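# Attribute-access sketch: on an IEEE-754 double platform the default probe
# yields, for example:
#
#   ma = MachAr()
#   ma.ibeta      # 2  (binary radix)
#   ma.eps        # ~2.220446e-16
#   ma.precision  # 15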
|
{
"content_hash": "1adeb9ae80ce58956d9beb34c59bee9a",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 89,
"avg_line_length": 31.580174927113703,
"alnum_prop": 0.4943685376661743,
"repo_name": "DailyActie/Surrogate-Model",
"id": "96a210fe07a594f66c25ed2ca14387162c161434",
"size": "10832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "01-codes/numpy-master/numpy/core/machar.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "345"
},
{
"name": "Batchfile",
"bytes": "18746"
},
{
"name": "C",
"bytes": "13004913"
},
{
"name": "C++",
"bytes": "14692003"
},
{
"name": "CMake",
"bytes": "72831"
},
{
"name": "CSS",
"bytes": "303488"
},
{
"name": "Fortran",
"bytes": "7339415"
},
{
"name": "HTML",
"bytes": "854774"
},
{
"name": "Java",
"bytes": "38854"
},
{
"name": "JavaScript",
"bytes": "2432846"
},
{
"name": "Jupyter Notebook",
"bytes": "829689"
},
{
"name": "M4",
"bytes": "1379"
},
{
"name": "Makefile",
"bytes": "48708"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "PHP",
"bytes": "93585"
},
{
"name": "Pascal",
"bytes": "1449"
},
{
"name": "Perl",
"bytes": "1152272"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "34668203"
},
{
"name": "Roff",
"bytes": "5925"
},
{
"name": "Ruby",
"bytes": "92498"
},
{
"name": "Shell",
"bytes": "94698"
},
{
"name": "TeX",
"bytes": "156540"
},
{
"name": "TypeScript",
"bytes": "41691"
}
],
"symlink_target": ""
}
|
from translations.models import Translation
def merge_dups():
done = []
count = 0
def get_dups(t):
return Translation.objects.filter(de__iexact=t.de, en__iexact=t.en).exclude(id=t.id)
def get_next():
qs = Translation.objects.exclude(id__in=done).order_by('created')
if qs:
return qs[0]
next = get_next()
while next:
qs = get_dups(next)
if qs:
for t in qs:
count += t.translateditem_set.update(translation=next)
t.delete()
done.append(next.id)
next = get_next()
    print '%s TranslatedItem rows unified' % count
|
{
"content_hash": "9af0e8d160b8303a6dd23d02f04ca82f",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 92,
"avg_line_length": 23.214285714285715,
"alnum_prop": 0.5584615384615385,
"repo_name": "placeB/translation-service",
"id": "4d1a5b89f03659eea515a89be38974230b4ac83b",
"size": "707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/translations/merge.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "483"
},
{
"name": "Python",
"bytes": "56777"
}
],
"symlink_target": ""
}
|
from typing import Any, Optional
from cleo.styles import OutputStyle
from sdoc.sdoc2 import in_scope, node_store, out_scope
from sdoc.sdoc2.node.Node import Node
class Formatter:
"""
Abstract parent class for all formatters for generating the output of nodes in a requested format.
"""
# ------------------------------------------------------------------------------------------------------------------
def __init__(self, io: OutputStyle, parent):
"""
Object constructor.
:param OutputStyle io: The IO object.
:param sdoc.sdoc2.formatter.Formatter.Formatter|None parent: The formatter for the parent node.
"""
self._io: OutputStyle = io
"""
The IO object.
"""
self._parent = parent
"""
The formatter for the parent node.
:type: sdoc.sdoc2.formatter.Formatter.Formatter
"""
self._errors: int = 0
"""
The error count.
"""
# ------------------------------------------------------------------------------------------------------------------
@property
def errors(self) -> int:
"""
Getter for the error count.
:rtype: int
"""
if self._parent:
return self._parent.errors
return self._errors
# ------------------------------------------------------------------------------------------------------------------
def error(self, message: str, node: Optional[Node] = None) -> None:
"""
Logs an error.
        :param str message: The error message. The position of the token ('at filename:line.column') will be appended to this message.
:param Node node: The node where the error occurred.
"""
if self._parent:
self._parent.error(message, node)
else:
self._errors += 1
messages = [message]
if node:
filename = node.position.file_name
line_number = node.position.start_line
column_number = node.position.start_column + 1
messages.append('Position: {0!s}:{1:d}.{2:d}'.format(filename, line_number, column_number))
self._io.error(messages)
# ------------------------------------------------------------------------------------------------------------------
def generate(self, node: Node, file: Any) -> None:
"""
Generates the representation of a node in the requested output format.
:param Node node: The node for which the output must be generated.
:param any file: The output file.
"""
for node_id in node.child_nodes:
child_node = in_scope(node_id)
formatter = node_store.create_formatter(self._io, child_node.get_command(), self)
formatter.generate(child_node, file)
out_scope(child_node)
# ----------------------------------------------------------------------------------------------------------------------
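# Hypothetical concrete formatter sketch (not part of SDoc; the `file.write`
# call and HTML-comment output are assumptions for illustration). It shows
# the intended pattern: emit output for the current node, then delegate to
# the base class, which recurses into the child nodes.
class PassThroughFormatter(Formatter):
    def generate(self, node: Node, file: Any) -> None:
        file.write('<!-- {} -->\n'.format(node.get_command()))
        super().generate(node, file)  # recurse via Formatter.generate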
|
{
"content_hash": "eca2d914cf1d58a40f318ff42aadc78b",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 120,
"avg_line_length": 33.96629213483146,
"alnum_prop": 0.4608005292755541,
"repo_name": "SDoc/py-sdoc",
"id": "0d31640b37d0f64887d1d77cb26ab9459a635e18",
"size": "3023",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdoc/sdoc2/formatter/Formatter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ANTLR",
"bytes": "7968"
},
{
"name": "Python",
"bytes": "406820"
}
],
"symlink_target": ""
}
|
import multiprocessing
import Queue
import signal
import os
import time
from beaver.config import BeaverConfig
from beaver.queue import run_queue
from beaver.ssh_tunnel import create_ssh_tunnel
from beaver.utils import setup_custom_logger, REOPEN_FILES
from beaver.worker.worker import Worker
def run(args=None):
logger = setup_custom_logger('beaver', args)
beaver_config = BeaverConfig(args, logger=logger)
if beaver_config.get('logstash_version') not in [0, 1]:
raise LookupError("Invalid logstash_version")
queue = multiprocessing.Queue(beaver_config.get('max_queue_size'))
worker_proc = None
ssh_tunnel = create_ssh_tunnel(beaver_config, logger=logger)
def cleanup(signalnum, frame):
if signalnum is not None:
sig_name = tuple((v) for v, k in signal.__dict__.iteritems() if k == signalnum)[0]
logger.info('{0} detected'.format(sig_name))
logger.info('Shutting down. Please wait...')
else:
logger.info('Worker process cleanup in progress...')
try:
queue.put_nowait(('exit', ()))
except Queue.Full:
pass
if worker_proc is not None:
try:
worker_proc.terminate()
worker_proc.join()
except RuntimeError:
pass
if ssh_tunnel is not None:
logger.info('Closing ssh tunnel...')
ssh_tunnel.close()
if signalnum is not None:
logger.info('Shutdown complete.')
return os._exit(signalnum)
signal.signal(signal.SIGTERM, cleanup)
signal.signal(signal.SIGINT, cleanup)
signal.signal(signal.SIGQUIT, cleanup)
def create_queue_consumer():
process_args = (queue, beaver_config, logger)
proc = multiprocessing.Process(target=run_queue, args=process_args)
logger.info('Starting queue consumer')
proc.start()
return proc
def create_queue_producer():
worker = Worker(beaver_config, queue_consumer_function=create_queue_consumer, callback=queue.put, logger=logger)
worker.loop()
while 1:
try:
if REOPEN_FILES:
logger.debug('Detected non-linux platform. Files will be reopened for tailing')
t = time.time()
while True:
if worker_proc is None or not worker_proc.is_alive():
logger.info('Starting worker...')
t = time.time()
worker_proc = multiprocessing.Process(target=create_queue_producer)
worker_proc.start()
logger.info('Working...')
worker_proc.join(10)
if beaver_config.get('refresh_worker_process'):
if beaver_config.get('refresh_worker_process') < time.time() - t:
logger.info('Worker has exceeded refresh limit. Terminating process...')
cleanup(None, None)
except KeyboardInterrupt:
pass
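# A minimal sketch of the signal-name lookup performed in cleanup() above: the
# handler scans the signal module's namespace for the attribute whose value
# matches the received signal number. On Python 3.5+ the same result is
# available directly as signal.Signals(signalnum).name; this generic version
# also runs on Python 2, which the file above targets.
import signal
def _signal_name(signalnum):
    names = [name for name, value in vars(signal).items()
             if name.startswith('SIG') and not name.startswith('SIG_')
             and value == signalnum]
    return names[0] if names else str(signalnum)
# _signal_name(signal.SIGTERM) -> 'SIGTERM'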
|
{
"content_hash": "3ecd9cb7d139a7fe17d2a19400a3b480",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 120,
"avg_line_length": 33.53846153846154,
"alnum_prop": 0.5963302752293578,
"repo_name": "moniker-dns/debian-beaver",
"id": "93f76fb9e6ef30b74341e2c329d757566151403e",
"size": "3076",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beaver/dispatcher/worker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "D",
"bytes": "4149"
},
{
"name": "Python",
"bytes": "121852"
},
{
"name": "Shell",
"bytes": "929"
}
],
"symlink_target": ""
}
|
from optparse import OptionParser
import os
import sys
import glob
import numpy as np
from collections import defaultdict
from core.external import parse_tree
from core.util import dirs
from core.util import file_handling as fh
from core.feature_extractors.vocabulary_with_counts import VocabWithCounts
pronoun_list = ['he', 'her', 'hers', 'herself', 'him', 'himself', 'his',
'i', 'it', 'its', 'itself',
'me', 'my', 'myself',
'our', 'ours', 'ourselves',
'she',
'that', 'their', 'theirs', 'them', 'themselves', 'these', 'they', 'this', 'those',
'us',
'we',
'you', 'your', 'yours', 'yourself',
"'s"]
#stopwords = ['say', 'be', 'have']
stopwords = []
def main():
usage = "%prog dataset"
parser = OptionParser(usage=usage)
#parser.add_option('--keyword', dest='key', default=None,
# help='Keyword argument: default=%default')
#parser.add_option('--boolarg', action="store_true", dest="boolarg", default=False,
# help='Keyword argument: default=%default')
(options, args) = parser.parse_args()
if len(args) < 1:
sys.exit("Please specify a project / dataset")
project = args[0]
dirs.make_base_dir(project)
extract_story_elements()
def extract_story_elements():
min_head_vocab = 5
min_role_vocab = 4
min_tuples = 3
ATTRIBUTE = 0
AGENT_ROLE = 1
PATIENT_ROLE = 2
SURFACE_FORM = 3
parsed_dir = os.path.join(dirs.data_stanford_dir, 'parsed')
parsed_files = glob.glob(os.path.join(parsed_dir, '*.json'))
dependencies_file = os.path.join(dirs.data_stanford_dir, 'dependency_tuple_ids.json')
dependencies = fh.read_json(dependencies_file)
coref_file = os.path.join(dirs.data_stanford_dir, 'coref_heads.json')
coref_heads = fh.read_json(coref_file)
supersense_tags = fh.read_json(os.path.join(dirs.data_amalgram_dir, 'all_tags.json'))
heads = defaultdict(int)
tokens = defaultdict(int)
attributes = defaultdict(int)
agent_roles = defaultdict(int)
patient_roles = defaultdict(int)
story_elements = {}
print "Extracting story elements"
for f_i, f in enumerate(parsed_files):
sentences = fh.read_json(f)
basename = fh.get_basename_wo_ext(f)
element_list = extract_story_elements_from_article(sentences, dependencies[basename], coref_heads[basename], supersense_tags[basename], basename)
story_elements[basename] = element_list
for element in element_list:
for h in element.head_words:
heads[h] += 1
for t in element.attributes:
attributes[t] += 1
for t in element.agent_roles:
agent_roles[t] += 1
for t in element.patient_roles:
patient_roles[t] += 1
print "Finding most common tokens"
common_heads = [(v, k) for k, v in heads.items()]
common_heads.sort()
common_heads.reverse()
output_filename = os.path.join(dirs.lda_dir, 'common_heads.json')
fh.write_to_json(common_heads, output_filename, sort_keys=False)
"""
common_tokens = [(v, k) for k, v in tokens.items()]
common_tokens.sort()
common_tokens.reverse()
output_filename = os.path.join(dirs.lda_dir, 'common_tokens.json')
fh.write_to_json(common_tokens, output_filename, sort_keys=False)
"""
common_attributes = [(v, k) for k, v in attributes.items()]
common_attributes.sort()
common_attributes.reverse()
output_filename = os.path.join(dirs.lda_dir, 'common_attributes.json')
fh.write_to_json(common_attributes, output_filename, sort_keys=False)
common_agent_roles = [(v, k) for k, v in agent_roles.items()]
common_agent_roles.sort()
common_agent_roles.reverse()
output_filename = os.path.join(dirs.lda_dir, 'common_agent_roles.json')
fh.write_to_json(common_agent_roles, output_filename, sort_keys=False)
common_patient_roles = [(v, k) for k, v in patient_roles.items()]
common_patient_roles.sort()
common_patient_roles.reverse()
output_filename = os.path.join(dirs.lda_dir, 'common_patient_roles.json')
fh.write_to_json(common_patient_roles, output_filename, sort_keys=False)
print pronoun_list
#most_common_heads = {k: v for v, k in common_heads if v >= min_head_vocab and k not in pronoun_list}
most_common_attributes = {k: v for v, k in common_attributes if (v >= min_role_vocab and k not in pronoun_list)}
most_common_agent_roles = {k: v for v, k in common_agent_roles if (v >= min_role_vocab and k not in pronoun_list and k not in stopwords)}
most_common_patient_roles = {k: v for v, k in common_patient_roles if (v >= min_role_vocab and k not in pronoun_list and k not in stopwords)}
output_filename = os.path.join(dirs.lda_dir, 'most_common_attributes.json')
fh.write_to_json(most_common_attributes, output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'most_common_agent_roles.json')
fh.write_to_json(most_common_agent_roles, output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'most_common_patient_roles.json')
fh.write_to_json(most_common_patient_roles, output_filename, sort_keys=False)
print len(most_common_attributes)
print len(most_common_agent_roles)
print len(most_common_patient_roles)
print "Filtering tuples"
valid_elements = defaultdict(list)
for basename, element_list in story_elements.items():
for se in element_list:
se.valid_heads = [h for h in se.head_words if h not in pronoun_list]
se.valid_phrases = [h for h in se.phrases if h not in pronoun_list]
if len(se.valid_heads) > 0:
se.valid_attributes = [t for t in se.attributes if t in most_common_attributes]
se.valid_agent_roles = [t for t in se.agent_roles if t in most_common_agent_roles]
se.valid_patient_roles = [t for t in se.patient_roles if t in most_common_patient_roles]
se.tuples = [(ATTRIBUTE, t) for t in se.valid_attributes] + \
[(AGENT_ROLE, t) for t in se.valid_agent_roles] + \
[(PATIENT_ROLE, t) for t in se.valid_patient_roles]
#[(SURFACE_FORM, t) for t in se.valid_heads]
if len(se.tuples) >= min_tuples:
valid_elements[basename].append(se)
print "Constructing vocabulary"
n_tuples = 0
vocab = VocabWithCounts('', add_oov=False)
n_entities = 0
for basename, element_list in valid_elements.items():
for se in element_list:
tokens = [token for role, token in se.tuples]
vocab.add_tokens(tokens)
n_tuples += len(tokens)
n_entities += 1
head_word_vocab = VocabWithCounts('', add_oov=False)
for basename, element_list in valid_elements.items():
for se in element_list:
tokens = [token for token in se.valid_heads]
head_word_vocab.add_tokens(tokens)
head_phrase_vocab = VocabWithCounts('', add_oov=False)
for basename, element_list in valid_elements.items():
for se in element_list:
tokens = [token for token in se.valid_phrases]
head_phrase_vocab.add_tokens(tokens)
print "Building indices"
tuple_vocab = np.zeros(n_tuples, dtype=int) # vocab index of the ith word
tuple_entity = np.zeros(n_tuples, dtype=int)
tuple_role = []
    entity_doc = np.zeros(n_entities, dtype=int) # document index of the ith entity
docs = valid_elements.keys()
docs.sort()
vocab_counts = np.zeros(len(vocab), dtype=int)
article_mapping = []
entity_index = 0
head_word_vocab_list = []
head_word_entity_list = []
head_phrase_vocab_list = []
head_phrase_entity_list = []
t_i = 0
for d_i, d in enumerate(docs):
element_list = valid_elements[d]
for se in element_list:
entity_doc[entity_index] = d_i
for role, token in se.tuples:
tuple_entity[t_i] = entity_index
tuple_role.append(role)
vocab_index = vocab.get_index(token)
tuple_vocab[t_i] = vocab_index
vocab_counts[vocab_index] += 1
t_i += 1
for token in se.valid_heads:
head_word_vocab_index = head_word_vocab.get_index(token)
head_word_vocab_list.append(head_word_vocab_index)
head_word_entity_list.append(entity_index)
for token in se.valid_phrases:
head_phrase_vocab_index = head_phrase_vocab.get_index(token)
head_phrase_vocab_list.append(head_phrase_vocab_index)
head_phrase_entity_list.append(entity_index)
article_mapping.append(str(entity_index) + ':' + d + ':' + ','.join(se.head_words) + ':' + ','.join(se.valid_attributes) + ':' + ','.join(se.valid_agent_roles) + ':' + ','.join(se.valid_patient_roles))
entity_index += 1
print len(docs), "valid documents"
print entity_index, "entities"
print t_i, "tuples"
print len(vocab), "word types"
print np.min(vocab_counts), np.max(vocab_counts), np.sum(vocab_counts)
output_filename = os.path.join(dirs.lda_dir, 'tuple_vocab.json')
fh.write_to_json(list(tuple_vocab), output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'tuple_role.json')
fh.write_to_json(list(tuple_role), output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'tuple_entity.json')
fh.write_to_json(list(tuple_entity), output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'entity_doc.json')
fh.write_to_json(list(entity_doc), output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'vocab.json')
fh.write_to_json(vocab.index2token, output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'docs.json')
fh.write_to_json(list(docs), output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'article_map.json')
fh.write_to_json(list(article_mapping), output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'head_word_vocab.json')
fh.write_to_json(head_word_vocab.index2token, output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'head_phrase_vocab.json')
fh.write_to_json(head_phrase_vocab.index2token, output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'head_word_vocab_list.json')
fh.write_to_json(head_word_vocab_list, output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'head_word_entity_list.json')
fh.write_to_json(head_word_entity_list, output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'head_phrase_vocab_list.json')
fh.write_to_json(head_phrase_vocab_list, output_filename, sort_keys=False)
output_filename = os.path.join(dirs.lda_dir, 'head_phrase_entity_list.json')
fh.write_to_json(head_phrase_entity_list, output_filename, sort_keys=False)
def extract_story_elements_from_article(sentences, dependencies, coref_heads, supersense_tags, basename):
story_elements = []
# make a parse tree of dependencies:
for sent_index, sent in enumerate(dependencies):
tree = parse_tree.ParseTree()
        # go through each dependent in the tree
for dependency in sent:
relation, governor_id, dependent_id = dependency
#governor = sentences[sent_index][governor_id]
#dependent = sentences[sent_index][dependent_id]['word']
dependent_lemma = sentences[sent_index][dependent_id]['lemma']
dependent_POS = sentences[sent_index][dependent_id]['POS']
dependent_NER = sentences[sent_index][dependent_id]['NER']
# check if this dependent corefers to another entity
if len(coref_heads) > 0 and str(dependent_id) in coref_heads[sent_index]:
entity = str(coref_heads[sent_index][str(dependent_id)])
else:
entity = '_'
            # check if this dependent has a supersense tag associated with it
if dependent_id < len(supersense_tags[sent_index]):
ss_word, ss_tag = supersense_tags[sent_index][dependent_id]
assert ss_word == sentences[sent_index][dependent_id]['word']
else:
ss_tag = '_'
print "missing tags for", basename
# add this dependent to the tree
tree.add_relation(dependent_id, dependent_lemma, dependent_POS, dependent_NER, entity, ss_tag, governor_id, relation)
# printing for debug
#print sent_index, ' '.join([t['word'] for t in sentences[sent_index]])
#tree.print_tree(target_relation='iobj', increase_indent=False)
#tree.print_tree()
# extract the story elements from this tree, and add them to the list
story_elements.extend(tree.get_story_elements())
#print '** ORIGINAL ELEMENTS **'
#for se in story_elements:
# se.display()
# now that we have all of the story elements for this article, cluster them
clustered_elements = cluster_story_elements(story_elements)
# printing for debug
"""
print '** CLUSTERED ELEMENTS **'
for cluster_num, cluster in clustered_elements.items():
print "Cluster ", cluster_num
for element in cluster:
element.display()
"""
# filter elements to exclude the ones with few events (or other?)
#print '** FILTERED ELEMENTS **'
filtered_elements = []
for se in clustered_elements:
use = False
if 'PERSON' in se.sst or 'GROUP' in se.sst or 'LOCATION' in se.sst or 'OBJECT' in se.sst or 'ARTIFACT' in se.sst or 'PROCESS' in se.sst or 'ACT' in se.sst or 'PERSON' in se.ner or 'ORGANIZATION' in se.ner or 'LOCATION' in se.ner:
use = True
#if se.sst in ['PERSON', 'GROUP', 'ACT', 'ARTIFACT', 'LOCATION', 'COGNITION', 'ATTRIBUTE', 'SUBSTANCE', 'PROCESS', 'OBJECT']:
# use = True
#elif se.NER in ['PERSON', 'LOCATION', 'ORGANIZATION']:
# use = True
if use:
filtered_elements.append(se)
#se.display()
return filtered_elements
#return clustered_elements
# come back to this later...
# will be useful once I start incorporating descriptions, but not so much for surface form.
# also, this is not working properly as it is...
def cluster_story_elements(elements):
# make a list of pronouns to exclude from clustering (all elements should already be lower cased)
# convert to a dict for fast lookup
pronouns = dict(zip(pronoun_list, np.ones(len(pronoun_list))))
n_elements = len(elements)
coref_clusters = defaultdict(list)
head_words_clusters = defaultdict(list)
# find story elements that have been clustered by coref, or share a (non-pronoun) head word
for se_i, se in enumerate(elements):
# check if this word is part of a cluster identified by coref
cluster = se.coref
if cluster != '_':
cluster = int(cluster)
# note the cluster assignment of this story element
coref_clusters[cluster].append(se_i)
# get the head word
head_word = se.head_word
# investigate the head word, if it's not a pronoun
if head_word not in pronouns:
head_words_clusters[head_word].append(se_i)
# make a dictionary of forward links based on coref clusters
links = defaultdict(set)
for cluster, se_indices in coref_clusters.items():
se_indices.sort()
for start, se_i in enumerate(se_indices[:-1]):
for se_i2 in se_indices[start+1:]:
links[se_i].add(se_i2)
# do the same based on head words
for head_word, se_indices in head_words_clusters.items():
se_indices.sort()
for start, se_i in enumerate(se_indices[:-1]):
for se_i2 in se_indices[start+1:]:
links[se_i].add(se_i2)
# follow the links to assign elements to clusters
n_clusters = 0
clusters = np.zeros(n_elements, dtype=int) # array for cluster assignments
for se_i, se in enumerate(elements):
if clusters[se_i] == 0:
n_clusters += 1
clusters[se_i] = n_clusters
cluster_num = clusters[se_i]
for se_i2 in links[se_i]:
clusters[se_i2] = cluster_num
# make a dict of the final clusters
clustered_elements = defaultdict(list)
for se_i, cluster in enumerate(clusters):
clustered_elements[int(cluster)].append(elements[se_i])
# aggregate information from clustered elements (?)
aggregate_elements = []
for cluster_num, cluster in clustered_elements.items():
# make a copy of the first element in the cluster
agg_element = parse_tree.AggregateElement(cluster[0])
# add the information from all others
for se in cluster[1:]:
agg_element.add_data(se)
aggregate_elements.append(agg_element)
#print '**', cluster_num, '**'
#agg_element.display()
return aggregate_elements
#return clustered_elements
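# The single forward pass over 'links' in cluster_story_elements above can
# miss transitive merges (as the note before that function admits). A minimal
# union-find sketch, assuming the same dict mapping element index -> set of
# linked indices, that computes the full transitive clustering:
def _cluster_by_links(n_elements, links):
    parent = list(range(n_elements))
    def find(i):
        while parent[i] != i:
            parent[i] = parent[parent[i]]  # path halving keeps trees shallow
            i = parent[i]
        return i
    def union(i, j):
        root_i, root_j = find(i), find(j)
        if root_i != root_j:
            parent[root_j] = root_i
    for i, targets in links.items():
        for j in targets:
            union(i, j)
    return [find(i) for i in range(n_elements)]
# _cluster_by_links(4, {0: {2}, 3: {2}}) -> elements 0, 2 and 3 share one root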
if __name__ == '__main__':
main()
|
{
"content_hash": "783bb6ad0a84b82507a8867337d2c799",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 237,
"avg_line_length": 41.301886792452834,
"alnum_prop": 0.6299109182275011,
"repo_name": "dallascard/guac",
"id": "b118f462b8a8b5b296decbca2b04da75823938b7",
"size": "17512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/personas/stanford_story_elements.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "997183"
},
{
"name": "Shell",
"bytes": "7969"
}
],
"symlink_target": ""
}
|
import pytest
import uuid
from mock import MagicMock, mock
from polyaxon import settings
from polyaxon.client import RunClient
from polyaxon.lifecycle import V1Statuses
from polyaxon.utils.test_utils import BaseTestCase
@pytest.mark.client_mark
class TestStatuses(BaseTestCase):
@mock.patch("polyaxon_sdk.RunsV1Api.get_run_statuses")
def test_get_statuses(self, sdk_get_run_statuses):
client = RunClient(owner="owner", project="project", run_uuid=uuid.uuid4().hex)
for _ in client.get_statuses():
pass
assert sdk_get_run_statuses.call_count == 1
@mock.patch("polyaxon_sdk.RunsV1Api.get_run_statuses")
def test_get_statuses_watch(self, sdk_get_run_statuses):
settings.CLIENT_CONFIG.watch_interval = 1
client = RunClient(owner="owner", project="project", run_uuid=uuid.uuid4().hex)
for _ in client.watch_statuses():
resp = MagicMock(status=V1Statuses.FAILED, status_conditions=[])
sdk_get_run_statuses.return_value = resp
assert sdk_get_run_statuses.call_count == 2
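# A minimal sketch of the polling pattern the watch test above exercises,
# assuming, as the test does, that watch_statuses re-fetches the status at a
# fixed interval until a terminal status (e.g. FAILED) comes back. fetch_status
# and DONE_STATES are hypothetical stand-ins, not part of the polyaxon API.
import time
DONE_STATES = {'succeeded', 'failed', 'stopped'}
def watch(fetch_status, interval=1.0):
    while True:
        status = fetch_status()
        yield status
        if status in DONE_STATES:
            return
        time.sleep(interval)
# list(watch(iter(['running', 'failed']).__next__, interval=0))
#     -> ['running', 'failed']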
|
{
"content_hash": "39d6b8517ef986ff69d1a167e71e36b5",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 87,
"avg_line_length": 38.464285714285715,
"alnum_prop": 0.7019498607242339,
"repo_name": "polyaxon/polyaxon",
"id": "4506bb16bb52d9aceb6db074a274c83c332d94a1",
"size": "1682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/tests/test_client/test_statuses.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1989"
},
{
"name": "Python",
"bytes": "5201898"
},
{
"name": "Shell",
"bytes": "1565"
}
],
"symlink_target": ""
}
|
from dataclasses import dataclass, field
from dataclasses_json import LetterCase, dataclass_json, config
@dataclass_json
@dataclass
class CamelCasePerson:
given_name: str = field(
metadata={'dataclasses_json': {
'letter_case': LetterCase.CAMEL
}}
)
@dataclass_json
@dataclass
class KebabCasePerson:
given_name: str = field(
metadata={'dataclasses_json': {
'letter_case': LetterCase.KEBAB
}}
)
@dataclass_json
@dataclass
class SnakeCasePerson:
given_name: str = field(
metadata={'dataclasses_json': {
'letter_case': LetterCase.SNAKE
}}
)
@dataclass_json
@dataclass
class PascalCasePerson:
given_name: str = field(
metadata={'dataclasses_json': {
'letter_case': LetterCase.PASCAL
}}
)
@dataclass_json
@dataclass
class FieldNamePerson:
given_name: str = field(metadata=config(field_name='givenName'))
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class CamelCasePersonWithOverride:
given_name: str
years_on_earth: str = field(metadata=config(field_name='age'))
class TestLetterCase:
def test_camel_encode(self):
assert CamelCasePerson('Alice').to_json() == '{"givenName": "Alice"}'
def test_camel_decode(self):
assert CamelCasePerson.from_json(
'{"givenName": "Alice"}') == CamelCasePerson('Alice')
def test_kebab_encode(self):
assert KebabCasePerson('Alice').to_json() == '{"given-name": "Alice"}'
def test_kebab_decode(self):
assert KebabCasePerson.from_json(
'{"given-name": "Alice"}') == KebabCasePerson('Alice')
def test_snake_encode(self):
assert SnakeCasePerson('Alice').to_json() == '{"given_name": "Alice"}'
def test_snake_decode(self):
assert SnakeCasePerson.from_json(
'{"given_name": "Alice"}') == SnakeCasePerson('Alice')
def test_pascal_encode(self):
assert PascalCasePerson('Alice').to_json() == '{"GivenName": "Alice"}'
def test_pascal_decode(self):
assert PascalCasePerson.from_json(
'{"GivenName": "Alice"}') == PascalCasePerson('Alice')
def test_field_name_encode(self):
assert FieldNamePerson('Alice').to_json() == '{"givenName": "Alice"}'
def test_field_name_decode(self):
assert FieldNamePerson.from_json(
'{"givenName": "Alice"}') == FieldNamePerson('Alice')
def test_camel_with_override_encode(self):
assert CamelCasePersonWithOverride(
'Alice', 10).to_json() == '{"givenName": "Alice", "age": 10}'
def test_camel_with_override_decode(self):
expected = CamelCasePersonWithOverride('Alice', 10)
assert CamelCasePersonWithOverride.from_json(
'{"givenName": "Alice", "age": 10}') == expected
def test_from_dict(self):
assert CamelCasePerson.from_dict(
{'givenName': 'Alice'}) == CamelCasePerson('Alice')
def test_to_dict(self):
assert {'givenName': 'Alice'} == CamelCasePerson('Alice').to_dict()
|
{
"content_hash": "8623ab068f611f699a054339b24589b0",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 78,
"avg_line_length": 28.26605504587156,
"alnum_prop": 0.6270691333982473,
"repo_name": "lidatong/dataclasses-json",
"id": "ccc1d45cbf34fd6cfcef67c32e1484f89b67f8f6",
"size": "3081",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_letter_case.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "132753"
},
{
"name": "Shell",
"bytes": "209"
}
],
"symlink_target": ""
}
|
"""
Enable wxPython to be used interactively by setting PyOS_InputHook.
Authors: Robin Dunn, Brian Granger, Ondrej Certik
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
import os
import signal
import sys
import time
from timeit import default_timer as clock
import wx
from IPython.lib.inputhook import stdin_ready
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
def inputhook_wx1():
"""Run the wx event loop by processing pending events only.
This approach seems to work, but its performance is not great as it
relies on having PyOS_InputHook called regularly.
"""
try:
app = wx.GetApp()
if app is not None:
assert wx.Thread_IsMain()
# Make a temporary event loop and process system events until
# there are no more waiting, then allow idle events (which
# will also deal with pending or posted wx events.)
evtloop = wx.EventLoop()
ea = wx.EventLoopActivator(evtloop)
while evtloop.Pending():
evtloop.Dispatch()
app.ProcessIdle()
del ea
except KeyboardInterrupt:
pass
return 0
class EventLoopTimer(wx.Timer):
def __init__(self, func):
self.func = func
wx.Timer.__init__(self)
def Notify(self):
self.func()
class EventLoopRunner(object):
def Run(self, time):
self.evtloop = wx.EventLoop()
self.timer = EventLoopTimer(self.check_stdin)
self.timer.Start(time)
self.evtloop.Run()
def check_stdin(self):
if stdin_ready():
self.timer.Stop()
self.evtloop.Exit()
def inputhook_wx2():
"""Run the wx event loop, polling for stdin.
    This version runs the wx event loop for an undetermined amount of time,
during which it periodically checks to see if anything is ready on
stdin. If anything is ready on stdin, the event loop exits.
The argument to elr.Run controls how often the event loop looks at stdin.
This determines the responsiveness at the keyboard. A setting of 1000
enables a user to type at most 1 char per second. I have found that a
setting of 10 gives good keyboard response. We can shorten it further,
but eventually performance would suffer from calling select/kbhit too
often.
"""
try:
app = wx.GetApp()
if app is not None:
assert wx.Thread_IsMain()
elr = EventLoopRunner()
# As this time is made shorter, keyboard response improves, but idle
# CPU load goes up. 10 ms seems like a good compromise.
elr.Run(time=10) # CHANGE time here to control polling interval
except KeyboardInterrupt:
pass
return 0
def inputhook_wx3():
"""Run the wx event loop by processing pending events only.
This is like inputhook_wx1, but it keeps processing pending events
until stdin is ready. After processing all pending events, a call to
    time.sleep is inserted. This is needed; otherwise, CPU usage stays at 100%.
This sleep time should be tuned though for best performance.
"""
# We need to protect against a user pressing Control-C when IPython is
# idle and this is running. We trap KeyboardInterrupt and pass.
try:
app = wx.GetApp()
if app is not None:
assert wx.Thread_IsMain()
# The import of wx on Linux sets the handler for signal.SIGINT
# to 0. This is a bug in wx or gtk. We fix by just setting it
# back to the Python default.
if not callable(signal.getsignal(signal.SIGINT)):
signal.signal(signal.SIGINT, signal.default_int_handler)
evtloop = wx.EventLoop()
ea = wx.EventLoopActivator(evtloop)
t = clock()
while not stdin_ready():
while evtloop.Pending():
t = clock()
evtloop.Dispatch()
app.ProcessIdle()
# We need to sleep at this point to keep the idle CPU load
            # low. However, if we sleep too long, GUI response is poor. As
# a compromise, we watch how often GUI events are being processed
# and switch between a short and long sleep time. Here are some
# stats useful in helping to tune this.
# time CPU load
# 0.001 13%
# 0.005 3%
# 0.01 1.5%
# 0.05 0.5%
used_time = clock() - t
if used_time > 5*60.0:
# print 'Sleep for 5 s' # dbg
time.sleep(5.0)
elif used_time > 10.0:
# print 'Sleep for 1 s' # dbg
time.sleep(1.0)
elif used_time > 0.1:
# Few GUI events coming in, so we can sleep longer
# print 'Sleep for 0.05 s' # dbg
time.sleep(0.05)
else:
# Many GUI events coming in, so sleep only very little
time.sleep(0.001)
del ea
except KeyboardInterrupt:
pass
return 0
# This is our default implementation
inputhook_wx = inputhook_wx3
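# A minimal, self-contained sketch of the adaptive sleep used by inputhook_wx3
# above: the idle sleep grows with the time elapsed since the last dispatched
# GUI event, trading CPU load against keyboard responsiveness. The thresholds
# mirror the ones in the loop; the helper itself is illustrative only.
def _idle_sleep_time(seconds_since_last_event):
    if seconds_since_last_event > 5 * 60.0:
        return 5.0    # completely idle: sleep long, CPU load is negligible
    elif seconds_since_last_event > 10.0:
        return 1.0
    elif seconds_since_last_event > 0.1:
        return 0.05   # few GUI events coming in: medium sleep
    else:
        return 0.001  # many GUI events: sleep only very little
# _idle_sleep_time(0.01) -> 0.001; _idle_sleep_time(600.0) -> 5.0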
|
{
"content_hash": "e0f1a391fb2c9c34051004ea34667e3b",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 81,
"avg_line_length": 36.122699386503065,
"alnum_prop": 0.5441576086956522,
"repo_name": "sodafree/backend",
"id": "1eff4cee77f98ee5ee6bbda16e85b186405cf7d0",
"size": "5907",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "build/ipython/IPython/lib/inputhookwx.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Emacs Lisp",
"bytes": "21800"
},
{
"name": "JavaScript",
"bytes": "1050184"
},
{
"name": "Python",
"bytes": "21215906"
},
{
"name": "Shell",
"bytes": "7557"
},
{
"name": "VimL",
"bytes": "25012"
}
],
"symlink_target": ""
}
|
from django.contrib.auth import get_user_model
from django.http import JsonResponse
from django.shortcuts import get_object_or_404
from django.utils import timezone
from oauth2_provider.contrib.rest_framework.authentication import OAuth2Authentication
from oauth2_provider.contrib.rest_framework.permissions import TokenHasScope
from rest_framework.authentication import TokenAuthentication
from rest_framework.generics import ListAPIView
from rest_framework.permissions import IsAuthenticated
from rest_framework.status import HTTP_201_CREATED, HTTP_400_BAD_REQUEST, HTTP_200_OK, HTTP_403_FORBIDDEN
from rest_framework.views import APIView
from accounts.models import CompsocUser
from events.models import EventPage, EventSignup, SeatingRevision
from .serializers import DiscordUserSerialiser, EventSerialiser, EventSignupSerialiser, LanAppProfileSerialiser
class LanAppProfileView(APIView):
authentication_classes = [OAuth2Authentication]
permission_classes = [TokenHasScope]
required_scopes = ['lanapp']
def get(self, request):
if not request.user.is_authenticated:
return JsonResponse({'detail': 'cannot perform that action on an unauthenticated user'},
status=HTTP_403_FORBIDDEN)
user = request.user
compsoc_user = CompsocUser.objects.get(user_id=user.id)
serializer = LanAppProfileSerialiser(compsoc_user)
return JsonResponse(serializer.data)
class MemberDiscordInfoApiView(APIView):
authentication_classes = [TokenAuthentication]
permission_classes = [IsAuthenticated]
def get(self, request, uni_id):
user = get_object_or_404(get_user_model(), username=uni_id)
compsoc_user = CompsocUser.objects.get(user_id=user.id)
serializer = DiscordUserSerialiser(compsoc_user)
return JsonResponse(serializer.data)
class SeatingView(APIView):
def get(self, request, event_id):
event = get_object_or_404(EventPage, id=event_id)
if not event.has_seating:
return JsonResponse({'detail': 'that event doesn\'t have a seating plan'}, status=HTTP_400_BAD_REQUEST)
if not event.seating_location:
return JsonResponse({'detail': 'the event requires a seating plan but has none set'},
status=HTTP_400_BAD_REQUEST)
seating_location = event.seating_location
revisions = SeatingRevision.objects.for_event(event)
return JsonResponse({'hello': 'world'}, status=HTTP_200_OK)
class EventSignupView(APIView):
authentication_classes = [OAuth2Authentication]
permission_classes = [TokenHasScope]
required_scopes = ['event']
def post(self, request):
if not request.data.get('event_id'):
return JsonResponse({'detail': 'event_id field is required but wasn\'t found'}, status=HTTP_400_BAD_REQUEST)
data = {
'event': request.data.get('event_id'),
'member': request.user.id,
'comment': request.data.get('comment')
}
serialiser = EventSignupSerialiser(data=data)
if serialiser.is_valid():
serialiser.save()
else:
return JsonResponse(serialiser.errors, status=HTTP_400_BAD_REQUEST)
return JsonResponse(serialiser.data, status=HTTP_201_CREATED)
class EventDeregisterView(APIView):
authentication_classes = [OAuth2Authentication]
permission_classes = [TokenHasScope]
required_scopes = ['event']
def get(self, request, event_id):
signup = get_object_or_404(EventSignup, event=event_id, member=request.user.id)
signup.delete()
return JsonResponse({'detail': 'signup deleted'}, status=HTTP_200_OK)
class EventListView(ListAPIView):
    serializer_class = EventSerialiser
    def get_queryset(self):
        # Evaluate timezone.now() per request; a class-level queryset would
        # freeze the timestamp at module import time.
        return EventPage.objects.live().filter(finish__gte=timezone.now()).order_by('start')
|
{
"content_hash": "1adf7a8e177c3925889c1ff09551a990",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 120,
"avg_line_length": 38.504950495049506,
"alnum_prop": 0.7068655181280535,
"repo_name": "davidjrichardson/uwcs-zarya",
"id": "27cb2c7372e61bf0ffe385a833e0cac5093a71de",
"size": "3889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "77041"
},
{
"name": "Python",
"bytes": "162015"
},
{
"name": "SCSS",
"bytes": "54876"
}
],
"symlink_target": ""
}
|
import os
import logging
import argparse
from math import log, floor
from argparse import RawDescriptionHelpFormatter
import matplotlib
matplotlib.use('Agg')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from secimtools.dataManager import logger as sl
from secimtools.dataManager.flags import Flags
from secimtools.dataManager.interface import wideToDesign
from secimtools.visualManager import module_bar as bar
from secimtools.visualManager import module_box as box
from secimtools.visualManager import module_hist as hist
from secimtools.visualManager import module_lines as lines
from secimtools.visualManager import module_scatter as scatter
from secimtools.visualManager import module_distribution as dist
from secimtools.visualManager.manager_color import colorHandler
from secimtools.visualManager.manager_figure import figureHandler
def getOptions(myopts=None):
"""Function to pull in arguments"""
description="""The coefficient of variation (CV) is defined as the ratio
of the sample standard deviation to the mean. It is a method to measure the
variations of compounds. The variation of a peak intensity increases as
its CV increases. And adjusted for the sample mean, CV does not have unit;
thus, it is a standardized measurement for variation.
A density plot of CVs for all compounds across samples by group is performed.
And a set of flags of compounds with large CVs will be output. """
parser=argparse.ArgumentParser(description=description,
formatter_class=RawDescriptionHelpFormatter)
# Standard Input
standard = parser.add_argument_group(title="Standard input",
description="Standard input for SECIM tools.")
standard.add_argument("-i","--input", dest="input", action="store",
required=True, help="Input dataset in wide format.")
standard.add_argument("-d","--design", dest="design", action="store",
required=True, help="Design file.")
standard.add_argument("-id","--ID", dest="uniqID", action="store", required=True,
help="Name of the column with unique identifiers.")
standard.add_argument("-g","--group", dest="group", action='store',
required=False, default=False, help="Name of column in "\
"design file with Group/treatment information.")
standard.add_argument("-l","--levels",dest="levels",action="store",
required=False, default=False, help="Different groups to"\
"sort by separeted by commas.")
# Tool Input
tool = parser.add_argument_group(title="Tool specific input",
description="Input specific for the tool.")
tool.add_argument("-c","--CVcutoff", dest="CVcutoff", action="store",
required=False, default=False, type=float,
help="The default CV cutoff will flag 10 percent of "\
"the rowIDs with larger CVs. If you want to set a CV "\
"cutoff, put the number here. [optional]")
# Tool output
output = parser.add_argument_group(title="Output",
description="Paths for output files.")
output.add_argument("-f","--figure", dest="figure", action="store",
required=True, default="figure",
help="Name of the output PDF for CV plots.")
output.add_argument("-o","--flag", dest="flag", action="store",
required=True, default="RTflag",
help="Name of the output TSV for CV flags.")
# Plot options
plot = parser.add_argument_group(title='Plot options')
plot.add_argument("-pal","--palette",dest="palette",action='store',required=False,
default="tableau", help="Name of the palette to use.")
plot.add_argument("-col","--color",dest="color",action="store",required=False,
default="Tableau_20", help="Name of a valid color scheme"\
" on the selected palette")
args = parser.parse_args()
# Standardize paths
args.input = os.path.abspath(args.input)
args.design = os.path.abspath(args.design)
args.figure = os.path.abspath(args.figure)
args.flag = os.path.abspath(args.flag)
    # if args.levels was given, split it on commas; otherwise it stays False
if args.levels:
args.levels = args.levels.split(",")
return(args)
def calculateCV(data, design, cutoff, levels):
    """
    Calculates the coefficient of variation (CV) for each feature by group.
    :Arguments:
        :type data: pandas.DataFrame
        :param data: wide dataset
        :type design: pandas.DataFrame
        :param design: design dataset
        :type cutoff: float
        :param cutoff: Value of the cutoff; if none is provided it will be calculated.
        :type levels: str
        :param levels: Name of the groups to group by.
    """
#Open CV and CVcutoffs dataframes
CV = pd.DataFrame(index=data.index)
CVcutoff = pd.Series(index=list(set(design[levels])), dtype="Float64")
# Split design file by treatment group
for title, group in design.groupby(levels):
        # Create an empty dataset with metabolite names as index and calculate
        # their standard deviation and mean
        DATstat=pd.DataFrame(index=data[group.index].index)
        # ddof=1 is necessary to use n-1 in the denominator for the standard deviation.
        DATstat.loc[:,"std"] = np.std(data[group.index], axis=1, ddof=1)
        DATstat.loc[:,"mean"] = np.mean(data[group.index],axis=1)
        # Calculate the Coefficient of Variation for that group (if groups)
        # or all data (if no groups provided).
CV.loc[:,"cv_"+title] = abs(DATstat["std"] / DATstat["mean"])
# Calculate the CVcutoffs for each group (if groups provided)
# or all data (if not).
if not cutoff:
CVcutoff[title] = np.nanpercentile(CV["cv_"+title].values, q=90)
CVcutoff[title] = round(CVcutoff[title],
-int(floor(log(abs(CVcutoff[title]),10)))+2)
else:
CVcutoff[title] = np.nanpercentile(CV["cv_"+title].values, q=(1-cutoff)*100 )
CVcutoff[title] = round(CVcutoff[title],
-int(floor(log(abs(CVcutoff[title]),10)))+2)
# Calculate the maximum coefficient of variation
CV.loc[:,'cv'] = CV.apply(np.max, axis=1)
return (CV, CVcutoff)
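# A minimal numeric sketch of the computation in calculateCV above, assuming a
# single group: CV = sample standard deviation (ddof=1) divided by the mean,
# and the default cutoff is the 90th percentile of the CVs rounded to three
# significant figures via the round(x, -floor(log10(|x|)) + 2) trick.
import numpy as np
from math import floor, log10
def _cv(values):
    values = np.asarray(values, dtype=float)
    return abs(np.std(values, ddof=1) / np.mean(values))
def _round_sig(x, sig=3):
    return round(x, -int(floor(log10(abs(x)))) + (sig - 1))
# _cv([9.0, 10.0, 11.0]) -> 0.1 (std = 1.0, mean = 10.0)
# _round_sig(0.123456) -> 0.123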
def plotCVplots(data, cutoff, palette, pdf):
#Iterate over groups
for name,group in palette.design.groupby(palette.combName):
# Open figure handler
fh=figureHandler(proj='2d',figsize=(14,8))
# Get xmin and xmax
xmin = -np.nanpercentile(data['cv_'+name].values,99)*0.2
xmax = np.nanpercentile(data['cv_'+name].values,99)*1.5
# Plot histogram
hist.serHist(ax=fh.ax[0],dat=data['cv_'+name],color='grey',normed=1,
range=(xmin,xmax),bins=15)
# Plot density plot
dist.plotDensityDF(data=data['cv_'+name],ax=fh.ax[0], lb="CV density",
colors=palette.ugColors[name])
# Plot cutoff
lines.drawCutoffVert(ax=fh.ax[0],x=cutoff[name], lb="Cutoff at: {0}".format(cutoff[name]))
# Plot legend
fh.makeLegendLabel(ax=fh.ax[0])
# Give format to the axis
fh.formatAxis(yTitle='Density',xlim=(xmin,xmax), ylim="ignore",
figTitle = "Density Plot of Coefficients of Variation in {0}".format(name))
# Shrink figure to fit legend
fh.shrink()
# Add plot to PDF
fh.addToPdf(pdfPages=pdf)
def plotDistributions(data, cutoff, palette,pdf):
# Open new figureHandler instance
fh=figureHandler(proj='2d', figsize=(14,8))
#Get xmin and xmax
xmin = -np.nanpercentile(data['cv'].values,99)*0.2
xmax = np.nanpercentile(data['cv'].values,99)*1.5
# Split design file by treatment group and plot density plot
for name, group in palette.design.groupby(palette.combName):
dist.plotDensityDF(data=data["cv_"+name],ax=fh.ax[0],colors=palette.ugColors[name],
lb="{0}".format(name))
# Plot legend
fh.makeLegendLabel(ax=fh.ax[0])
# Give format to the axis
fh.formatAxis(yTitle="Density", xlim=(xmin,xmax), ylim="ignore",
figTitle="Density Plot of Coefficients of Variation by {0}".format(palette.combName))
fh.shrink()
fh.addToPdf(pdfPages=pdf)
def main(args):
""" Function to input all the arguments"""
# Checking if levels
if args.levels and args.group:
levels = [args.group]+args.levels
elif args.group and not args.levels:
levels = [args.group]
else:
levels = []
logger.info("Groups used to color by: {0}".format(",".join(levels)))
# Import data
dat = wideToDesign(args.input, args.design, args.uniqID, group=args.group,
anno=args.levels, logger=logger)
# Remove groups with just one element
dat.removeSingle()
# Cleaning from missing data
dat.dropMissing()
# Treat everything as float and round it to 3 digits
dat.wide = dat.wide.applymap(lambda x: round(x,3))
# Get colors
palette.getColors(dat.design,levels)
# Use group separation or not depending on user input
CV,CVcutoff = calculateCV(data=dat.wide, design=palette.design,
cutoff=args.CVcutoff, levels=palette.combName)
# Plot CVplots for each group and a distribution plot for all groups together
logger.info("Plotting Data")
with PdfPages(args.figure) as pdf:
plotCVplots (data=CV, cutoff=CVcutoff, palette=palette, pdf=pdf)
plotDistributions(data=CV, cutoff=CVcutoff, palette=palette, pdf=pdf)
# Create flag file instance and output flags by group
logger.info("Creatting Flags")
flag = Flags(index=CV['cv'].index)
for name, group in palette.design.groupby(palette.combName):
flag.addColumn(column="flag_feature_big_CV_{0}".format(name),
mask=((CV['cv_'+name] > CVcutoff[name]) | CV['cv_'+name].isnull()))
flag.df_flags.to_csv(args.flag, sep='\t')
logger.info("Script Complete!")
if __name__ == '__main__':
args=getOptions(myopts=True)
logger=logging.getLogger()
sl.setLogger(logger)
logger.info("Importing data with following parameters: "\
"\n\tWide: {0}"\
"\n\tDesign: {1}"\
"\n\tUnique ID: {2}"\
"\n\tGroup: {3}".format(args.input, args.design, args.uniqID,
args.group))
    # Establish a color palette for data and cutoffs
palette = colorHandler(pal=args.palette, col=args.color)
logger.info("Using {0} color scheme from {1} palette".format(args.color,
args.palette))
main(args)
|
{
"content_hash": "6a8a1ea440125781a4246efb0f2f5def",
"timestamp": "",
"source": "github",
"line_count": 273,
"max_line_length": 98,
"avg_line_length": 40.413919413919416,
"alnum_prop": 0.6317411402157165,
"repo_name": "secimTools/SECIMTools",
"id": "639ee9e33dffbcfd1ffb12f4b6eab7ae89934e75",
"size": "11469",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/scripts/coefficient_variation_flags.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "684170"
},
{
"name": "Python",
"bytes": "579384"
},
{
"name": "R",
"bytes": "13698"
},
{
"name": "Shell",
"bytes": "81657"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
import json
import logging
from urllib import urlencode
from django.core.urlresolvers import reverse
from django.http.response import JsonResponse, Http404
from django.shortcuts import render, redirect
from django.template import loader
from django.utils import timezone
from django.views.generic.base import TemplateView
from trello_reporter.authentication.models import KeyVal
from trello_reporter.charting import forms
from trello_reporter.charting.constants import CUMULATIVE_FLOW_INITIAL_WORKFLOW, COMPLETED_COLUMNS, \
SELECTED_COLUMNS_DESCRIPTION, SPRINT_COMMITMENT_DESCRIPTION, DATA_SYNCHRONIZATION_DESCRIPTION, \
SPRINT_CALCULATION_DESCRIPTION, BURNDOWN_CHART_DESCRIPTION, CONTROL_CHART_DESCRIPTION, \
VELOCITY_CHART_DESCRIPTION, CUMULATIVE_FLOW_CHART_DESCRIPTION
from trello_reporter.charting.models import Board, CardAction, List, Card, Sprint, ListStat
from trello_reporter.charting.processing import ChartExporter, ControlChart
from trello_reporter.charting.templatetags.card import display_card
from trello_reporter.harvesting.models import CardActionEvent
logger = logging.getLogger(__name__)
# local constants
CONTROL_INITIAL_WORKFLOW = [["Next"], ["Complete"]]
def index(request):
logger.debug("display index")
boards = Board.list_boards(request.user, request.COOKIES["token"])
return render(request, "index.html", {
"boards": boards,
"breadcrumbs": [Breadcrumbs.text("Boards")]
})
class Breadcrumbs(object):
@classmethod
def text(cls, text):
return {"text": text}
@classmethod
def url(cls, url, text):
t = {
"url": url,
"text": text
}
return t
@classmethod
def boards_index(cls):
return cls.url(reverse("index"), "Boards")
@classmethod
def board_detail(cls, board):
return [
cls.boards_index(),
Breadcrumbs.url(reverse("board-detail", args=(board.id, )), board.name)
]
class BaseView(TemplateView):
view_name = None # for javascript
def humanize_form_errors(form_list=None, formsets=None):
""" return html with errors in forms; should be piped into notification widget """
texts = []
for form in form_list:
if form and form.errors:
form_errors_text = form.errors.as_text()
logger.info("form errors: %s", form_errors_text)
texts.append(form_errors_text)
if formsets:
for formset in formsets:
nfe = formset.non_form_errors()
if nfe:
nfe_text = nfe.as_text()
logger.info("non formset errors: %s", nfe_text)
texts.append(nfe_text)
for fe in formset.errors:
if fe:
formset_form_error_text = fe.as_text()
logger.info("formset, form error: %s", formset_form_error_text)
texts.append(formset_form_error_text)
return "<br>".join(texts)
class ChartView(BaseView):
chart_name = None
chart_data_url = None
form_class = None
view_name = "chart"
def __init__(self, **kwargs):
super(ChartView, self).__init__(**kwargs)
# initial data populated in the form
self.initial_form_data = {}
# data from request.POST
self.form_data = None
self.formset_data = None
self.form = None
def get_context_data(self, **kwargs):
context = super(ChartView, self).get_context_data(**kwargs)
context["view_name"] = self.view_name # django uses view to link self
context["chart_name"] = self.chart_name
context["chart_data_url"] = self.chart_data_url
self.form = self.form_class(data=self.form_data, initial=self.initial_form_data)
context["form"] = self.form
return context
@staticmethod
def respond_json_form_errors(form_list, formset=None):
return JsonResponse({"error": "Form is not valid: " +
humanize_form_errors(form_list, formsets=[formset])})
class ControlChartBase(ChartView):
""" common code for data and html """
chart_name = "control"
form_class = forms.ControlChartForm
def get_context_data(self, board_id, **kwargs):
board = Board.objects.by_id(board_id)
sprint = Sprint.objects.latest_for_board(board)
self.initial_form_data["sprint"] = sprint
self.initial_form_data["count"] = 1
self.initial_form_data["time_type"] = "d"
context = super(ControlChartBase, self).get_context_data(**kwargs)
self.form.set_sprint_choices(Sprint.objects.for_board_by_end_date(board))
lis = List.objects.get_all_listnames_for_board(board)
formset = forms.get_workflow_formset(zip(lis, lis), CONTROL_INITIAL_WORKFLOW,
form_class=forms.MultiWorkflowMixin,
data=self.formset_data)
context["board"] = board
context["formset"] = formset
context["latest_sprint"] = sprint
return context
class ControlChartView(ControlChartBase):
template_name = "chart/control_chart.html"
def get_context_data(self, board_id, **kwargs):
logger.debug("display control chart")
self.chart_data_url = reverse("control-chart-data", args=(board_id, ))
context = super(ControlChartView, self).get_context_data(board_id, **kwargs)
context["breadcrumbs"] = Breadcrumbs.board_detail(context["board"]) + \
[Breadcrumbs.text("Control Chart")]
context["control_chart_description"] = CONTROL_CHART_DESCRIPTION
return context
class ControlChartDataView(ControlChartBase):
def post(self, request, board_id, *args, **kwargs):
self.form_data = request.POST
self.formset_data = request.POST
context = super(ControlChartDataView, self).get_context_data(board_id, **kwargs)
form, formset = context["form"], context["formset"]
if not (form.is_valid() and formset.is_valid()):
return self.respond_json_form_errors([form], formset=formset)
chart = ControlChart(
context["board"], formset.workflow, form.cleaned_data["beginning"],
form.cleaned_data["end"])
data = chart.chart_data
html = loader.render_to_string("chunks/control_chart_table.html",
context=chart.render_stats())
return JsonResponse({"data": data, "html": html})
class BurndownChartBase(ChartView):
chart_name = "burndown"
form_class = forms.BurndownChartForm
def get_context_data(self, board_id, **kwargs):
board = Board.objects.by_id(board_id)
sprint = Sprint.objects.latest_for_board(board)
self.initial_form_data["sprint"] = sprint
context = super(BurndownChartBase, self).get_context_data(**kwargs)
self.form.set_sprint_choices(Sprint.objects.for_board_by_end_date(board))
lis = List.objects.get_all_listnames_for_board(board)
self.commitment_cols = KeyVal.objects.sprint_commitment_columns(board).value["columns"]
com_form = forms.ListsSelectorForm(
self.commitment_cols,
lis,
data=self.form_data,
)
context["board"] = board
context["com_form"] = com_form
context["latest_sprint"] = sprint
return context
class BurndownChartView(BurndownChartBase):
template_name = "chart/burndown_chart.html"
def get_context_data(self, board_id, **kwargs):
logger.debug("display burndown chart")
self.chart_data_url = reverse("burndown-chart-data", args=(board_id, ))
context = super(BurndownChartView, self).get_context_data(board_id, **kwargs)
context["breadcrumbs"] = Breadcrumbs.board_detail(context["board"]) + \
[Breadcrumbs.text("Burndown Chart")]
context["burndown_chart_description"] = BURNDOWN_CHART_DESCRIPTION
return context
class BurndownChartDataView(BurndownChartBase):
def get(self, request, *args, **kwargs):
sprint_id = request.GET.get("sprint_id", None)
if not sprint_id:
raise Http404("Selected view of burndown chart does not exist, please specify sprint.")
# so self.commitment_cols is set
super(BurndownChartDataView, self).get_context_data(*args, **kwargs)
sprint = Sprint.objects.get(id=sprint_id)
data = ChartExporter.burndown_chart_c3(
sprint.board, sprint.start_dt,
sprint.end_dt, self.commitment_cols)
return JsonResponse({"data": data})
def post(self, request, board_id, *args, **kwargs):
logger.debug("get data for burndown chart")
self.form_data = request.POST
context = super(BurndownChartDataView, self).get_context_data(board_id, **kwargs)
form, com_form = context["form"], context["com_form"]
if not (form.is_valid() and com_form.is_valid()):
return self.respond_json_form_errors(form_list=(form, com_form))
data = ChartExporter.burndown_chart_c3(
context["board"], form.cleaned_data["beginning"],
form.cleaned_data["end"], com_form.workflow)
return JsonResponse({"data": data})
class CumulativeFlowChartBase(ChartView):
chart_name = "cumulative_flow"
form_class = forms.CumulativeFlowChartForm
def get_context_data(self, board_id, **kwargs):
board = Board.objects.by_id(board_id)
today = timezone.now().date()
self.initial_form_data["from_dt"] = today - datetime.timedelta(days=30)
self.initial_form_data["to_dt"] = today
self.initial_form_data["time_type"] = "d"
self.initial_form_data["count"] = 1
context = super(CumulativeFlowChartBase, self).get_context_data(**kwargs)
self.form.set_sprint_choices(Sprint.objects.for_board_by_end_date(board))
lis = List.objects.get_all_listnames_for_board(board)
context["all_lists"] = lis
formset = forms.get_workflow_formset([("", "")] + zip(lis, lis),
CUMULATIVE_FLOW_INITIAL_WORKFLOW,
data=self.formset_data)
context["board"] = board
context["formset"] = formset
return context
class CumulativeFlowChartView(CumulativeFlowChartBase):
template_name = "chart/cumulative_flow_chart.html"
def get_context_data(self, board_id, **kwargs):
logger.debug("display cumulative flow chart")
self.chart_data_url = reverse("cumulative-flow-chart-data", args=(board_id, ))
context = super(CumulativeFlowChartView, self).get_context_data(board_id, **kwargs)
context["breadcrumbs"] = Breadcrumbs.board_detail(context["board"]) + \
[Breadcrumbs.text("Cumulative flow chart")]
context["cumulative_flow_chart_description"] = CUMULATIVE_FLOW_CHART_DESCRIPTION
return context
class CumulativeFlowChartDataView(CumulativeFlowChartBase):
def post(self, request, board_id, *args, **kwargs):
logger.debug("get data for cumulative flow chart")
self.form_data = request.POST
self.formset_data = request.POST
context = super(CumulativeFlowChartDataView, self).get_context_data(board_id, **kwargs)
form, formset = context["form"], context["formset"]
if not (form.is_valid() and formset.is_valid()):
return self.respond_json_form_errors([form], formset=formset)
order = formset.workflow
data = ChartExporter.cumulative_chart_c3(
context["board"],
order,
form.cleaned_data["beginning"], form.cleaned_data["end"],
form.cleaned_data["delta"],
form.cleaned_data["cards_or_sp"]
)
# c3 wants reversed order
return JsonResponse({"data": data, "order": list(reversed(order)),
"all_lists": context["all_lists"]})
class VelocityChartBase(ChartView):
chart_name = "velocity"
form_class = forms.VelocityChartForm
def get_context_data(self, board_id, **kwargs):
board = Board.objects.by_id(board_id)
today = timezone.now().date()
self.initial_form_data["from_dt"] = today - datetime.timedelta(days=180)
self.initial_form_data["to_dt"] = today
context = super(VelocityChartBase, self).get_context_data(**kwargs)
context["board"] = board
return context
def get_chart_data(self, context):
if self.form.is_bound:
last_n = self.form.cleaned_data["last_n"]
else:
last_n = self.form.fields["last_n"].initial
sprints = Sprint.objects.for_board_last_n(context["board"], last_n)
cc = KeyVal.objects.sprint_commitment_columns(context["board"]).value["columns"]
return ChartExporter.velocity_chart_c3(sprints, cc)
class VelocityChartView(VelocityChartBase):
template_name = "chart/velocity_chart.html"
def get_context_data(self, board_id, **kwargs):
logger.debug("display velocity chart")
self.chart_data_url = reverse("velocity-chart-data", args=(board_id, ))
context = super(VelocityChartView, self).get_context_data(board_id, **kwargs)
context["breadcrumbs"] = Breadcrumbs.board_detail(context["board"]) + \
[Breadcrumbs.text("Velocity chart")]
context["sprint_data"] = self.get_chart_data(context)
context["velocity_chart_description"] = VELOCITY_CHART_DESCRIPTION
return context
class VelocityChartDataView(VelocityChartBase):
def post(self, request, board_id, *args, **kwargs):
logger.debug("get data for velocity chart")
self.form_data = request.POST
context = super(VelocityChartDataView, self).get_context_data(board_id, **kwargs)
form = context["form"]
if not form.is_valid():
return self.respond_json_form_errors([form])
data = self.get_chart_data(context)
return JsonResponse({"data": data})
class ListDetailBase(ChartView):
chart_name = "list_history"
form_class = forms.ListDetailForm
def get_context_data(self, list_id, **kwargs):
li = List.objects.get(id=list_id)
today = timezone.now().date()
self.initial_form_data["from_dt"] = today - datetime.timedelta(days=60)
self.initial_form_data["to_dt"] = today
context = super(ListDetailBase, self).get_context_data(**kwargs)
context["list"] = li
return context
class ListDetailView(ListDetailBase):
template_name = "list_detail.html"
def get_context_data(self, list_id, **kwargs):
logger.debug("list detail: %s", list_id)
self.chart_data_url = reverse("list-history-chart-data", args=(list_id, ))
context = super(ListDetailView, self).get_context_data(list_id, **kwargs)
context["breadcrumbs"] = Breadcrumbs.board_detail(context["list"].latest_action.board) + \
[Breadcrumbs.text("Column \"%s\"" % context["list"].name)]
context["list_stats"] = ListStat.objects.for_list_in_range(
context["list"], self.initial_form_data["from_dt"], self.initial_form_data["to_dt"])
return context
class ListDetailDataView(ListDetailBase):
def post(self, request, list_id, *args, **kwargs):
logger.debug("get data for list history chart: %s", list_id)
self.form_data = request.POST
context = super(ListDetailDataView, self).get_context_data(list_id, **kwargs)
form = context["form"]
if not form.is_valid():
return self.respond_json_form_errors([form])
data = ChartExporter.list_history_chart_c3(context["list"],
form.cleaned_data["from_dt"],
form.cleaned_data["to_dt"])
return JsonResponse({"data": data})
def board_detail(request, board_id):
board = Board.objects.by_id(board_id)
logger.debug("board detail %s", board)
kv_displ_cols = KeyVal.objects.displayed_cols_in_board_detail(request.user, board)
kv_com = KeyVal.objects.sprint_commitment_columns(board)
if request.method == "POST":
form_data = request.POST
else:
form_data = None
lis = List.objects.get_all_listnames_for_board(board)
columns_form = forms.ListsSelectorForm(
kv_displ_cols.value["columns"],
lis,
data=form_data,
prefix="col"
)
commitment_form = forms.ListsSelectorForm(
kv_com.value["columns"],
lis,
data=form_data,
prefix="com"
)
if request.method == "POST":
if commitment_form.is_valid() and columns_form.is_valid():
kv_displ_cols.value["columns"] = columns_form.workflow
kv_displ_cols.save()
kv_com.value["columns"] = commitment_form.workflow
kv_com.save()
else:
logger.warning("formsets are not valid: %s %s", commitment_form, columns_form)
# TODO: propagate to client
lists = List.objects.filter_lists_for_board(board, f=kv_displ_cols.value["columns"])
lists = sorted(lists, key=lambda x: x.name)
sprints = Sprint.objects.filter(board__id=board_id).order_by("start_dt")
context = {
"board": board,
"lists": lists,
"sprints": sprints,
"columns_form": columns_form,
"commitment_form": commitment_form,
"form_post_url": reverse("board-detail", args=(board_id, )),
"errors": KeyVal.objects.board_messages(board).value["messages"],
"breadcrumbs": [
Breadcrumbs.url(reverse("index"), "Boards"),
Breadcrumbs.text(board.name)
],
"selected_columns_description": SELECTED_COLUMNS_DESCRIPTION,
"sprint_commitment_description": SPRINT_COMMITMENT_DESCRIPTION,
"data_synchronization_description": DATA_SYNCHRONIZATION_DESCRIPTION,
"sprint_calculation_description": SPRINT_CALCULATION_DESCRIPTION,
}
return render(request, "board_detail.html", context)
def board_refresh(request, board_id):
board = Board.objects.by_id(board_id)
logger.debug("refresh board %s", board)
board.ensure_actions(request.COOKIES["token"])
return redirect('board-detail', board_id=board_id)
def sprint_create(request, board_id):
board = Board.objects.by_id(board_id)
logger.debug("sprint create for board: %s", board)
if request.method == "POST":
form = forms.SprintCreateForm(data=request.POST)
form.instance.board = board
logger.debug("user's timezone = %s", request.user.timezone)
if form.is_valid():
sprint = form.save()
logger.debug("creating new sprint: %s", sprint)
Sprint.set_completed_list(board)
return redirect('sprint-detail', sprint_id=sprint.id)
else:
form = forms.SprintCreateForm()
context = {
"form": form,
"post_url": reverse("sprint-create", args=(board_id, )),
"breadcrumbs": Breadcrumbs.board_detail(board) +
[Breadcrumbs.text("Create sprint")]
}
return render(request, "sprint_create.html", context)
def sprint_detail(request, sprint_id):
sprint = Sprint.objects.get(id=sprint_id)
logger.debug("sprint detail: %s", sprint)
# edit sprint as soon as possible
if request.method == "POST":
sprint_edit_form = forms.SprintEditForm(data=request.POST, instance=sprint)
logger.debug("user's timezone = %s", request.user.timezone)
if sprint_edit_form.is_valid():
sprint = sprint_edit_form.save()
logger.debug("saving updated sprint: %s", sprint)
else:
sprint_edit_form = forms.SprintEditForm(instance=sprint)
sprint_cards = Card.objects.sprint_cards_with_latest_actions(sprint)
sprint_card_ids = [x.id for x in sprint_cards]
unfinished_cards = []
if sprint.completed_list is not None:
# don't supply date, we want latest stuff
completed_card_actions = CardAction.objects.safe_card_actions_on_list_in(
sprint.board,
sprint.completed_list,
)
completed_card_ids = [x.card_id for x in completed_card_actions]
unfinished_cards = [card for card in sprint_cards if card.id not in completed_card_ids]
else:
completed_card_actions = CardAction.objects.card_actions_on_list_names_in(
sprint.board,
COMPLETED_COLUMNS
)
current_sprint_cas = CardAction.objects.card_actions_on_list_names_in(
sprint.board, ["Next", "In Progress", "Complete"], min(timezone.now(), sprint.end_dt))
added_after_sprint_card_actions = [ca for ca in current_sprint_cas if ca.card_id not in sprint_card_ids]
chart_url = reverse("burndown-chart-data", args=(sprint.board.id, ), )
chart_url += "?" + urlencode({"sprint_id": sprint.id})
context = {
"form": sprint_edit_form,
"post_url": reverse("sprint-detail", args=(sprint_id, )),
"sprint": sprint,
"sprint_cards": sprint_cards,
"completed_card_actions": completed_card_actions,
"unfinished_cards": unfinished_cards,
"after_sprint_cas": added_after_sprint_card_actions,
"view_name": "chart_without_form",
"chart_name": "burndown",
"chart_data_url": chart_url,
"submit_input_type": "submit",
"breadcrumbs": Breadcrumbs.board_detail(sprint.board) +
[Breadcrumbs.text("Sprint \"%s\"" % sprint.name)]
}
return render(request, "sprint_detail.html", context)
def card_detail(request, card_id):
card = Card.objects.get(id=card_id)
logger.debug("card detail: %s", card)
# (previous_action, action)
action_list = list(card.actions.order_by("date"))
    actions = list(zip([None] + action_list[:-1], action_list))
events = [json.dumps(x.data, indent=2)
for x in CardActionEvent.objects.for_card_by_date(card.trello_id)]
context = {
"card": card,
"actions": actions,
"events": events,
"breadcrumbs": Breadcrumbs.board_detail(action_list[-1].board) +
[Breadcrumbs.text("Card \"%s\"" % display_card(action_list[-1]))]
}
return render(request, "card_detail.html", context)
def stalled_cards(request, list_id):
li = List.objects.get(id=list_id)
board = li.latest_action.board
card_actions = CardAction.objects.safe_card_actions_on_list_in(board, li)
card_actions = sorted(card_actions, key=lambda x: x.date)
context = {
"list": li,
"card_actions": card_actions,
"breadcrumbs": Breadcrumbs.board_detail(board) +
[Breadcrumbs.text("Stalled cards on \"%s\"" % li.name)]
}
return render(request, "stalled_cards.html", context)
# API
def api_get_card(request, card_id):
card = Card.objects.get(id=card_id)
logger.debug("api: get card %s", card)
response = {
"id": card.id,
"name": card.name,
"url": request.build_absolute_uri(reverse('card-detail', args=(card_id, ))),
}
return JsonResponse(response)
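# Illustrative response shape for api_get_card (the field values below are
# made up, not taken from a real deployment):
#   {"id": 42, "name": "Investigate flaky sync",
#    "url": "http://example.com/card/42/"}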
|
{
"content_hash": "939c34377b424f58c5336cccb7379afb",
"timestamp": "",
"source": "github",
"line_count": 623,
"max_line_length": 108,
"avg_line_length": 37.45104333868379,
"alnum_prop": 0.6299074232813303,
"repo_name": "TomasTomecek/trello-reporter",
"id": "d128179e988907f6581ee933717fd8b63f70b5f2",
"size": "23332",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trello_reporter/charting/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "419"
},
{
"name": "HTML",
"bytes": "30644"
},
{
"name": "JavaScript",
"bytes": "11694"
},
{
"name": "Makefile",
"bytes": "618"
},
{
"name": "Python",
"bytes": "129269"
},
{
"name": "Shell",
"bytes": "483"
}
],
"symlink_target": ""
}
|
"""Helpers to traverse the Dataset dependency structure."""
from six.moves import queue as Queue # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
OP_TYPES_ALLOWLIST = ["DummyIterationCounter"]
# We allowlist all ops that produce variant tensors as output. This is a bit
# of overkill but the other dataset _inputs() traversal strategies can't
# cover the case of function inputs that capture dataset variants.
TENSOR_TYPES_ALLOWLIST = [dtypes.variant]
def _traverse(dataset, op_filter_fn):
"""Traverse a dataset graph, returning nodes matching `op_filter_fn`."""
result = []
bfs_q = Queue.Queue()
bfs_q.put(dataset._variant_tensor.op) # pylint: disable=protected-access
visited = []
while not bfs_q.empty():
op = bfs_q.get()
visited.append(op)
if op_filter_fn(op):
result.append(op)
for i in op.inputs:
input_op = i.op
if input_op not in visited:
bfs_q.put(input_op)
return result
def obtain_capture_by_value_ops(dataset):
"""Given an input dataset, finds all allowlisted ops used for construction.
Allowlisted ops are stateful ops which are known to be safe to capture by
value.
Args:
dataset: Dataset to find allowlisted stateful ops for.
Returns:
A list of variant_tensor producing dataset ops used to construct this
dataset.
"""
def capture_by_value(op):
return (op.outputs[0].dtype in TENSOR_TYPES_ALLOWLIST or
op.type in OP_TYPES_ALLOWLIST)
return _traverse(dataset, capture_by_value)
def obtain_all_variant_tensor_ops(dataset):
"""Given an input dataset, finds all dataset ops used for construction.
A series of transformations would have created this dataset with each
transformation including zero or more Dataset ops, each producing a dataset
variant tensor. This method outputs all of them.
Args:
dataset: Dataset to find variant tensors for.
Returns:
A list of variant_tensor producing dataset ops used to construct this
dataset.
"""
return _traverse(dataset, lambda op: op.outputs[0].dtype == dtypes.variant)
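# --- Usage sketch (illustrative, not part of the module) --------------------
# A minimal example of recovering the dataset ops behind a small pipeline.
# Variant tensors only carry a producing `.op` in graph mode, so the sketch
# builds the dataset inside an explicit Graph; the printed op type names are
# indicative only and may differ across TensorFlow versions.
if __name__ == "__main__":
  import tensorflow as tf

  with tf.Graph().as_default():
    ds = tf.data.Dataset.range(10).map(lambda x: x * 2).batch(2)
    ops = obtain_all_variant_tensor_ops(ds)
    print(sorted(op.type for op in ops))  # e.g. BatchDataset, MapDataset, RangeDataset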
|
{
"content_hash": "969c867cdd9579cecf8b3b50c8d1b81c",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 77,
"avg_line_length": 31.388059701492537,
"alnum_prop": 0.7213504517356157,
"repo_name": "Intel-Corporation/tensorflow",
"id": "41987bd2e348f480cfaeaa2167c037c98468d2fb",
"size": "2792",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/util/traverse.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "183416"
},
{
"name": "C++",
"bytes": "24549804"
},
{
"name": "CMake",
"bytes": "160888"
},
{
"name": "Go",
"bytes": "849081"
},
{
"name": "HTML",
"bytes": "681293"
},
{
"name": "Java",
"bytes": "307123"
},
{
"name": "Jupyter Notebook",
"bytes": "1833659"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37393"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64142"
},
{
"name": "Protocol Buffer",
"bytes": "218430"
},
{
"name": "Python",
"bytes": "21875003"
},
{
"name": "Shell",
"bytes": "337846"
},
{
"name": "TypeScript",
"bytes": "849555"
}
],
"symlink_target": ""
}
|
import h5py
import numpy as np
import torch
from collections import defaultdict, namedtuple
from collections.abc import Mapping
from glob import glob
from torch.utils.data import Dataset
import os
class Invertible:
def inv(self, y):
        raise NotImplementedError('Subclasses of Invertible must implement an inv method')
class DataTransform:
def initialize(self, dataset):
pass
def __repr__(self):
return self.__class__.__name__
class SubsampleNeurons(DataTransform):
def __init__(self, datakey, idx, axis):
super().__init__()
self.idx = idx
self.datakey = datakey
self._subsamp = None
self.axis = axis
def initialize(self, dataset):
self._subsamp = []
for d in dataset.data_keys:
if d == self.datakey:
self._subsamp.append([slice(None) for _ in range(self.axis - 1)] + [self.idx, ...])
else:
self._subsamp.append(...)
def __call__(self, item):
return tuple(it[sub] for sub, it in zip(self._subsamp, item))
class Neurons2Behavior(DataTransform):
def __init__(self, idx):
super().__init__()
self.idx = idx
def __call__(self, item):
return tuple((item[0], np.hstack((item[1], item[3][~self.idx])), item[2], item[3][self.idx]))
class ToTensor(DataTransform):
def __call__(self, item):
return tuple(torch.from_numpy(it) for it in item)
class H5Dataset(Dataset):
def __init__(self, filename, *data_keys, info_name=None, transform=None):
self.fid = h5py.File(filename, 'r')
m = None
for key in data_keys:
assert key in self.fid, 'Could not find {} in file'.format(key)
if m is None:
m = len(self.fid[key])
else:
assert m == len(self.fid[key]), 'Length of datasets do not match'
self._len = m
self.data_keys = data_keys
if info_name is not None:
self.info = self.fid[info_name]
if transform is None:
self.transform = Chain(TransformFromFuncs(), ToTensor())
else:
self.transform = transform
self.transform.initialize(self)
def __getitem__(self, item):
return self.transform(tuple(self.fid[d][item] for d in self.data_keys))
def __iter__(self):
yield from map(self.__getitem__, range(len(self)))
def __len__(self):
return self._len
def __repr__(self):
return '\n'.join(['Tensor {}: {} '.format(key, self.fid[key].shape)
for key in self.data_keys] + ['Transforms: ' + repr(self.transform)])
class TransformDataset(Dataset):
def transform(self, x, exclude=None):
for tr in self.transforms:
if exclude is None or not isinstance(tr, exclude):
x = tr(x)
return x
    def invert(self, x, exclude=None):
        # reversed() needs a sequence, so materialize the filtered transforms;
        # mirror transform()'s handling of exclude=None while doing so.
        transforms = [tr for tr in self.transforms
                      if exclude is None or not isinstance(tr, exclude)]
        for tr in reversed(transforms):
            if not isinstance(tr, Invertible):
                raise TypeError('Cannot invert', tr.__class__.__name__)
            x = tr.inv(x)
        return x
def __iter__(self):
yield from map(self.__getitem__, range(len(self)))
def __len__(self):
return self._len
def __repr__(self):
return '{} m={}:\n\t({})'.format(self.__class__.__name__, len(self), ', '.join(self.data_groups)) \
+ '\n\t[Transforms: ' + '->'.join([repr(tr) for tr in self.transforms]) + ']'
class NumpyZSet(TransformDataset):
def __init__(self, cachedir, *data_groups, transforms=None):
self.cachedir = cachedir
tmp = np.load(os.path.join(cachedir, '0.npz'))
for key in data_groups:
assert key in tmp, 'Could not find {} in file'.format(key)
self._len = len(glob('{}/[0-9]*.npz'.format(self.cachedir)))
self.data_groups = data_groups
self.transforms = transforms or []
self.data_point = namedtuple('DataPoint', data_groups)
def __getitem__(self, item):
dat = np.load(os.path.join(self.cachedir, '{}.npz'.format(item)))
x = self.data_point(*(dat[g] for g in self.data_groups))
for tr in self.transforms:
x = tr(x)
return x
def __getattr__(self, item):
dat = np.load(os.path.join(self.cachedir, 'meta.npz'))
if item in dat:
item = dat[item]
            if item.dtype.char == 'S':  # convert bytes to unicode
item = item.astype(str)
return item
else:
raise AttributeError('Item {} not found in {}'.format(item, self.__class__.__name__))
class H5SequenceSet(TransformDataset):
def __init__(self, filename, *data_groups, transforms=None):
self._fid = h5py.File(filename, 'r')
m = None
for key in data_groups:
assert key in self._fid, 'Could not find {} in file'.format(key)
l = len(self._fid[key])
if m is not None and l != m:
raise ValueError('groups have different length')
m = l
self._len = m
self.data_groups = data_groups
self.transforms = transforms or []
self.data_point = namedtuple('DataPoint', data_groups)
def __getitem__(self, item):
x = self.data_point(*(np.array(self._fid[g][str(item)]) for g in self.data_groups))
for tr in self.transforms:
x = tr(x)
return x
def __getattr__(self, item):
if item in self._fid:
item = self._fid[item]
            if isinstance(item, h5py.Dataset):
item = item.value
                if item.dtype.char == 'S':  # convert bytes to unicode
item = item.astype(str)
return item
return item
else:
raise AttributeError('Item {} not found in {}'.format(item, self.__class__.__name__))
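# --- Usage sketch (illustrative; 'data.h5' and the group names are made up) -
# Assuming an HDF5 file whose groups 'inputs' and 'responses' store one member
# per sample index ('0', '1', ...), H5SequenceSet yields named tuples that a
# ToTensor transform converts to torch tensors.
if __name__ == "__main__":
    data = H5SequenceSet('data.h5', 'inputs', 'responses',
                         transforms=[ToTensor()])
    print(data)                  # groups, length, and the transform chain
    inputs, responses = data[0]  # DataPoint fields as torch tensors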
|
{
"content_hash": "e6b9e16f0ffdf545d59352f6ddb79a7a",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 107,
"avg_line_length": 31.6968085106383,
"alnum_prop": 0.5563013928511495,
"repo_name": "atlab/attorch",
"id": "74b36936ed45d2e5ff045c441ba427fa8139a051",
"size": "5959",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "attorch/dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "312"
},
{
"name": "Python",
"bytes": "79587"
}
],
"symlink_target": ""
}
|
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetConnectApp(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetConnectApp Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetConnectApp, self).__init__(temboo_session, '/Library/Twilio/ConnectApps/GetConnectApp')
def new_input_set(self):
return GetConnectAppInputSet()
def _make_result_set(self, result, path):
return GetConnectAppResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetConnectAppChoreographyExecution(session, exec_id, path)
class GetConnectAppInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetConnectApp
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccountSID(self, value):
"""
Set the value of the AccountSID input for this Choreo. ((required, string) The AccountSID provided when you signed up for a Twilio account.)
"""
super(GetConnectAppInputSet, self)._set_input('AccountSID', value)
def set_AuthToken(self, value):
"""
Set the value of the AuthToken input for this Choreo. ((required, string) The authorization token provided when you signed up for a Twilio account.)
"""
super(GetConnectAppInputSet, self)._set_input('AuthToken', value)
def set_ConnectAppSID(self, value):
"""
Set the value of the ConnectAppSID input for this Choreo. ((required, string) The id of the Connect App to retrieve.)
"""
super(GetConnectAppInputSet, self)._set_input('ConnectAppSID', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) The format that the response should be in. Valid values are: json (the default) and xml.)
"""
super(GetConnectAppInputSet, self)._set_input('ResponseFormat', value)
class GetConnectAppResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetConnectApp Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from Twilio.)
"""
return self._output.get('Response', None)
class GetConnectAppChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetConnectAppResultSet(response, path)
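# --- Usage sketch (illustrative; credentials and SIDs are placeholders) -----
# This follows the usual Temboo SDK pattern: build a session, fill the
# InputSet, execute, and read the Response output. The placeholder values
# below must be replaced with real Temboo and Twilio credentials.
if __name__ == "__main__":
    from temboo.core.session import TembooSession

    session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
    choreo = GetConnectApp(session)
    inputs = choreo.new_input_set()
    inputs.set_AccountSID('ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
    inputs.set_AuthToken('your-auth-token')
    inputs.set_ConnectAppSID('CNxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')
    results = choreo.execute_with_results(inputs)
    print(results.get_Response())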
|
{
"content_hash": "68c6409fed96c2acb9e1cb095929bd3f",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 176,
"avg_line_length": 41.67605633802817,
"alnum_prop": 0.6998986143967557,
"repo_name": "jordanemedlock/psychtruths",
"id": "818418d8bd22e779fba9d492486f42e17a0cadec",
"size": "3863",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "temboo/core/Library/Twilio/ConnectApps/GetConnectApp.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "18544"
},
{
"name": "HTML",
"bytes": "34650"
},
{
"name": "JavaScript",
"bytes": "423"
},
{
"name": "PHP",
"bytes": "1097"
},
{
"name": "Python",
"bytes": "23444578"
}
],
"symlink_target": ""
}
|
__author__ = 'nlw'
import argparse
import logging
import unittest
import re
from scigraph.api.SciGraph import SciGraph
from scigraph.renderers.TabRenderer import *
from scigraph.renderers.RawRenderer import *
from scigraph.renderers.EntityAnnotationTabRenderer import *
import wikipedia
renderer = EntityAnnotationTabRenderer()
def main():
    parser = argparse.ArgumentParser(description='SciGraph-wikiparser: '
                                     'Wikipedia section parser',
                                     formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-u', '--url', type=str, default="http://scigraph-ontology-dev.monarchinitiative.org/scigraph/",
help='A base URL for SciGraph')
parser.add_argument('-t', '--to', type=str, required=False,
help='Renderer')
parser.add_argument('-p', '--page', type=str, required=True,
help='Page')
args = parser.parse_args()
sg = SciGraph(args.url)
parse_wikipage(sg, args.page)
def parse_wikipage(sg, n):
page = wikipedia.page(n)
print("#TITLE: " + page.title)
sentences = page.content.split("\n")
h2 = 'Main'
h3 = ''
for sentence in sentences:
        if sentence == "":
continue
m = re.match("=== (.*) ===", sentence)
if m:
h3 = m.group(1)
sentence = h3
        else:
            m = re.match("== (.*) ==", sentence)
            if m:
                h2 = m.group(1)
                h3 = ''  # a new top-level section starts with no subsection
                sentence = h2
        print("#HEADING: " + h2 + " . " + h3)
rs = sg.annotate(sentence)
print("#TEXT: \"" + sentence + "\"")
print("#SPANS:")
for span in rs.spans:
            vals = [str(x) for x in [page.title, h2, h3, span.start, span.end, span.token.id, ",".join(span.token.terms), span.text]]
print("\t".join(vals))
if __name__ == "__main__":
main()
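# Example invocation (the page name is illustrative):
#   python wikipedia-miner.py -p "Marfan syndrome"
# This prints the page title, the current section/subsection headings, and one
# tab-separated row per entity span that SciGraph's annotate service returns.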
|
{
"content_hash": "c6f145de14b7cec175c06d618a3d1933",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 130,
"avg_line_length": 28.955882352941178,
"alnum_prop": 0.542915185373286,
"repo_name": "SciGraph/py-SciGraph",
"id": "af6e745fa3e8cae9f8a88825eba478617cb683c0",
"size": "1993",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wikipedia-miner.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "15583"
}
],
"symlink_target": ""
}
|
import re
import imp
import os.path
import sys
from datetime import datetime
from datetime import timedelta
from httplib import BadStatusLine
from HTMLParser import HTMLParseError
import logging
import urllib2
from django.conf import settings
from django.utils.importlib import import_module
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import signals as model_signals
from django.test.client import Client
from linkcheck_settings import MAX_URL_LENGTH
from linkcheck_settings import MEDIA_PREFIX
from linkcheck_settings import SITE_DOMAINS
from linkcheck_settings import EXTERNAL_REGEX_STRING
from linkcheck_settings import EXTERNAL_RECHECK_INTERVAL
logger = logging.getLogger('linkcheck')
TIMEOUT = None
if sys.version_info >= (2, 6):  # timeout arg of urlopen is available
TIMEOUT = 10
EXTERNAL_REGEX = re.compile(EXTERNAL_REGEX_STRING)
class HeadRequest(urllib2.Request):
def get_method(self):
return "HEAD"
class Url(models.Model):
# A URL represents a distinct URL.
# A single Url can have multiple Links associated with it
url = models.CharField(max_length=MAX_URL_LENGTH, unique=True) # See http://www.boutell.com/newfaq/misc/urllength.html
last_checked = models.DateTimeField(blank=True, null=True)
status = models.NullBooleanField()
message = models.CharField(max_length=1024, blank=True, null=True)
still_exists = models.BooleanField()
@property
def type(self):
if EXTERNAL_REGEX.match(self.url):
return 'external'
if self.url.startswith('mailto'):
return 'mailto'
        elif str(self.url) == '':
return 'empty'
elif self.url.startswith('#'):
return 'anchor'
elif self.url.startswith(MEDIA_PREFIX):
return 'file'
else:
return 'unknown'
@property
def get_message(self):
if self.last_checked:
return self.message
else:
return "URL Not Yet Checked"
@property
def colour(self):
if not self.last_checked:
return 'blue'
        elif self.status is True:
return 'green'
else:
return 'red'
def __unicode__(self):
return self.url
@property
def external(self):
return EXTERNAL_REGEX.match(self.url)
def url_unquoted(self):
try:
# URLs should be ascii encodable
url = self.url.encode('ascii')
except UnicodeEncodeError:
url = self.url
return urllib2.unquote(url).decode('utf8')
def check(self, check_internal=True, check_external=True, external_recheck_interval=EXTERNAL_RECHECK_INTERVAL):
from linkcheck.utils import LinkCheckHandler
external_recheck_datetime = datetime.now() - timedelta(minutes=external_recheck_interval)
self.status = False
# Remove current domain from URLs as the test client chokes when trying to test them during a page save
# They shouldn't generally exist but occasionally slip through
# If settings.SITE_DOMAINS isn't set then use settings.SITE_DOMAIN
# but also check for variants: example.org, www.example.org, test.example.org
original_url = None # used to restore the original url afterwards
if SITE_DOMAINS: #if the setting is present
internal_exceptions = SITE_DOMAINS
else: # try using SITE_DOMAIN
root_domain = settings.SITE_DOMAIN
if root_domain.startswith('www.'):
root_domain = root_domain[4:]
elif root_domain.startswith('test.'):
root_domain = root_domain[5:]
internal_exceptions = ['http://'+root_domain, 'http://www.'+root_domain, 'http://test.'+root_domain]
for ex in internal_exceptions:
if ex and self.url.startswith(ex):
original_url = self.url
self.url = self.url.replace(ex, '', 1)
if check_internal and (not self.external):
            if not self.url:
self.message = 'Empty link'
elif self.url.startswith('mailto:'):
self.status = None
self.message = 'Email link (not automatically checked)'
elif self.url.startswith('#'):
self.status = None
self.message = 'Link to within the same page (not automatically checked)'
elif self.url.startswith(MEDIA_PREFIX):
#TODO Assumes a direct mapping from media url to local filesystem path. This will break quite easily for alternate setups
if os.path.exists(settings.MEDIA_ROOT + self.url_unquoted()[len(MEDIA_PREFIX)-1:]):
self.message = 'Working file link'
self.status = True
else:
self.message = 'Missing Document'
elif getattr(self, '_internal_hash', False) and getattr(self, '_instance', None):
# This is a hash link pointing to itself
from linkcheck import parse_anchors
hash = self._internal_hash
instance = self._instance
if hash == '#': # special case, point to #
self.message = 'Working internal hash anchor'
self.status = True
else:
hash = hash[1:] #'#something' => 'something'
html_content = ''
for field in instance._linklist.html_fields:
html_content += getattr(instance, field, '')
names = parse_anchors(html_content)
if hash in names:
self.message = 'Working internal hash anchor'
self.status = True
else:
self.message = 'Broken internal hash anchor'
elif self.url.startswith('/'):
old_prepend_setting = settings.PREPEND_WWW
settings.PREPEND_WWW = False
c = Client()
c.handler = LinkCheckHandler()
response = c.get(self.url, follow=True)
if response.status_code == 200:
self.message = 'Working internal link'
self.status = True
# see if the internal link points an anchor
if self.url[-1] == '#': # special case, point to #
self.message = 'Working internal hash anchor'
elif self.url.count('#'):
anchor = self.url.split('#')[1]
from linkcheck import parse_anchors
names = parse_anchors(response.content)
if anchor in names:
self.message = 'Working internal hash anchor'
self.status = True
else:
self.message = 'Broken internal hash anchor'
self.status = False
elif (response.status_code == 302 or response.status_code == 301):
self.status = None
self.message = 'This link redirects: code %d (not automatically checked)' % (response.status_code, )
else:
self.message = 'Broken internal link'
settings.PREPEND_WWW = old_prepend_setting
else:
self.message = 'Invalid URL'
if original_url: # restore the original url before saving
self.url = original_url
self.last_checked = datetime.now()
self.save()
elif check_external and self.external:
logger.info('checking external link: %s' % self.url)
if self.last_checked and (self.last_checked > external_recheck_datetime):
return self.status
try:
# Remove URL fragment identifiers
url = self.url.rsplit('#')[0]
if self.url.count('#'):
# We have to get the content so we can check the anchors
if TIMEOUT:
response = urllib2.urlopen(url, timeout=TIMEOUT)
else:
response = urllib2.urlopen(url)
else:
# Might as well just do a HEAD request
req = HeadRequest(url, headers={'User-Agent' : "http://%s Linkchecker" % settings.SITE_DOMAIN})
try:
if TIMEOUT:
response = urllib2.urlopen(req, timeout=TIMEOUT)
else:
response = urllib2.urlopen(req)
except ValueError:
# ...except sometimes it triggers a bug in urllib2
if TIMEOUT:
response = urllib2.urlopen(url, timeout=TIMEOUT)
else:
response = urllib2.urlopen(url)
self.message = ' '.join([str(response.code), response.msg])
self.status = True
if self.url.count('#'):
anchor = self.url.split('#')[1]
from linkcheck import parse_anchors
try:
names = parse_anchors(response.read())
if anchor in names:
self.message = 'Working external hash anchor'
self.status = True
else:
self.message = 'Broken external hash anchor'
self.status = False
                    except Exception:
                        # The external web page is mal-formed, or parsing failed for some other reason (e.g. a bad encoding)
# I reckon a broken anchor on an otherwise good URL should count as a pass
self.message = "Page OK but anchor can't be checked"
self.status = True
except BadStatusLine:
self.message = "Bad Status Line"
except urllib2.HTTPError, e:
if hasattr(e, 'code') and hasattr(e, 'msg'):
self.message = ' '.join([str(e.code), e.msg])
else:
self.message = "Unknown Error"
except urllib2.URLError, e:
if hasattr(e, 'reason'):
self.message = 'Unreachable: '+str(e.reason)
elif hasattr(e, 'code') and e.code!=301:
self.message = 'Error: '+str(e.code)
else:
self.message = 'Redirect. Check manually: '+str(e.code)
except Exception, e:
self.message = 'Other Error: %s' % e
self.last_checked = datetime.now()
self.save()
return self.status
class Link(models.Model):
# A Link represents a specific URL in a specific field in a specific model
# Multiple Links can reference a single Url
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = generic.GenericForeignKey('content_type', 'object_id')
field = models.CharField(max_length=128)
url = models.ForeignKey(Url, related_name="links")
text = models.CharField(max_length=256, default='')
ignore = models.BooleanField(default=False)
@property
def display_url(self):
        # when page /test/ has an anchor link to /test/#anchor, we display it
# as "#anchor" rather than "/test/#anchor"
if self.url.url.count('#'):
url_part, anchor_part = self.url.url.split('#')
absolute_url = self.content_object.get_absolute_url()
if url_part == absolute_url:
return '#' + anchor_part
return self.url.url
def link_post_delete(sender, instance, **kwargs):
try:
#url.delete() => link.delete() => link_post_delete
#in this case link.url is already deleted from db, so we need a try here.
url = instance.url
count = url.links.all().count()
if count == 0:
url.delete()
except Url.DoesNotExist:
pass
model_signals.post_delete.connect(link_post_delete, sender=Link)
#-------------------------auto discover of LinkLists-------------------------
class AlreadyRegistered(Exception):
pass
all_linklists = {}
for app in settings.INSTALLED_APPS:
try:
app_path = import_module(app).__path__
except AttributeError:
continue
try:
imp.find_module('linklists', app_path)
except ImportError:
continue
the_module = import_module("%s.linklists" % app)
try:
for k in the_module.linklists.keys():
if k in all_linklists.keys():
raise AlreadyRegistered('The key %s is already registered in all_linklists' % k)
for l in the_module.linklists.values():
for l2 in all_linklists.values():
if l.model == l2.model:
raise AlreadyRegistered('The LinkList %s is already registered in all_linklists' % l)
all_linklists.update(the_module.linklists)
except AttributeError:
pass
#add a reference to the linklist in the model. This change is for internal hash link,
#but might also be useful elsewhere in the future
for key, linklist in all_linklists.items():
setattr(linklist.model, '_linklist', linklist)
#-------------------------register listeners-------------------------
import listeners
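# --- Usage sketch (illustrative; the URL is a placeholder) -------------------
# With Django configured and the linkcheck tables migrated, a Url row can be
# checked directly; check() records the outcome and returns the status:
#
#   url = Url.objects.create(url='http://example.org/', still_exists=True)
#   url.check(check_internal=False)   # external-only check
#   print(url.colour, url.get_message)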
|
{
"content_hash": "74f3d983c7c3759a5d00cae94d0e252c",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 137,
"avg_line_length": 39.19602272727273,
"alnum_prop": 0.5554105965064869,
"repo_name": "glarrain/django-linkcheck",
"id": "e50555a65ef8330806b48ba50e50367aff89e0fd",
"size": "13797",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "linkcheck/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import json
from decimal import Decimal
from unittest.mock import Mock, patch
import pytest
from django.core.exceptions import ValidationError
from django.utils import timezone
from stripe.stripe_object import StripeObject
from .....checkout.complete_checkout import complete_checkout
from .....order.actions import order_captured, order_refunded, order_voided
from .... import ChargeStatus, TransactionKind
from ....utils import price_to_minor_unit
from ..consts import (
AUTHORIZED_STATUS,
FAILED_STATUSES,
PROCESSING_STATUS,
SUCCESS_STATUS,
WEBHOOK_AUTHORIZED_EVENT,
WEBHOOK_CANCELED_EVENT,
WEBHOOK_FAILED_EVENT,
WEBHOOK_PROCESSING_EVENT,
WEBHOOK_SUCCESS_EVENT,
)
from ..webhooks import (
_finalize_checkout,
_process_payment_with_checkout,
_update_payment_with_new_transaction,
handle_authorized_payment_intent,
handle_failed_payment_intent,
handle_processing_payment_intent,
handle_refund,
handle_successful_payment_intent,
update_payment_method_details_from_intent,
)
@patch(
"saleor.payment.gateways.stripe.webhooks.complete_checkout", wraps=complete_checkout
)
@patch("saleor.payment.gateways.stripe.webhooks.update_payment_method")
def test_handle_successful_payment_intent_for_checkout(
_wrapped_update_payment_method,
wrapped_checkout_complete,
payment_stripe_for_checkout,
checkout_with_items,
stripe_plugin,
channel_USD,
):
payment = payment_stripe_for_checkout
payment.to_confirm = True
payment.save()
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.ACTION_TO_CONFIRM,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
payment_intent = StripeObject(id="ABC", last_response={})
payment_intent["amount_received"] = price_to_minor_unit(
payment.total, payment.currency
)
payment_intent["setup_future_usage"] = None
payment_intent["currency"] = payment.currency
payment_intent["status"] = SUCCESS_STATUS
payment_intent["payment_method"] = StripeObject()
handle_successful_payment_intent(payment_intent, plugin.config, channel_USD.slug)
payment.refresh_from_db()
assert wrapped_checkout_complete.called
assert payment.checkout_id is None
assert payment.order
assert payment.order.checkout_token == str(checkout_with_items.token)
transaction = payment.transactions.get(kind=TransactionKind.CAPTURE)
assert transaction.token == payment_intent.id
@patch(
"saleor.payment.gateways.stripe.webhooks.complete_checkout", wraps=complete_checkout
)
@patch("saleor.payment.gateway.refund")
def test_handle_successful_payment_intent_for_checkout_inactive_payment(
refund_mock,
wrapped_checkout_complete,
inactive_payment_stripe_for_checkout,
checkout_with_items,
stripe_plugin,
channel_USD,
):
payment = inactive_payment_stripe_for_checkout
payment.to_confirm = True
payment.save()
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.ACTION_TO_CONFIRM,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
payment_intent = StripeObject(id="ABC", last_response={})
payment_intent["amount_received"] = price_to_minor_unit(
payment.total, payment.currency
)
payment_intent["setup_future_usage"] = None
payment_intent["currency"] = payment.currency
payment_intent["status"] = SUCCESS_STATUS
handle_successful_payment_intent(payment_intent, plugin.config, channel_USD.slug)
payment.refresh_from_db()
assert refund_mock.called
assert not wrapped_checkout_complete.called
@patch("saleor.payment.gateway.refund")
@patch("saleor.checkout.complete_checkout._get_order_data")
def test_handle_successful_payment_intent_when_order_creation_raises_exception(
order_data_mock,
refund_mock,
payment_stripe_for_checkout,
checkout_with_items,
stripe_plugin,
channel_USD,
stripe_payment_intent,
):
order_data_mock.side_effect = ValidationError("Test error")
payment = payment_stripe_for_checkout
payment.to_confirm = True
payment.save()
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.CAPTURE,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
handle_successful_payment_intent(
stripe_payment_intent, plugin.config, channel_USD.slug
)
payment.refresh_from_db()
assert not payment.order
assert refund_mock.called
@pytest.mark.parametrize(
["metadata", "called"],
[({f"key{i}": f"value{i}" for i in range(5)}, True), ({}, False)],
)
@patch(
"saleor.payment.gateways.stripe.webhooks.complete_checkout", wraps=complete_checkout
)
@patch("saleor.payment.gateways.stripe.webhooks.update_payment_method")
def test_handle_successful_payment_intent_with_metadata(
wrapped_update_payment_method,
_wrapped_checkout_complete,
payment_stripe_for_order,
stripe_plugin,
channel_USD,
metadata,
called,
):
# given
payment = payment_stripe_for_order
current_metadata = {"currentkey": "currentvalue"}
payment.metadata = metadata
payment.charge_status = ChargeStatus.PENDING
payment.save()
plugin = stripe_plugin()
payment_intent = StripeObject(id="token", last_response={})
payment_intent["amount_received"] = price_to_minor_unit(
payment.total, payment.currency
)
payment_intent["metadata"] = current_metadata
payment_intent["charges"] = {"data": [{"payment_method_details": {"type": "card"}}]}
payment_intent["amount"] = payment.total
payment_intent["currency"] = payment.currency
payment_intent["payment_method"] = StripeObject()
# when
handle_successful_payment_intent(payment_intent, plugin.config, channel_USD.slug)
# then
if not called:
assert wrapped_update_payment_method.call_count == 0
else:
wrapped_update_payment_method.assert_called_once_with(
plugin.config.connection_params["secret_api_key"],
payment_intent.payment_method,
metadata,
)
@patch(
"saleor.payment.gateways.stripe.webhooks.complete_checkout", wraps=complete_checkout
)
@patch("saleor.payment.gateways.stripe.webhooks.update_payment_method")
def test_handle_successful_payment_intent_for_order(
_wrapped_update_payment_method,
wrapped_checkout_complete,
payment_stripe_for_order,
stripe_plugin,
channel_USD,
):
payment = payment_stripe_for_order
plugin = stripe_plugin()
payment_intent = StripeObject(id="ABC", last_response={})
payment_intent["amount"] = payment.total
payment_intent["currency"] = payment.currency
payment_intent["capture_method"] = "automatic"
payment_intent["payment_method"] = StripeObject()
handle_successful_payment_intent(payment_intent, plugin.config, channel_USD.slug)
assert wrapped_checkout_complete.called is False
@patch(
"saleor.payment.gateways.stripe.webhooks.complete_checkout", wraps=complete_checkout
)
@patch("saleor.payment.gateways.stripe.webhooks.update_payment_method")
def test_handle_successful_payment_intent_for_order_with_auth_payment(
_wrapped_update_payment_method,
wrapped_checkout_complete,
payment_stripe_for_order,
stripe_plugin,
channel_USD,
):
payment = payment_stripe_for_order
plugin = stripe_plugin()
payment_intent = StripeObject(id="token", last_response={})
payment_intent["amount_received"] = price_to_minor_unit(
payment.total, payment.currency
)
payment_intent["currency"] = payment.currency
payment_intent["setup_future_usage"] = None
payment_intent["status"] = SUCCESS_STATUS
payment_intent["payment_method"] = StripeObject()
handle_successful_payment_intent(payment_intent, plugin.config, channel_USD.slug)
payment.refresh_from_db()
assert payment.is_active
assert payment.charge_status == ChargeStatus.FULLY_CHARGED
assert payment.captured_amount == payment.total
assert payment.transactions.filter(kind=TransactionKind.CAPTURE).exists()
assert wrapped_checkout_complete.called is False
@patch(
"saleor.payment.gateways.stripe.webhooks.complete_checkout", wraps=complete_checkout
)
@patch("saleor.payment.gateways.stripe.webhooks.update_payment_method")
def test_handle_successful_payment_intent_for_order_with_pending_payment(
_wrapped_update_payment_method,
wrapped_checkout_complete,
payment_stripe_for_order,
stripe_plugin,
channel_USD,
):
payment = payment_stripe_for_order
transaction = payment.transactions.first()
transaction.kind = TransactionKind.PENDING
transaction.save()
plugin = stripe_plugin()
payment_intent = StripeObject(id="token", last_response={})
payment_intent["amount_received"] = price_to_minor_unit(
payment.total, payment.currency
)
payment_intent["currency"] = payment.currency
payment_intent["setup_future_usage"] = None
payment_intent["status"] = SUCCESS_STATUS
payment_intent["payment_method"] = StripeObject()
handle_successful_payment_intent(payment_intent, plugin.config, channel_USD.slug)
payment.refresh_from_db()
assert payment.is_active
assert payment.charge_status == ChargeStatus.FULLY_CHARGED
assert payment.captured_amount == payment.total
assert payment.transactions.filter(kind=TransactionKind.CAPTURE).exists()
assert wrapped_checkout_complete.called is False
@pytest.mark.parametrize("called", [True, False])
@patch(
"saleor.payment.gateways.stripe.webhooks._process_payment_with_checkout",
wraps=_process_payment_with_checkout,
)
@patch("saleor.payment.gateways.stripe.webhooks.update_payment_method")
def test_handle_successful_payment_intent_different_checkout_channel_slug(
_wrapped_update_payment_method,
wrapped_process_payment_with_checkout,
payment_stripe_for_checkout,
checkout_with_items,
stripe_plugin,
channel_USD,
channel_PLN,
called,
):
# given
channel = channel_USD if called else channel_PLN
payment = payment_stripe_for_checkout
payment.to_confirm = True
payment.save()
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.ACTION_TO_CONFIRM,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
payment_intent = StripeObject(id="ABC", last_response={})
payment_intent["amount_received"] = price_to_minor_unit(
payment.total, payment.currency
)
payment_intent["setup_future_usage"] = None
payment_intent["currency"] = payment.currency
payment_intent["status"] = SUCCESS_STATUS
payment_intent["payment_method"] = StripeObject()
# when
handle_successful_payment_intent(payment_intent, plugin.config, channel.slug)
# then
assert wrapped_process_payment_with_checkout.called == called
@pytest.mark.parametrize("called", [True, False])
@patch("saleor.payment.gateways.stripe.webhooks.order_captured", wraps=order_captured)
@patch("saleor.payment.gateways.stripe.webhooks.update_payment_method")
def test_handle_successful_payment_intent_different_order_channel_slug(
_wrapped_update_payment_method,
wrapped_order_captured,
payment_stripe_for_order,
stripe_plugin,
channel_USD,
channel_PLN,
called,
):
# given
channel = channel_USD if called else channel_PLN
payment = payment_stripe_for_order
plugin = stripe_plugin()
payment_intent = StripeObject(id="token", last_response={})
payment_intent["amount_received"] = payment.total
payment_intent["currency"] = payment.currency
payment_intent["capture_method"] = "automatic"
payment_intent["setup_future_usage"] = None
payment_intent["payment_method"] = StripeObject()
# when
handle_successful_payment_intent(payment_intent, plugin.config, channel.slug)
# then
assert wrapped_order_captured.called == called
@patch(
"saleor.payment.gateways.stripe.webhooks.complete_checkout", wraps=complete_checkout
)
@patch("saleor.payment.gateways.stripe.webhooks.update_payment_method")
def test_handle_authorized_payment_intent_for_checkout(
_wrapped_update_payment_method,
wrapped_checkout_complete,
payment_stripe_for_checkout,
checkout_with_items,
stripe_plugin,
channel_USD,
):
payment = payment_stripe_for_checkout
payment.to_confirm = True
payment.save()
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.ACTION_TO_CONFIRM,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
payment_intent = StripeObject(id="ABC", last_response={})
payment_intent["amount"] = price_to_minor_unit(payment.total, payment.currency)
payment_intent["currency"] = payment.currency
payment_intent["status"] = AUTHORIZED_STATUS
payment_intent["payment_method"] = StripeObject()
handle_authorized_payment_intent(payment_intent, plugin.config, channel_USD.slug)
payment.refresh_from_db()
assert wrapped_checkout_complete.called
assert payment.checkout_id is None
assert not payment.cc_brand
assert not payment.cc_last_digits
assert not payment.cc_exp_year
assert not payment.cc_exp_month
assert not payment.payment_method_type
assert payment.order
assert payment.order.checkout_token == str(checkout_with_items.token)
transaction = payment.transactions.get(kind=TransactionKind.AUTH)
assert transaction.token == payment_intent.id
@patch(
"saleor.payment.gateways.stripe.webhooks.complete_checkout", wraps=complete_checkout
)
def test_handle_authorized_payment_intent_for_checkout_with_payment_details(
wrapped_checkout_complete,
payment_stripe_for_checkout,
checkout_with_items,
stripe_plugin,
channel_USD,
stripe_payment_intent_with_details,
):
intent = stripe_payment_intent_with_details
payment = payment_stripe_for_checkout
payment.to_confirm = True
payment.save()
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.ACTION_TO_CONFIRM,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
intent["amount"] = price_to_minor_unit(payment.total, payment.currency)
intent["currency"] = payment.currency
intent["status"] = AUTHORIZED_STATUS
handle_authorized_payment_intent(intent, plugin.config, channel_USD.slug)
payment.refresh_from_db()
assert wrapped_checkout_complete.called
assert payment.checkout_id is None
assert payment.cc_brand == "visa"
assert payment.cc_last_digits == "3220"
assert payment.cc_exp_year == 2030
assert payment.cc_exp_month == 3
assert payment.payment_method_type == "card"
assert payment.order
assert payment.order.checkout_token == str(checkout_with_items.token)
transaction = payment.transactions.get(kind=TransactionKind.AUTH)
assert transaction.token == intent.id
@patch("saleor.payment.gateway.void")
def test_handle_authorized_payment_intent_for_checkout_inactive_payment(
void_mock,
inactive_payment_stripe_for_checkout,
checkout_with_items,
stripe_plugin,
channel_USD,
):
payment = inactive_payment_stripe_for_checkout
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.ACTION_TO_CONFIRM,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
payment_intent = StripeObject(id="ABC", last_response={})
payment_intent["amount"] = price_to_minor_unit(payment.total, payment.currency)
payment_intent["currency"] = payment.currency
payment_intent["status"] = AUTHORIZED_STATUS
handle_authorized_payment_intent(payment_intent, plugin.config, channel_USD.slug)
payment.refresh_from_db()
assert void_mock.called
@patch("saleor.checkout.complete_checkout._get_order_data")
@patch("saleor.payment.gateway.void")
def test_handle_authorized_payment_intent_when_order_creation_raises_exception(
void_mock,
order_data_mock,
payment_stripe_for_checkout,
checkout_with_items,
stripe_plugin,
channel_USD,
stripe_payment_intent,
):
order_data_mock.side_effect = ValidationError("Test error")
payment = payment_stripe_for_checkout
payment.to_confirm = True
payment.save()
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.ACTION_TO_CONFIRM,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
handle_authorized_payment_intent(
stripe_payment_intent, plugin.config, channel_USD.slug
)
payment.refresh_from_db()
assert not payment.order
assert void_mock.called
@patch(
"saleor.payment.gateways.stripe.webhooks.complete_checkout", wraps=complete_checkout
)
@patch("saleor.payment.gateways.stripe.webhooks.update_payment_method")
def test_handle_authorized_payment_intent_for_order(
_wrapped_update_payment_method,
wrapped_checkout_complete,
payment_stripe_for_order,
checkout_with_items,
stripe_plugin,
channel_USD,
):
payment = payment_stripe_for_order
plugin = stripe_plugin()
payment_intent = StripeObject(id="ABC", last_response={})
payment_intent["amount"] = payment.total
payment_intent["currency"] = payment.currency
payment_intent["status"] = AUTHORIZED_STATUS
handle_authorized_payment_intent(payment_intent, plugin.config, channel_USD.slug)
assert wrapped_checkout_complete.called is False
@patch(
"saleor.payment.gateways.stripe.webhooks.complete_checkout", wraps=complete_checkout
)
@patch("saleor.payment.gateways.stripe.webhooks.update_payment_method")
def test_handle_authorized_payment_intent_for_processing_order_payment(
_wrapped_update_payment_method,
wrapped_checkout_complete,
payment_stripe_for_order,
checkout_with_items,
stripe_plugin,
channel_USD,
):
payment = payment_stripe_for_order
payment.charge_status = ChargeStatus.PENDING
plugin = stripe_plugin()
payment_intent = StripeObject(id="ABC", last_response={})
payment_intent["amount"] = payment.total
payment_intent["currency"] = payment.currency
payment_intent["status"] = AUTHORIZED_STATUS
handle_authorized_payment_intent(payment_intent, plugin.config, channel_USD.slug)
assert wrapped_checkout_complete.called is False
@pytest.mark.parametrize(
["metadata", "called"], [({"key": "value"}, True), ({}, False)]
)
@patch(
"saleor.payment.gateways.stripe.webhooks.complete_checkout", wraps=complete_checkout
)
@patch("saleor.payment.gateways.stripe.webhooks.update_payment_method")
def test_handle_authorized_payment_intent_with_metadata(
wrapped_update_payment_method,
_wrapped_checkout_complete,
payment_stripe_for_order,
checkout_with_items,
stripe_plugin,
channel_USD,
metadata,
called,
):
# given
payment = payment_stripe_for_order
current_metadata = {"currentkey": "currentvalue"}
payment.metadata = metadata
payment.charge_status = ChargeStatus.PENDING
payment.save()
plugin = stripe_plugin()
payment_intent = StripeObject(id="token", last_response={})
payment_intent["metadata"] = current_metadata
payment_intent["payment_method"] = StripeObject()
payment_intent["charges"] = {"data": [{"payment_method_details": {"type": "card"}}]}
payment_intent["amount"] = payment.total
payment_intent["currency"] = payment.currency
# when
handle_authorized_payment_intent(payment_intent, plugin.config, channel_USD.slug)
# then
if not called:
assert wrapped_update_payment_method.call_count == 0
else:
wrapped_update_payment_method.assert_called_with(
plugin.config.connection_params["secret_api_key"],
payment_intent.payment_method,
metadata,
)
@pytest.mark.parametrize("called", [True, False])
@patch(
"saleor.payment.gateways.stripe.webhooks._update_payment_with_new_transaction",
wraps=_update_payment_with_new_transaction,
)
@patch("saleor.payment.gateways.stripe.webhooks.update_payment_method")
def test_handle_authorized_payment_intent_different_order_channel_slug(
_wrapped_update_payment_method,
wrapped_update_payment_with_new_transaction,
channel_PLN,
payment_stripe_for_order,
checkout_with_items,
stripe_plugin,
channel_USD,
called,
):
# given
channel = channel_USD if called else channel_PLN
payment = payment_stripe_for_order
payment.charge_status = ChargeStatus.PENDING
payment.checkout = None
payment.save()
plugin = stripe_plugin()
payment_intent = StripeObject(id="token", last_response={})
payment_intent["amount"] = payment.total
payment_intent["currency"] = payment.currency
payment_intent["status"] = AUTHORIZED_STATUS
payment_intent["payment_method"] = StripeObject()
# when
handle_authorized_payment_intent(payment_intent, plugin.config, channel.slug)
# then
assert wrapped_update_payment_with_new_transaction.called == called
@pytest.mark.parametrize("called", [True, False])
@patch(
"saleor.payment.gateways.stripe.webhooks._process_payment_with_checkout",
wraps=_process_payment_with_checkout,
)
@patch("saleor.payment.gateways.stripe.webhooks.update_payment_method")
def test_handle_authorized_payment_intent_different_checkout_channel_slug(
_wrapped_update_payment_method,
wrapped_process_payment_with_checkout,
payment_stripe_for_checkout,
stripe_plugin,
channel_USD,
channel_PLN,
called,
):
# given
channel = channel_USD if called else channel_PLN
payment = payment_stripe_for_checkout
payment.to_confirm = True
payment.save()
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.ACTION_TO_CONFIRM,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
payment_intent = StripeObject(id="ABC", last_response={})
payment_intent["amount"] = price_to_minor_unit(payment.total, payment.currency)
payment_intent["currency"] = payment.currency
payment_intent["status"] = AUTHORIZED_STATUS
payment_intent["payment_method"] = StripeObject()
# when
handle_authorized_payment_intent(payment_intent, plugin.config, channel.slug)
# then
assert wrapped_process_payment_with_checkout.called == called
@patch(
"saleor.payment.gateways.stripe.webhooks.complete_checkout", wraps=complete_checkout
)
def test_handle_processing_payment_intent_for_order(
wrapped_checkout_complete,
payment_stripe_for_order,
checkout_with_items,
stripe_plugin,
channel_USD,
):
payment = payment_stripe_for_order
plugin = stripe_plugin()
payment_intent = StripeObject(id="ABC", last_response={})
payment_intent["amount"] = payment.total
payment_intent["currency"] = payment.currency
payment_intent["status"] = PROCESSING_STATUS
handle_processing_payment_intent(payment_intent, plugin.config, channel_USD.slug)
assert wrapped_checkout_complete.called is False
@patch(
"saleor.payment.gateways.stripe.webhooks.complete_checkout", wraps=complete_checkout
)
def test_handle_processing_payment_intent_for_checkout(
wrapped_checkout_complete,
payment_stripe_for_checkout,
checkout_with_items,
stripe_plugin,
channel_USD,
):
payment = payment_stripe_for_checkout
payment.to_confirm = True
payment.save()
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.ACTION_TO_CONFIRM,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
payment_intent = StripeObject(id="ABC", last_response={})
payment_intent["amount"] = price_to_minor_unit(payment.total, payment.currency)
payment_intent["currency"] = payment.currency
payment_intent["status"] = PROCESSING_STATUS
handle_processing_payment_intent(payment_intent, plugin.config, channel_USD.slug)
payment.refresh_from_db()
assert wrapped_checkout_complete.called
assert payment.checkout_id is None
assert payment.order
assert payment.order.checkout_token == str(checkout_with_items.token)
transaction = payment.transactions.get(kind=TransactionKind.PENDING)
assert transaction.token == payment_intent.id
@patch(
"saleor.payment.gateways.stripe.webhooks.complete_checkout", wraps=complete_checkout
)
def test_handle_processing_payment_intent_for_checkout_inactive_payment(
wrapped_checkout_complete,
inactive_payment_stripe_for_checkout,
checkout_with_items,
stripe_plugin,
channel_USD,
):
payment = inactive_payment_stripe_for_checkout
payment.to_confirm = True
payment.save()
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.ACTION_TO_CONFIRM,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
payment_intent = StripeObject(id="ABC", last_response={})
payment_intent["amount"] = price_to_minor_unit(payment.total, payment.currency)
payment_intent["currency"] = payment.currency
payment_intent["status"] = PROCESSING_STATUS
handle_processing_payment_intent(payment_intent, plugin.config, channel_USD.slug)
assert not wrapped_checkout_complete.called
@patch("saleor.checkout.complete_checkout._get_order_data")
@patch("saleor.payment.gateway.void")
@patch("saleor.payment.gateway.refund")
def test_handle_processing_payment_intent_when_order_creation_raises_exception(
refund_mock,
void_mock,
order_data_mock,
payment_stripe_for_checkout,
checkout_with_items,
stripe_plugin,
channel_USD,
stripe_payment_intent,
):
order_data_mock.side_effect = ValidationError("Test error")
payment = payment_stripe_for_checkout
payment.to_confirm = True
payment.save()
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.ACTION_TO_CONFIRM,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
stripe_payment_intent["status"] = PROCESSING_STATUS
handle_processing_payment_intent(
stripe_payment_intent, plugin.config, channel_USD.slug
)
payment.refresh_from_db()
assert not payment.order
assert not void_mock.called
assert not refund_mock.called
@pytest.mark.parametrize("called", [True, False])
@patch(
"saleor.payment.gateways.stripe.webhooks._process_payment_with_checkout",
wraps=_process_payment_with_checkout,
)
def test_handle_processing_payment_intent_different_order_channel_slug(
wrapped_process_payment_with_checkout,
payment_stripe_for_order,
checkout_with_items,
stripe_plugin,
channel_USD,
channel_PLN,
called,
):
# given
channel = channel_USD if called else channel_PLN
payment = payment_stripe_for_order
plugin = stripe_plugin()
payment_intent = StripeObject(id="ABC", last_response={})
payment_intent["amount"] = payment.total
payment_intent["currency"] = payment.currency
payment_intent["status"] = PROCESSING_STATUS
# when
handle_processing_payment_intent(payment_intent, plugin.config, channel.slug)
# then
assert not wrapped_process_payment_with_checkout.called
@pytest.mark.parametrize("called", [True, False])
@patch(
"saleor.payment.gateways.stripe.webhooks._process_payment_with_checkout",
wraps=_process_payment_with_checkout,
)
def test_handle_processing_payment_intent_different_checkout_channel_slug(
wrapped_process_payment_with_checkout,
payment_stripe_for_checkout,
checkout_with_items,
stripe_plugin,
channel_USD,
channel_PLN,
called,
):
# given
channel = channel_USD if called else channel_PLN
payment = payment_stripe_for_checkout
payment.to_confirm = True
payment.save()
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.ACTION_TO_CONFIRM,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
payment_intent = StripeObject(id="ABC", last_response={})
payment_intent["amount"] = price_to_minor_unit(payment.total, payment.currency)
payment_intent["currency"] = payment.currency
payment_intent["status"] = PROCESSING_STATUS
# when
handle_processing_payment_intent(payment_intent, plugin.config, channel.slug)
# then
assert wrapped_process_payment_with_checkout.called == called
def test_handle_failed_payment_intent_for_checkout(
stripe_plugin, payment_stripe_for_checkout, channel_USD
):
payment = payment_stripe_for_checkout
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.ACTION_TO_CONFIRM,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
payment_intent = StripeObject(id="ABC", last_response={})
payment_intent["amount"] = payment.total
payment_intent["currency"] = payment.currency
payment_intent["status"] = FAILED_STATUSES[0]
handle_failed_payment_intent(payment_intent, plugin.config, channel_USD.slug)
payment.refresh_from_db()
assert not payment.order_id
assert not payment.is_active
assert payment.charge_status == ChargeStatus.CANCELLED
assert payment.transactions.filter(kind=TransactionKind.CANCEL).exists()
def test_handle_failed_payment_intent_for_order(
stripe_plugin, payment_stripe_for_order, channel_USD
):
payment = payment_stripe_for_order
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.ACTION_TO_CONFIRM,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
payment_intent = StripeObject(id="ABC", last_response={})
payment_intent["amount"] = payment.total
payment_intent["currency"] = payment.currency
payment_intent["status"] = FAILED_STATUSES[0]
handle_failed_payment_intent(payment_intent, plugin.config, channel_USD.slug)
payment.refresh_from_db()
assert not payment.is_active
assert payment.charge_status == ChargeStatus.CANCELLED
assert payment.transactions.filter(kind=TransactionKind.CANCEL).exists()
@pytest.mark.parametrize("called", [True, False])
@patch(
"saleor.payment.gateways.stripe.webhooks._update_payment_with_new_transaction",
wraps=_update_payment_with_new_transaction,
)
@patch("saleor.payment.gateways.stripe.webhooks.order_voided", wraps=order_voided)
def test_handle_failed_payment_intent_different_order_channel_slug(
    wrapped_order_voided,
    wrapped_update_payment_with_new_transaction,
payment_stripe_for_order,
stripe_plugin,
channel_USD,
channel_PLN,
called,
):
# given
channel = channel_USD if called else channel_PLN
payment = payment_stripe_for_order
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.ACTION_TO_CONFIRM,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
payment_intent = StripeObject(id="ABC", last_response={})
payment_intent["amount"] = payment.total
payment_intent["currency"] = payment.currency
payment_intent["status"] = FAILED_STATUSES[0]
# when
handle_failed_payment_intent(payment_intent, plugin.config, channel.slug)
# then
assert wrapped_update_payment_with_new_transaction.called == called
assert wrapped_order_voided.called == called
@pytest.mark.parametrize("called", [True, False])
@patch("saleor.payment.gateways.stripe.webhooks.order_voided", wraps=order_voided)
@patch(
"saleor.payment.gateways.stripe.webhooks._update_payment_with_new_transaction",
wraps=_update_payment_with_new_transaction,
)
def test_handle_failed_payment_intent_different_checkout_channel_slug(
wrapped_update_payment_with_new_transaction,
wrapped_order_voided,
payment_stripe_for_checkout,
stripe_plugin,
channel_USD,
channel_PLN,
called,
):
# given
channel = channel_USD if called else channel_PLN
payment = payment_stripe_for_checkout
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.ACTION_TO_CONFIRM,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
payment_intent = StripeObject(id="ABC", last_response={})
payment_intent["amount"] = payment.total
payment_intent["currency"] = payment.currency
payment_intent["status"] = FAILED_STATUSES[0]
# when
handle_failed_payment_intent(payment_intent, plugin.config, channel.slug)
# then
assert wrapped_update_payment_with_new_transaction.called == called
assert not wrapped_order_voided.called
def test_handle_fully_refund(stripe_plugin, payment_stripe_for_order, channel_USD):
payment = payment_stripe_for_order
payment.captured_amount = payment.total
payment.save()
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.CAPTURE,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
refund = StripeObject(id="refund_id")
refund["amount"] = price_to_minor_unit(payment.total, payment.currency)
refund["currency"] = payment.currency
refund["last_response"] = None
charge = StripeObject()
charge["payment_intent"] = "ABC"
charge["refunds"] = StripeObject()
charge["refunds"]["data"] = [refund]
handle_refund(charge, plugin.config, channel_USD.slug)
payment.refresh_from_db()
assert payment.charge_status == ChargeStatus.FULLY_REFUNDED
assert payment.is_active is False
assert payment.captured_amount == Decimal("0")
def test_handle_partial_refund(stripe_plugin, payment_stripe_for_order, channel_USD):
payment = payment_stripe_for_order
payment.captured_amount = payment.total
payment.save()
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.CAPTURE,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
refund = StripeObject(id="refund_id")
refund["amount"] = price_to_minor_unit(Decimal("10"), payment.currency)
refund["currency"] = payment.currency
refund["last_response"] = None
charge = StripeObject()
charge["payment_intent"] = "ABC"
charge["refunds"] = StripeObject()
charge["refunds"]["data"] = [refund]
handle_refund(charge, plugin.config, channel_USD.slug)
payment.refresh_from_db()
assert payment.charge_status == ChargeStatus.PARTIALLY_REFUNDED
assert payment.is_active is True
assert payment.captured_amount == payment.total - Decimal("10")
def test_handle_refund_already_processed(
stripe_plugin, payment_stripe_for_order, channel_USD
):
payment = payment_stripe_for_order
payment.charge_status = ChargeStatus.PARTIALLY_REFUNDED
payment.captured_amount = payment.total - Decimal("10")
payment.save()
refund_id = "refund_abc"
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.REFUND,
amount=payment.total,
currency=payment.currency,
token=refund_id,
gateway_response={},
)
plugin = stripe_plugin()
refund = StripeObject(id=refund_id)
refund["amount"] = price_to_minor_unit(Decimal("10"), payment.currency)
refund["currency"] = payment.currency
refund["last_response"] = None
charge = StripeObject()
charge["payment_intent"] = "ABC"
charge["refunds"] = StripeObject()
charge["refunds"]["data"] = [refund]
handle_refund(charge, plugin.config, channel_USD.slug)
payment.refresh_from_db()
assert payment.charge_status == ChargeStatus.PARTIALLY_REFUNDED
assert payment.is_active is True
assert payment.captured_amount == payment.total - Decimal("10")
@pytest.mark.parametrize("called", [True, False])
@patch(
"saleor.payment.gateways.stripe.webhooks._update_payment_with_new_transaction",
wraps=_update_payment_with_new_transaction,
)
@patch(
"saleor.payment.gateways.stripe.webhooks.order_refunded",
wraps=order_refunded,
)
def test_handle_refund_different_order_channel_slug(
    wrapped_order_refunded,
    wrapped_update_payment_with_new_transaction,
stripe_plugin,
payment_stripe_for_order,
channel_USD,
channel_PLN,
called,
):
# given
channel = channel_USD if called else channel_PLN
payment = payment_stripe_for_order
payment.captured_amount = payment.total
payment.save()
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.CAPTURE,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
refund = StripeObject(id="refund_id")
refund["amount"] = price_to_minor_unit(payment.total, payment.currency)
refund["currency"] = payment.currency
refund["last_response"] = None
charge = StripeObject()
charge["payment_intent"] = "ABC"
charge["refunds"] = StripeObject()
charge["refunds"]["data"] = [refund]
# when
handle_refund(charge, plugin.config, channel.slug)
# then
assert wrapped_update_payment_with_new_transaction.called == called
assert wrapped_order_refunded.called == called
@pytest.mark.parametrize("called", [True, False])
@patch(
"saleor.payment.gateways.stripe.webhooks.order_refunded",
wraps=order_refunded,
)
@patch(
"saleor.payment.gateways.stripe.webhooks._update_payment_with_new_transaction",
wraps=_update_payment_with_new_transaction,
)
def test_handle_refund_different_checkout_channel_slug(
wrapped_update_payment_with_new_transaction,
wrapped_order_refunded,
stripe_plugin,
payment_stripe_for_checkout,
channel_USD,
channel_PLN,
called,
):
# given
channel = channel_USD if called else channel_PLN
payment = payment_stripe_for_checkout
payment.captured_amount = payment.total
payment.save()
payment.transactions.create(
is_success=True,
action_required=True,
kind=TransactionKind.CAPTURE,
amount=payment.total,
currency=payment.currency,
token="ABC",
gateway_response={},
)
plugin = stripe_plugin()
refund = StripeObject(id="refund_id")
refund["amount"] = price_to_minor_unit(payment.total, payment.currency)
refund["currency"] = payment.currency
refund["last_response"] = None
charge = StripeObject()
charge["payment_intent"] = "ABC"
charge["refunds"] = StripeObject()
charge["refunds"]["data"] = [refund]
# when
handle_refund(charge, plugin.config, channel.slug)
# then
assert wrapped_update_payment_with_new_transaction.called == called
assert not wrapped_order_refunded.called
@pytest.mark.parametrize(
"webhook_type, fun_to_mock",
[
(WEBHOOK_SUCCESS_EVENT, "handle_successful_payment_intent"),
(WEBHOOK_PROCESSING_EVENT, "handle_processing_payment_intent"),
(WEBHOOK_FAILED_EVENT, "handle_failed_payment_intent"),
(WEBHOOK_AUTHORIZED_EVENT, "handle_authorized_payment_intent"),
(WEBHOOK_CANCELED_EVENT, "handle_failed_payment_intent"),
],
)
@patch("saleor.payment.gateways.stripe.stripe_api.stripe.Webhook.construct_event")
def test_handle_webhook_events(
mocked_webhook_event, webhook_type, fun_to_mock, stripe_plugin, rf, channel_USD
):
dummy_payload = {
"id": "evt_1Ip9ANH1Vac4G4dbE9ch7zGS",
}
request = rf.post(
path="/webhooks/", data=dummy_payload, content_type="application/json"
)
stripe_signature = "1234"
request.META["HTTP_STRIPE_SIGNATURE"] = stripe_signature
event = Mock()
event.type = webhook_type
event.data.object = StripeObject()
mocked_webhook_event.return_value = event
plugin = stripe_plugin()
with patch(f"saleor.payment.gateways.stripe.webhooks.{fun_to_mock}") as mocked_fun:
plugin.webhook(request, "/webhooks/", None)
mocked_fun.assert_called_once_with(
event.data.object, plugin.config, channel_USD.slug
)
api_key = plugin.config.connection_params["secret_api_key"]
endpoint_secret = plugin.config.connection_params["webhook_secret"]
mocked_webhook_event.assert_called_once_with(
json.dumps(dummy_payload).encode("utf-8"),
stripe_signature,
endpoint_secret,
api_key=api_key,
)
@patch("saleor.payment.gateway.refund")
@patch("saleor.checkout.complete_checkout._get_order_data")
def test_finalize_checkout_not_created_order_payment_refund(
order_data_mock,
refund_mock,
stripe_plugin,
channel_USD,
payment_stripe_for_checkout,
stripe_payment_intent,
):
order_data_mock.side_effect = ValidationError("Test error")
stripe_plugin()
checkout = payment_stripe_for_checkout.checkout
_finalize_checkout(
checkout,
payment_stripe_for_checkout,
stripe_payment_intent,
TransactionKind.CAPTURE,
payment_stripe_for_checkout.total,
payment_stripe_for_checkout.currency,
)
payment_stripe_for_checkout.refresh_from_db()
assert not payment_stripe_for_checkout.order
assert refund_mock.called
@patch("saleor.payment.gateway.refund")
def test_finalize_checkout_not_created_checkout_variant_deleted_order_payment_refund(
refund_mock,
stripe_plugin,
channel_USD,
payment_stripe_for_checkout,
stripe_payment_intent,
):
stripe_plugin()
checkout = payment_stripe_for_checkout.checkout
checkout.lines.first().delete()
checkout.price_expiration = timezone.now()
checkout.save(update_fields=["price_expiration"])
_finalize_checkout(
checkout,
payment_stripe_for_checkout,
stripe_payment_intent,
TransactionKind.CAPTURE,
payment_stripe_for_checkout.total,
payment_stripe_for_checkout.currency,
)
payment_stripe_for_checkout.refresh_from_db()
assert not payment_stripe_for_checkout.order
assert refund_mock.called
@patch("saleor.payment.gateway.void")
@patch("saleor.checkout.complete_checkout._get_order_data")
def test_finalize_checkout_not_created_order_payment_void(
order_data_mock,
void_mock,
stripe_plugin,
channel_USD,
payment_stripe_for_checkout,
stripe_payment_intent,
):
order_data_mock.side_effect = ValidationError("Test error")
stripe_plugin()
checkout = payment_stripe_for_checkout.checkout
_finalize_checkout(
checkout,
payment_stripe_for_checkout,
stripe_payment_intent,
TransactionKind.AUTH,
payment_stripe_for_checkout.total,
payment_stripe_for_checkout.currency,
)
payment_stripe_for_checkout.refresh_from_db()
assert not payment_stripe_for_checkout.order
assert void_mock.called
@patch("saleor.payment.gateway.void")
def test_finalize_checkout_not_created_checkout_variant_deleted_order_payment_void(
void_mock,
stripe_plugin,
channel_USD,
payment_stripe_for_checkout,
stripe_payment_intent,
):
stripe_plugin()
checkout = payment_stripe_for_checkout.checkout
checkout.lines.first().delete()
checkout.price_expiration = timezone.now()
checkout.save(update_fields=["price_expiration"])
_finalize_checkout(
checkout,
payment_stripe_for_checkout,
stripe_payment_intent,
TransactionKind.AUTH,
payment_stripe_for_checkout.total,
payment_stripe_for_checkout.currency,
)
payment_stripe_for_checkout.refresh_from_db()
assert not payment_stripe_for_checkout.order
assert void_mock.called
def test_update_payment_method_details_from_intent_payment_info_does_not_exist(
payment_stripe_for_checkout, stripe_payment_intent
):
payment = payment_stripe_for_checkout
update_payment_method_details_from_intent(payment, stripe_payment_intent)
payment.refresh_from_db()
assert not payment.cc_brand
assert not payment.cc_last_digits
assert not payment.cc_exp_year
assert not payment.cc_exp_month
assert not payment.payment_method_type
def test_update_payment_method_details_from_intent_payment_info_exists(
payment_stripe_for_checkout, stripe_payment_intent_with_details
):
intent = stripe_payment_intent_with_details
payment = payment_stripe_for_checkout
update_payment_method_details_from_intent(payment, intent)
payment.refresh_from_db()
assert payment.cc_brand == "visa"
assert payment.cc_last_digits == "3220"
assert payment.cc_exp_year == 2030
assert payment.cc_exp_month == 3
assert payment.payment_method_type == "card"
|
{
"content_hash": "a51e60a8ff69cc14f529594a8735b173",
"timestamp": "",
"source": "github",
"line_count": 1469,
"max_line_length": 88,
"avg_line_length": 31.733151803948264,
"alnum_prop": 0.6985369830101252,
"repo_name": "mociepka/saleor",
"id": "f06f5964650c27cb4eaef7cfa95b2b41309d2948",
"size": "46616",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "saleor/payment/gateways/stripe/tests/test_webhooks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "2228"
},
{
"name": "HTML",
"bytes": "249248"
},
{
"name": "Procfile",
"bytes": "290"
},
{
"name": "Python",
"bytes": "12686831"
},
{
"name": "Shell",
"bytes": "439"
}
],
"symlink_target": ""
}
|
from pprint import pprint
import cgum.statement
import cgum.program
import bughunter.pool as pl
prog = cgum.program.Program.from_source_file("replace-while-loop-body/after.c")
pool = pl.DonorPool(prog, pl.StatementCriterion())
pprint(pool.contents())
|
{
"content_hash": "c84023ef53cc98823c5c9aec28870e16",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 79,
"avg_line_length": 28.11111111111111,
"alnum_prop": 0.7905138339920948,
"repo_name": "ChrisTimperley/BugCollector",
"id": "39f1a82c406c64552dfc77ca62e2f1aa100feb43",
"size": "272",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/pool.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8412"
}
],
"symlink_target": ""
}
|
from baseframe import __
from baseframe.forms import Form, ValidName
from baseframe.forms.sqlalchemy import AvailableName
import wtforms
import wtforms.fields.html5
__all__ = ['UserGroupForm']
class UserGroupForm(Form):
name = wtforms.TextField(__("URL name"), validators=[wtforms.validators.Required(), ValidName(), AvailableName()])
title = wtforms.TextField(__("Title"), validators=[wtforms.validators.Required()])
users = wtforms.TextAreaField(__("Users"), validators=[wtforms.validators.Required()],
description=__("Usernames or email addresses, one per line"))
|
{
"content_hash": "3a9c20ef26db7b935c4069bc434d3519",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 118,
"avg_line_length": 42.142857142857146,
"alnum_prop": 0.735593220338983,
"repo_name": "jace/failconfunnel",
"id": "d70f7d4425d381019e6e64a3212ed51c7b1393c9",
"size": "615",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "funnel/forms/usergroup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "51076"
},
{
"name": "JavaScript",
"bytes": "151492"
},
{
"name": "Python",
"bytes": "134867"
},
{
"name": "Ruby",
"bytes": "372"
}
],
"symlink_target": ""
}
|
from __future__ import division, unicode_literals
"""
Created on Mar 8, 2012
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 8, 2012"
import unittest
import os
from nose.exc import SkipTest
from pymatgen import Composition
from pymatgen.io.vasp.inputs import Poscar
import pymatgen.io.ase as aio
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class AseAtomsAdaptorTest(unittest.TestCase):
def test_get_atoms(self):
if not aio.ase_loaded:
raise SkipTest("ASE not present. Skipping...")
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
structure = p.structure
atoms = aio.AseAtomsAdaptor.get_atoms(structure)
ase_composition = Composition(atoms.get_name())
self.assertEqual(ase_composition, structure.composition)
def test_get_structure(self):
if not aio.ase_loaded:
raise SkipTest("ASE not present. Skipping...")
p = Poscar.from_file(os.path.join(test_dir, 'POSCAR'))
atoms = aio.AseAtomsAdaptor.get_atoms(p.structure)
self.assertEqual(aio.AseAtomsAdaptor.get_structure(atoms).formula,
"Fe4 P4 O16")
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
if aio.ase_loaded:
unittest.main()
else:
print("ASE not loaded. Skipping tests")
|
{
"content_hash": "acadbe28b14ff86fa5b53aa66e8d2d92",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 74,
"avg_line_length": 29.037735849056602,
"alnum_prop": 0.6270305393112411,
"repo_name": "rousseab/pymatgen",
"id": "ce7adcbf68157d230750d02a20534d4d7030647e",
"size": "1556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymatgen/io/tests/test_ase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Groff",
"bytes": "868"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "3710783"
}
],
"symlink_target": ""
}
|
"""Declarative container example."""
from dependency_injector import containers, providers
class Container(containers.DeclarativeContainer):
factory1 = providers.Factory(object)
factory2 = providers.Factory(object)
if __name__ == "__main__":
container = Container()
object1 = container.factory1()
object2 = container.factory2()
print(container.providers)
# {
# "factory1": <dependency_injector.providers.Factory(...),
# "factory2": <dependency_injector.providers.Factory(...),
# }
|
{
"content_hash": "722b33ecfc425eefb8ac7834e5ac5b87",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 66,
"avg_line_length": 23.391304347826086,
"alnum_prop": 0.671003717472119,
"repo_name": "ets-labs/dependency_injector",
"id": "21a43174441e58a461fdb4ff1226fbcde5990660",
"size": "538",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/containers/declarative.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "171148"
}
],
"symlink_target": ""
}
|
"""
Resource Filtering Logic
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import datetime, timedelta
import fnmatch
import logging
import operator
import re
from dateutil.tz import tzutc
from dateutil.parser import parse
import jmespath
import ipaddress
import six
from c7n.executor import ThreadPoolExecutor
from c7n.registry import PluginRegistry
from c7n.resolver import ValuesFrom
from c7n.utils import set_annotation, type_schema, parse_cidr
class FilterValidationError(Exception):
pass
# Matching filters annotate their key onto objects
ANNOTATION_KEY = "c7n:MatchedFilters"
def glob_match(value, pattern):
if not isinstance(value, six.string_types):
return False
return fnmatch.fnmatch(value, pattern)
def regex_match(value, regex):
if not isinstance(value, six.string_types):
return False
    # Note: Python 2.5+ internally caches compiled regexes;
    # it would be nice to use re2 here.
return bool(re.match(regex, value, flags=re.IGNORECASE))
def operator_in(x, y):
return x in y
def operator_ni(x, y):
return x not in y
OPERATORS = {
'eq': operator.eq,
'equal': operator.eq,
'ne': operator.ne,
'not-equal': operator.ne,
'gt': operator.gt,
'greater-than': operator.gt,
'ge': operator.ge,
'gte': operator.ge,
'le': operator.le,
'lte': operator.le,
'lt': operator.lt,
'less-than': operator.lt,
'glob': glob_match,
'regex': regex_match,
'in': operator_in,
'ni': operator_ni,
'not-in': operator_ni,
'contains': operator.contains}
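# Illustrative note (added; not in the original source): OPERATORS maps the
# operator names allowed in policy filters to plain callables, e.g.
#   OPERATORS['glob']('web-01', 'web-*')       -> True
#   OPERATORS['not-in']('prod', ['dev', 'qa']) -> True
# The sample values above are made up for demonstration.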
class FilterRegistry(PluginRegistry):
def __init__(self, *args, **kw):
super(FilterRegistry, self).__init__(*args, **kw)
self.register('value', ValueFilter)
self.register('or', Or)
self.register('and', And)
self.register('not', Not)
self.register('event', EventFilter)
def parse(self, data, manager):
results = []
for d in data:
results.append(self.factory(d, manager))
return results
def factory(self, data, manager=None):
"""Factory func for filters.
data - policy config for filters
manager - resource type manager (ec2, s3, etc)
"""
# Make the syntax a little nicer for common cases.
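        # (Illustrative, added) e.g. {'or': [...]} builds an Or filter, and a
        # bare single-key dict such as {'tag:env': 'prod'} becomes a ValueFilter.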
if isinstance(data, dict) and len(data) == 1 and 'type' not in data:
op = list(data.keys())[0]
if op == 'or':
return Or(data, self, manager)
elif op == 'and':
return And(data, self, manager)
elif op == 'not':
return Not(data, self, manager)
return ValueFilter(data, manager).validate()
if isinstance(data, six.string_types):
filter_type = data
data = {'type': data}
else:
filter_type = data.get('type')
if not filter_type:
raise FilterValidationError(
"%s Invalid Filter %s" % (
self.plugin_type, data))
filter_class = self.get(filter_type)
if filter_class is not None:
return filter_class(data, manager)
else:
raise FilterValidationError(
"%s Invalid filter type %s" % (
self.plugin_type, data))
# Really should be an abstract base class (abc) or
# zope.interface
class Filter(object):
executor_factory = ThreadPoolExecutor
log = logging.getLogger('custodian.filters')
metrics = ()
permissions = ()
schema = {'type': 'object'}
def __init__(self, data, manager=None):
self.data = data
self.manager = manager
def get_permissions(self):
return self.permissions
def validate(self):
"""validate filter config, return validation error or self"""
return self
def process(self, resources, event=None):
""" Bulk process resources and return filtered set."""
return list(filter(self, resources))
class Or(Filter):
def __init__(self, data, registry, manager):
super(Or, self).__init__(data)
self.registry = registry
self.filters = registry.parse(list(self.data.values())[0], manager)
self.manager = manager
def process(self, resources, event=None):
if self.manager:
return self.process_set(resources, event)
return super(Or, self).process(resources, event)
def __call__(self, r):
"""Fallback for older unit tests that don't utilize a query manager"""
for f in self.filters:
if f(r):
return True
return False
def process_set(self, resources, event):
resource_type = self.manager.get_model()
resource_map = {r[resource_type.id]: r for r in resources}
results = set()
for f in self.filters:
results = results.union([
r[resource_type.id] for r in f.process(resources, event)])
return [resource_map[r_id] for r_id in results]
class And(Filter):
def __init__(self, data, registry, manager):
super(And, self).__init__(data)
self.registry = registry
self.filters = registry.parse(list(self.data.values())[0], manager)
def process(self, resources, events=None):
for f in self.filters:
resources = f.process(resources, events)
return resources
class Not(Filter):
def __init__(self, data, registry, manager):
super(Not, self).__init__(data)
self.registry = registry
self.filters = registry.parse(list(self.data.values())[0], manager)
self.manager = manager
def process(self, resources, event=None):
if self.manager:
return self.process_set(resources, event)
return super(Not, self).process(resources, event)
def __call__(self, r):
"""Fallback for older unit tests that don't utilize a query manager"""
        # There is an implicit 'and' over self.filters; by De Morgan's law,
        # not (A and B and ... and Z) == (not A) or (not B) or ... or (not Z)
for f in self.filters:
if not f(r):
return True
return False
def process_set(self, resources, event):
resource_type = self.manager.get_model()
resource_map = {r[resource_type.id]: r for r in resources}
for f in self.filters:
resources = f.process(resources, event)
before = set(resource_map.keys())
after = set([r[resource_type.id] for r in resources])
results = before - after
return [resource_map[r_id] for r_id in results]
class ValueFilter(Filter):
"""Generic value filter using jmespath
"""
expr = None
op = v = vtype = None
schema = {
'type': 'object',
# Doesn't mix well with inherits that extend
'additionalProperties': False,
'required': ['type'],
'properties': {
# Doesn't mix well as enum with inherits that extend
'type': {'enum': ['value']},
'key': {'type': 'string'},
'value_type': {'enum': [
'age', 'integer', 'expiration', 'normalize', 'size',
'cidr', 'cidr_size', 'swap', 'resource_count', 'expr']},
'default': {'type': 'object'},
'value_from': ValuesFrom.schema,
'value': {'oneOf': [
{'type': 'array'},
{'type': 'string'},
{'type': 'boolean'},
{'type': 'number'},
{'type': 'null'}]},
'op': {'enum': list(OPERATORS.keys())}}}
annotate = True
def __init__(self, data, manager=None):
super(ValueFilter, self).__init__(data, manager)
self.expr = {}
def _validate_resource_count(self):
""" Specific validation for `resource_count` type
The `resource_count` type works a little differently because it operates
on the entire set of resources. It:
- does not require `key`
- `value` must be a number
- supports a subset of the OPERATORS list
"""
for field in ('op', 'value'):
if field not in self.data:
raise FilterValidationError(
"Missing '%s' in value filter %s" % (field, self.data))
if not (isinstance(self.data['value'], int) or
isinstance(self.data['value'], list)):
raise FilterValidationError(
"`value` must be an integer in resource_count filter %s" % self.data)
        # regex is excluded here: it is not meaningful for a numeric count
if self.data['op'] not in OPERATORS or self.data['op'] == 'regex':
raise FilterValidationError(
"Invalid operator in value filter %s" % self.data)
return self
def validate(self):
if len(self.data) == 1:
return self
# `resource_count` requires a slightly different schema than the rest of
# the value filters because it operates on the full resource list
if self.data.get('value_type') == 'resource_count':
return self._validate_resource_count()
if 'key' not in self.data:
raise FilterValidationError(
"Missing 'key' in value filter %s" % self.data)
if 'value' not in self.data and 'value_from' not in self.data:
raise FilterValidationError(
"Missing 'value' in value filter %s" % self.data)
if 'op' in self.data:
if not self.data['op'] in OPERATORS:
raise FilterValidationError(
"Invalid operator in value filter %s" % self.data)
if self.data['op'] == 'regex':
# Sanity check that we can compile
try:
re.compile(self.data['value'])
except re.error as e:
raise FilterValidationError(
"Invalid regex: %s %s" % (e, self.data))
return self
def __call__(self, i):
if self.data.get('value_type') == 'resource_count':
return self.process(i)
matched = self.match(i)
if matched and self.annotate:
set_annotation(i, ANNOTATION_KEY, self.k)
return matched
def process(self, resources, event=None):
# For the resource_count filter we operate on the full set of resources.
if self.data.get('value_type') == 'resource_count':
op = OPERATORS[self.data.get('op')]
if op(len(resources), self.data.get('value')):
return resources
return []
return super(ValueFilter, self).process(resources, event)
def get_resource_value(self, k, i):
if k.startswith('tag:'):
tk = k.split(':', 1)[1]
r = None
for t in i.get("Tags", []):
if t.get('Key') == tk:
r = t.get('Value')
break
elif k in i:
r = i.get(k)
elif k not in self.expr:
self.expr[k] = jmespath.compile(k)
r = self.expr[k].search(i)
else:
r = self.expr[k].search(i)
return r
def match(self, i):
if self.v is None and len(self.data) == 1:
[(self.k, self.v)] = self.data.items()
elif self.v is None:
self.k = self.data.get('key')
self.op = self.data.get('op')
if 'value_from' in self.data:
values = ValuesFrom(self.data['value_from'], self.manager)
self.v = values.get_values()
else:
self.v = self.data.get('value')
self.vtype = self.data.get('value_type')
if i is None:
return False
# value extract
r = self.get_resource_value(self.k, i)
if self.op in ('in', 'not-in') and r is None:
r = ()
# value type conversion
if self.vtype is not None:
v, r = self.process_value_type(self.v, r, i)
else:
v = self.v
# Value match
if r is None and v == 'absent':
return True
elif r is not None and v == 'present':
return True
elif v == 'not-null' and r:
return True
elif v == 'empty' and not r:
return True
elif self.op:
op = OPERATORS[self.op]
try:
return op(r, v)
except TypeError:
return False
elif r == self.v:
return True
return False
def process_value_type(self, sentinel, value, resource):
if self.vtype == 'normalize' and isinstance(value, six.string_types):
return sentinel, value.strip().lower()
elif self.vtype == 'expr':
return sentinel, self.get_resource_value(value, resource)
elif self.vtype == 'integer':
try:
value = int(value.strip())
except ValueError:
value = 0
elif self.vtype == 'size':
try:
return sentinel, len(value)
except TypeError:
return sentinel, 0
elif self.vtype == 'swap':
return value, sentinel
elif self.vtype == 'age':
if not isinstance(sentinel, datetime):
sentinel = datetime.now(tz=tzutc()) - timedelta(sentinel)
if not isinstance(value, datetime):
                # Some resources (observed with EMR) return values here that
                # cannot be parsed as dates; fall back to 0 in that case.
try:
value = parse(value, default=datetime.now(tz=tzutc()))
except (AttributeError, TypeError, ValueError):
value = 0
# Reverse the age comparison, we want to compare the value being
# greater than the sentinel typically. Else the syntax for age
# comparisons is intuitively wrong.
return value, sentinel
elif self.vtype == 'cidr':
s = parse_cidr(sentinel)
v = parse_cidr(value)
if (isinstance(s, ipaddress._BaseAddress) and isinstance(v, ipaddress._BaseNetwork)):
return v, s
return s, v
elif self.vtype == 'cidr_size':
cidr = parse_cidr(value)
if cidr:
return sentinel, cidr.prefixlen
return sentinel, 0
# Allows for expiration filtering, for events in the future as opposed
# to events in the past which age filtering allows for.
elif self.vtype == 'expiration':
if not isinstance(sentinel, datetime):
sentinel = datetime.now(tz=tzutc()) + timedelta(sentinel)
if not isinstance(value, datetime):
try:
value = parse(value, default=datetime.now(tz=tzutc()))
except (AttributeError, TypeError, ValueError):
value = 0
return sentinel, value
return sentinel, value
class AgeFilter(Filter):
"""Automatically filter resources older than a given date.
"""
threshold_date = None
    # The name of the attribute to compare against the threshold;
    # must be overridden in subclasses.
date_attribute = None
schema = None
def validate(self):
if not self.date_attribute:
raise NotImplementedError(
"date_attribute must be overriden in subclass")
return self
def get_resource_date(self, i):
v = i[self.date_attribute]
if not isinstance(v, datetime):
v = parse(v)
if not v.tzinfo:
v = v.replace(tzinfo=tzutc())
return v
def __call__(self, i):
v = self.get_resource_date(i)
if v is None:
return False
op = OPERATORS[self.data.get('op', 'greater-than')]
if not self.threshold_date:
days = self.data.get('days', 0)
hours = self.data.get('hours', 0)
minutes = self.data.get('minutes', 0)
# Work around placebo issues with tz
if v.tzinfo:
n = datetime.now(tz=tzutc())
else:
n = datetime.now()
self.threshold_date = n - timedelta(days=days, hours=hours, minutes=minutes)
return op(self.threshold_date, v)
class EventFilter(ValueFilter):
"""Filter against a cloudwatch event associated to a resource type."""
schema = type_schema('event', rinherit=ValueFilter.schema)
def validate(self):
if 'mode' not in self.manager.data:
raise FilterValidationError(
"Event filters can only be used with lambda policies")
return self
def process(self, resources, event=None):
if event is None:
return resources
if self(event):
return resources
return []
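# --- Illustrative usage sketch (added; not part of the original module) ---
# A standalone ValueFilter matching resources whose tag "env" equals "prod".
# The resource dicts below are made-up examples shaped like AWS API output.
if __name__ == '__main__':
    f = ValueFilter({'type': 'value', 'key': 'tag:env', 'value': 'prod'}).validate()
    resources = [
        {'InstanceId': 'i-1', 'Tags': [{'Key': 'env', 'Value': 'prod'}]},
        {'InstanceId': 'i-2', 'Tags': [{'Key': 'env', 'Value': 'dev'}]},
    ]
    # process() filters the list; matched resources are annotated under
    # the "c7n:MatchedFilters" key.
    print([r['InstanceId'] for r in f.process(resources)])  # -> ['i-1']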
|
{
"content_hash": "2a50cb0858e29bdc01495aed87c445c5",
"timestamp": "",
"source": "github",
"line_count": 530,
"max_line_length": 97,
"avg_line_length": 31.983018867924528,
"alnum_prop": 0.5561323815704088,
"repo_name": "jdubs/cloud-custodian",
"id": "78df6b2d489b569ec344a95f7c19a4647745a548",
"size": "17541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "c7n/filters/core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1251"
},
{
"name": "Python",
"bytes": "1557818"
}
],
"symlink_target": ""
}
|
"""
The following abstract structures are used to build the Abstract Syntax
Tree (AST). In general, there is no hierarchy between them. For example,
an ArithmeticStructure can contain a CallStructure which in turn
contains another ArithmeticStructure. However, some of them cannot be
inside other structures due to restrictions of the source languages.
For example, the GetConstantsStructure cannot be a part of another structure
because it has to appear after the '=' sign in Vensim and not be followed by
anything else.
"""
from dataclasses import dataclass
from typing import Union
class AbstractSyntax:
"""
    Generic base class. All abstract syntax structures are children of this class.
Used for typing.
"""
pass
@dataclass
class ArithmeticStructure(AbstractSyntax):
"""
Dataclass for an arithmetic structure.
Parameters
----------
operators: list
List of operators applied between the arguments
arguments: list
        The arguments of the arithmetic operations.
"""
operators: list
arguments: list
def __str__(self) -> str: # pragma: no cover
return "ArithmeticStructure:\n\t %s %s" % (
self.operators, self.arguments)
@dataclass
class LogicStructure(AbstractSyntax):
"""
Dataclass for a logic structure.
Parameters
----------
operators: list
List of operators applied between the arguments
arguments: list
The arguments of the logic operations.
"""
operators: list
arguments: list
def __str__(self) -> str: # pragma: no cover
return "LogicStructure:\n\t %s %s" % (
self.operators, self.arguments)
@dataclass
class SubscriptsReferenceStructure(AbstractSyntax):
"""
Dataclass for a subscript reference structure.
Parameters
----------
subscripts: tuple
The list of subscripts referenced.
"""
subscripts: tuple
def __str__(self) -> str: # pragma: no cover
return "SubscriptReferenceStructure:\n\t %s" % self.subscripts
@dataclass
class ReferenceStructure(AbstractSyntax):
"""
Dataclass for an element reference structure.
Parameters
----------
reference: str
The name of the referenced element.
subscripts: SubscriptsReferenceStructure or None
        The subscripts used in the reference.
"""
reference: str
subscripts: Union[SubscriptsReferenceStructure, None] = None
def __str__(self) -> str: # pragma: no cover
return "ReferenceStructure:\n\t %s%s" % (
self.reference,
"\n\t" + str(self.subscripts or "").replace("\n", "\n\t"))
@dataclass
class CallStructure(AbstractSyntax):
"""
Dataclass for a call structure.
Parameters
----------
function: str or ReferenceStructure
The name or the reference of the callable.
arguments: tuple
The list of arguments used for calling the function.
"""
function: Union[str, ReferenceStructure]
arguments: tuple
def __str__(self) -> str: # pragma: no cover
return "CallStructure:\n\t%s(%s)" % (
self.function,
"\n\t\t,".join([
"\n\t\t" + str(arg).replace("\n", "\n\t\t")
for arg in self.arguments
]))
@dataclass
class GameStructure(AbstractSyntax):
"""
Dataclass for a game structure.
Parameters
----------
expression: AST
The expression inside the game call.
"""
expression: Union[AbstractSyntax, float]
def __str__(self) -> str: # pragma: no cover
return "GameStructure:\n\t%s" % self.expression
@dataclass
class AllocateAvailableStructure(AbstractSyntax):
"""
    Dataclass for an Allocate Available structure.
Parameters
----------
request: AbstractSyntax
The reference to the request variable.
pp: AbstractSyntax
The reference to the priority variable.
avail: AbstractSyntax or float
The total available supply.
"""
request: AbstractSyntax
pp: AbstractSyntax
avail: Union[AbstractSyntax, float]
def __str__(self) -> str: # pragma: no cover
return "AllocateAvailableStructure:\n\t%s,\n\t%s,\n\t%s" % (
self.request, self.pp, self.avail
)
@dataclass
class AllocateByPriorityStructure(AbstractSyntax):
"""
    Dataclass for an Allocate By Priority structure.
Parameters
----------
request: AbstractSyntax
The reference to the request variable.
priority: AbstractSyntax
The reference to the priority variable.
size: AbstractSyntax or int
The size of the last dimension.
width: AbstractSyntax or float
The width between priorities.
supply: AbstractSyntax or float
The total supply.
"""
request: AbstractSyntax
priority: AbstractSyntax
size: Union[AbstractSyntax, int]
width: Union[AbstractSyntax, float]
supply: Union[AbstractSyntax, float]
def __str__(self) -> str: # pragma: no cover
return "AllocateByPriorityStructure:"\
"\n\t%s,\n\t%s,\n\t%s,\n\t%s,\n\t%s" % (
self.request, self.priority, self.size,
self.width, self.supply
)
@dataclass
class InitialStructure(AbstractSyntax):
"""
    Dataclass for an initial structure.
Parameters
----------
initial: AST
The expression inside the initial call.
"""
initial: Union[AbstractSyntax, float]
def __str__(self) -> str: # pragma: no cover
return "InitialStructure:\n\t%s" % (
self.initial)
@dataclass
class IntegStructure(AbstractSyntax):
"""
Dataclass for an integ/stock structure.
Parameters
----------
flow: AST
The flow of the stock.
initial: AST
The initial value of the stock.
"""
flow: Union[AbstractSyntax, float]
initial: Union[AbstractSyntax, float]
def __str__(self) -> str: # pragma: no cover
return "IntegStructure:\n\t%s,\n\t%s" % (
self.flow,
self.initial)
@dataclass
class DelayStructure(AbstractSyntax):
"""
Dataclass for a delay structure.
Parameters
----------
input: AST
The input of the delay.
delay_time: AST
The delay time value of the delay.
initial: AST
The initial value of the delay.
order: float
The order of the delay.
"""
input: Union[AbstractSyntax, float]
delay_time: Union[AbstractSyntax, float]
initial: Union[AbstractSyntax, float]
order: float
def __str__(self) -> str: # pragma: no cover
return "DelayStructure (order %s):\n\t%s,\n\t%s,\n\t%s" % (
self.order,
self.input,
self.delay_time,
self.initial)
@dataclass
class DelayNStructure(AbstractSyntax):
"""
Dataclass for a delay n structure.
Parameters
----------
input: AST
The input of the delay.
delay_time: AST
The delay time value of the delay.
initial: AST
The initial value of the delay.
order: float
The order of the delay.
"""
input: Union[AbstractSyntax, float]
delay_time: Union[AbstractSyntax, float]
initial: Union[AbstractSyntax, float]
order: Union[AbstractSyntax, float]
    # DELAY N may behave differently from other delays when the delay time
    # changes during integration.
def __str__(self) -> str: # pragma: no cover
return "DelayNStructure (order %s):\n\t%s,\n\t%s,\n\t%s" % (
self.order,
self.input,
self.delay_time,
self.initial)
@dataclass
class DelayFixedStructure(AbstractSyntax):
"""
Dataclass for a delay fixed structure.
Parameters
----------
input: AST
The input of the delay.
delay_time: AST
The delay time value of the delay.
initial: AST
The initial value of the delay.
"""
input: Union[AbstractSyntax, float]
delay_time: Union[AbstractSyntax, float]
initial: Union[AbstractSyntax, float]
def __str__(self) -> str: # pragma: no cover
return "DelayFixedStructure:\n\t%s,\n\t%s,\n\t%s" % (
self.input,
self.delay_time,
self.initial)
@dataclass
class SmoothStructure(AbstractSyntax):
"""
Dataclass for a smooth structure.
Parameters
----------
input: AST
The input of the smooth.
    smooth_time: AST
The smooth time value of the smooth.
initial: AST
The initial value of the smooth.
order: float
The order of the smooth.
"""
input: Union[AbstractSyntax, float]
smooth_time: Union[AbstractSyntax, float]
initial: Union[AbstractSyntax, float]
order: float
def __str__(self) -> str: # pragma: no cover
return "SmoothStructure (order %s):\n\t%s,\n\t%s,\n\t%s" % (
self.order,
self.input,
self.smooth_time,
self.initial)
@dataclass
class SmoothNStructure(AbstractSyntax):
"""
Dataclass for a smooth n structure.
Parameters
----------
input: AST
The input of the smooth.
    smooth_time: AST
The smooth time value of the smooth.
initial: AST
The initial value of the smooth.
order: float
The order of the smooth.
"""
input: Union[AbstractSyntax, float]
smooth_time: Union[AbstractSyntax, float]
initial: Union[AbstractSyntax, float]
order: Union[AbstractSyntax, float]
    # SMOOTH N may behave differently from other smooths with Runge-Kutta
    # integration.
def __str__(self) -> str: # pragma: no cover
return "SmoothNStructure (order %s):\n\t%s,\n\t%s,\n\t%s" % (
self.order,
self.input,
self.smooth_time,
self.initial)
@dataclass
class TrendStructure(AbstractSyntax):
"""
Dataclass for a trend structure.
Parameters
----------
input: AST
The input of the trend.
average_time: AST
The average time value of the trend.
initial_trend: AST
The initial trend value of the trend.
"""
input: Union[AbstractSyntax, float]
average_time: Union[AbstractSyntax, float]
initial_trend: Union[AbstractSyntax, float]
def __str__(self) -> str: # pragma: no cover
return "TrendStructure:\n\t%s,\n\t%s,\n\t%s" % (
self.input,
self.average_time,
            self.initial_trend)
@dataclass
class ForecastStructure(AbstractSyntax):
"""
Dataclass for a forecast structure.
Parameters
----------
input: AST
The input of the forecast.
    average_time: AST
The average time value of the forecast.
horizon: float
The horizon value of the forecast.
initial_trend: AST
The initial trend value of the forecast.
"""
input: Union[AbstractSyntax, float]
average_time: Union[AbstractSyntax, float]
horizon: Union[AbstractSyntax, float]
initial_trend: Union[AbstractSyntax, float]
def __str__(self) -> str: # pragma: no cover
return "ForecastStructure:\n\t%s,\n\t%s,\n\t%s,\n\t%s" % (
self.input,
self.average_time,
self.horizon,
self.initial_trend)
@dataclass
class SampleIfTrueStructure(AbstractSyntax):
"""
Dataclass for a sample if true structure.
Parameters
----------
condition: AST
        The condition of the sample if true.
input: AST
The input of the sample if true.
initial: AST
The initial value of the sample if true.
"""
condition: Union[AbstractSyntax, float]
input: Union[AbstractSyntax, float]
initial: Union[AbstractSyntax, float]
def __str__(self) -> str: # pragma: no cover
return "SampleIfTrueStructure:\n\t%s,\n\t%s,\n\t%s" % (
self.condition,
self.input,
self.initial)
@dataclass
class LookupsStructure(AbstractSyntax):
"""
Dataclass for a lookup structure.
Parameters
----------
x: tuple
The list of the x values of the lookup.
y: tuple
The list of the y values of the lookup.
x_limits: tuple
The minimum and maximum value of x.
y_limits: tuple
The minimum and maximum value of y.
type: str
The interpolation method.
"""
x: tuple
y: tuple
x_limits: tuple
y_limits: tuple
type: str
def __str__(self) -> str: # pragma: no cover
return "LookupStructure (%s):\n\tx %s = %s\n\ty %s = %s\n" % (
self.type, self.x_limits, self.x, self.y_limits, self.y
)
@dataclass
class InlineLookupsStructure(AbstractSyntax):
"""
Dataclass for an inline lookup structure.
Parameters
----------
argument: AST
The argument of the inline lookup.
lookups: LookupStructure
The lookups definition.
"""
argument: Union[AbstractSyntax, float]
lookups: LookupsStructure
def __str__(self) -> str: # pragma: no cover
return "InlineLookupsStructure:\n\t%s\n\t%s" % (
str(self.argument).replace("\n", "\n\t"),
str(self.lookups).replace("\n", "\n\t")
)
@dataclass
class DataStructure(AbstractSyntax):
"""
Dataclass for an empty data structure.
Parameters
----------
None
"""
def __str__(self) -> str: # pragma: no cover
return "DataStructure"
@dataclass
class GetLookupsStructure(AbstractSyntax):
"""
Dataclass for a get lookups structure.
Parameters
----------
file: str
The file path where the data is.
tab: str
The sheetname where the data is.
x_row_or_col: str
The pointer to the cell or cellrange name that defines the
interpolation series data.
cell: str
The pointer to the cell or the cellrange name that defines the data.
"""
file: str
tab: str
x_row_or_col: str
cell: str
def __str__(self) -> str: # pragma: no cover
return "GetLookupStructure:\n\t'%s', '%s', '%s', '%s'\n" % (
self.file, self.tab, self.x_row_or_col, self.cell
)
@dataclass
class GetDataStructure(AbstractSyntax):
"""
    Dataclass for a get data structure.
Parameters
----------
file: str
The file path where the data is.
tab: str
The sheetname where the data is.
time_row_or_col: str
The pointer to the cell or cellrange name that defines the
interpolation time series data.
cell: str
The pointer to the cell or the cellrange name that defines the data.
"""
file: str
tab: str
time_row_or_col: str
cell: str
def __str__(self) -> str: # pragma: no cover
return "GetDataStructure:\n\t'%s', '%s', '%s', '%s'\n" % (
self.file, self.tab, self.time_row_or_col, self.cell
)
@dataclass
class GetConstantsStructure(AbstractSyntax):
"""
    Dataclass for a get constants structure.
Parameters
----------
file: str
The file path where the data is.
tab: str
The sheetname where the data is.
cell: str
The pointer to the cell or the cellrange name that defines the data.
"""
file: str
tab: str
cell: str
def __str__(self) -> str: # pragma: no cover
return "GetConstantsStructure:\n\t'%s', '%s', '%s'\n" % (
self.file, self.tab, self.cell
)
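# --- Illustrative sketch (added; not part of the original module) ---
# Building the AST for an expression like "if_then_else(stock > 10, 1, 0)":
# the structures defined above simply nest. The names used here are made up.
if __name__ == "__main__":  # pragma: no cover
    condition = LogicStructure(
        operators=[">"],
        arguments=[ReferenceStructure("stock"), 10])
    ast = CallStructure(
        function=ReferenceStructure("if_then_else"),
        arguments=(condition, 1, 0))
    print(ast)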
|
{
"content_hash": "9fe97951b4f5e947ac03379c23ade2de",
"timestamp": "",
"source": "github",
"line_count": 628,
"max_line_length": 76,
"avg_line_length": 24.75955414012739,
"alnum_prop": 0.6058267412695351,
"repo_name": "JamesPHoughton/pysd",
"id": "e745318796ca479bdb9f42976f93a0b059fd398a",
"size": "15549",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysd/translators/structures/abstract_expressions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "850"
},
{
"name": "Python",
"bytes": "771517"
}
],
"symlink_target": ""
}
|
"""
Created on Fri Jan 27 18:31:59 2017
@author: katsuya.ishiyama
"""
from numpy import random
# Definition of module level constants
SUCCESS_CODE = 1
FAILURE_CODE = 0
class Strategy():
def __init__(self, n):
_success_probability = _generate_success_probability(n)
_strategy = {i: p for i, p in enumerate(_success_probability, 1)}
self._n = n
self.strategy = _strategy
self.stock_of_strategy = list(_strategy.keys())
self.tried_strategy = []
self.current_strategy = None
self.previous_strategy = None
self.count_same_strategy = 0
self._result_of_trial = None
def choose_strategy(self):
if not self.stock_of_strategy:
raise ValueError('There is no strategy in stock.')
_chosen_id = random.choice(self.stock_of_strategy, 1)[0]
self.previous_strategy = self.current_strategy
self.current_strategy = _chosen_id
self.count_same_strategy = 0
self.stock_of_strategy.remove(_chosen_id)
_chosen_strategy = {
'chosen_strategy': _chosen_id,
'success_probability': self._get_success_probability()
}
return _chosen_strategy
def _get_success_probability(self):
return self.strategy[self.current_strategy]
def try_strategy(self):
if not self.current_strategy:
raise ValueError('No strategy is chosen.')
self.tried_strategy.append(self.current_strategy)
self._result_of_trial = _get_trial_result(
p=self._get_success_probability()
)
if self.current_strategy == self.previous_strategy:
self.count_same_strategy += 1
return self._result_of_trial
def _get_trial_result(p):
_trial_result = random.choice([FAILURE_CODE, SUCCESS_CODE], size=1, p=[1 - p, p])
return _trial_result[0]
def _generate_success_probability(size):
return random.sample(size)
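# Minimal usage sketch (added for illustration; not in the original module).
# Success probabilities are drawn at random, so output varies per run.
if __name__ == '__main__':
    s = Strategy(n=5)
    # e.g. {'chosen_strategy': 3, 'success_probability': 0.42}
    print(s.choose_strategy())
    # prints SUCCESS_CODE (1) or FAILURE_CODE (0)
    print(s.try_strategy())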
|
{
"content_hash": "5a83823dbb44f5e9b12178860887ea3b",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 85,
"avg_line_length": 23.72289156626506,
"alnum_prop": 0.6211274758760792,
"repo_name": "Katsuya-Ishiyama/simulation",
"id": "4d649a859859bd7d05bc7f4373330dcb6efbd5bb",
"size": "2013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "strategy/strategy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7780"
}
],
"symlink_target": ""
}
|
ANSIBLE_METADATA = {
'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.0'
}
DOCUMENTATION = '''
---
module: bigip_facts
short_description: Collect facts from F5 BIG-IP devices
description:
- Collect facts from F5 BIG-IP devices via iControl SOAP API
version_added: "1.6"
author:
- Matt Hite (@mhite)
- Tim Rupp (@caphrim007)
notes:
- Requires BIG-IP software version >= 11.4
- F5 developed module 'bigsuds' required (see http://devcentral.f5.com)
- Best run as a local_action in your playbook
- Tested with manager and above account privilege level
- C(provision) facts were added in 2.2
requirements:
- bigsuds
options:
session:
description:
- BIG-IP session support; may be useful to avoid concurrency
issues in certain circumstances.
required: false
default: true
choices: []
aliases: []
include:
description:
- Fact category or list of categories to collect
required: true
default: null
choices:
- address_class
- certificate
- client_ssl_profile
- device
- device_group
- interface
- key
- node
- pool
- provision
- rule
- self_ip
- software
- system_info
- traffic_group
- trunk
- virtual_address
- virtual_server
- vlan
aliases: []
filter:
description:
- Shell-style glob matching string used to filter fact keys. Not
applicable for software, provision, and system_info fact categories.
required: false
default: null
choices: []
aliases: []
extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: Collect BIG-IP facts
bigip_facts:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
include: "interface,vlan"
delegate_to: localhost
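
# Added illustrative example (not in the original docs): restrict collected
# fact keys with a shell-style glob; the glob value below is an assumption.
- name: Collect filtered pool facts
  bigip_facts:
    server: "lb.mydomain.com"
    user: "admin"
    password: "secret"
    include: "pool"
    filter: "/Common/*"
  delegate_to: localhost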
'''
try:
from suds import MethodNotFound, WebFault
except ImportError:
bigsuds_found = False
else:
bigsuds_found = True
import fnmatch
import re
import traceback
class F5(object):
"""F5 iControl class.
F5 BIG-IP iControl API class.
Attributes:
api: iControl API instance.
"""
def __init__(self, host, user, password, session=False, validate_certs=True, port=443):
self.api = bigip_api(host, user, password, validate_certs, port)
if session:
self.start_session()
def start_session(self):
self.api = self.api.with_session_id()
def get_api(self):
return self.api
def set_recursive_query_state(self, state):
self.api.System.Session.set_recursive_query_state(state)
def get_recursive_query_state(self):
return self.api.System.Session.get_recursive_query_state()
def enable_recursive_query_state(self):
self.set_recursive_query_state('STATE_ENABLED')
def disable_recursive_query_state(self):
self.set_recursive_query_state('STATE_DISABLED')
def set_active_folder(self, folder):
self.api.System.Session.set_active_folder(folder=folder)
def get_active_folder(self):
return self.api.System.Session.get_active_folder()
class Interfaces(object):
"""Interfaces class.
F5 BIG-IP interfaces class.
Attributes:
api: iControl API instance.
interfaces: A list of BIG-IP interface names.
"""
def __init__(self, api, regex=None):
self.api = api
self.interfaces = api.Networking.Interfaces.get_list()
if regex:
re_filter = re.compile(regex)
self.interfaces = filter(re_filter.search, self.interfaces)
def get_list(self):
return self.interfaces
def get_active_media(self):
return self.api.Networking.Interfaces.get_active_media(self.interfaces)
def get_actual_flow_control(self):
return self.api.Networking.Interfaces.get_actual_flow_control(self.interfaces)
def get_bundle_state(self):
return self.api.Networking.Interfaces.get_bundle_state(self.interfaces)
def get_description(self):
return self.api.Networking.Interfaces.get_description(self.interfaces)
def get_dual_media_state(self):
return self.api.Networking.Interfaces.get_dual_media_state(self.interfaces)
def get_enabled_state(self):
return self.api.Networking.Interfaces.get_enabled_state(self.interfaces)
def get_if_index(self):
return self.api.Networking.Interfaces.get_if_index(self.interfaces)
def get_learning_mode(self):
return self.api.Networking.Interfaces.get_learning_mode(self.interfaces)
def get_lldp_admin_status(self):
return self.api.Networking.Interfaces.get_lldp_admin_status(self.interfaces)
def get_lldp_tlvmap(self):
return self.api.Networking.Interfaces.get_lldp_tlvmap(self.interfaces)
def get_mac_address(self):
return self.api.Networking.Interfaces.get_mac_address(self.interfaces)
def get_media(self):
return self.api.Networking.Interfaces.get_media(self.interfaces)
def get_media_option(self):
return self.api.Networking.Interfaces.get_media_option(self.interfaces)
def get_media_option_sfp(self):
return self.api.Networking.Interfaces.get_media_option_sfp(self.interfaces)
def get_media_sfp(self):
return self.api.Networking.Interfaces.get_media_sfp(self.interfaces)
def get_media_speed(self):
return self.api.Networking.Interfaces.get_media_speed(self.interfaces)
def get_media_status(self):
return self.api.Networking.Interfaces.get_media_status(self.interfaces)
def get_mtu(self):
return self.api.Networking.Interfaces.get_mtu(self.interfaces)
def get_phy_master_slave_mode(self):
return self.api.Networking.Interfaces.get_phy_master_slave_mode(self.interfaces)
def get_prefer_sfp_state(self):
return self.api.Networking.Interfaces.get_prefer_sfp_state(self.interfaces)
def get_flow_control(self):
return self.api.Networking.Interfaces.get_requested_flow_control(self.interfaces)
def get_sflow_poll_interval(self):
return self.api.Networking.Interfaces.get_sflow_poll_interval(self.interfaces)
def get_sflow_poll_interval_global(self):
return self.api.Networking.Interfaces.get_sflow_poll_interval_global(self.interfaces)
def get_sfp_media_state(self):
return self.api.Networking.Interfaces.get_sfp_media_state(self.interfaces)
def get_stp_active_edge_port_state(self):
return self.api.Networking.Interfaces.get_stp_active_edge_port_state(self.interfaces)
def get_stp_enabled_state(self):
return self.api.Networking.Interfaces.get_stp_enabled_state(self.interfaces)
def get_stp_link_type(self):
return self.api.Networking.Interfaces.get_stp_link_type(self.interfaces)
def get_stp_protocol_detection_reset_state(self):
return self.api.Networking.Interfaces.get_stp_protocol_detection_reset_state(self.interfaces)
class SelfIPs(object):
"""Self IPs class.
F5 BIG-IP Self IPs class.
Attributes:
api: iControl API instance.
self_ips: List of self IPs.
"""
def __init__(self, api, regex=None):
self.api = api
self.self_ips = api.Networking.SelfIPV2.get_list()
if regex:
re_filter = re.compile(regex)
self.self_ips = filter(re_filter.search, self.self_ips)
def get_list(self):
return self.self_ips
def get_address(self):
return self.api.Networking.SelfIPV2.get_address(self.self_ips)
def get_allow_access_list(self):
return self.api.Networking.SelfIPV2.get_allow_access_list(self.self_ips)
def get_description(self):
return self.api.Networking.SelfIPV2.get_description(self.self_ips)
def get_enforced_firewall_policy(self):
return self.api.Networking.SelfIPV2.get_enforced_firewall_policy(self.self_ips)
def get_floating_state(self):
return self.api.Networking.SelfIPV2.get_floating_state(self.self_ips)
def get_fw_rule(self):
return self.api.Networking.SelfIPV2.get_fw_rule(self.self_ips)
def get_netmask(self):
return self.api.Networking.SelfIPV2.get_netmask(self.self_ips)
def get_staged_firewall_policy(self):
return self.api.Networking.SelfIPV2.get_staged_firewall_policy(self.self_ips)
def get_traffic_group(self):
return self.api.Networking.SelfIPV2.get_traffic_group(self.self_ips)
def get_vlan(self):
return self.api.Networking.SelfIPV2.get_vlan(self.self_ips)
def get_is_traffic_group_inherited(self):
return self.api.Networking.SelfIPV2.is_traffic_group_inherited(self.self_ips)
class Trunks(object):
"""Trunks class.
F5 BIG-IP trunks class.
Attributes:
api: iControl API instance.
trunks: List of trunks.
"""
def __init__(self, api, regex=None):
self.api = api
self.trunks = api.Networking.Trunk.get_list()
if regex:
re_filter = re.compile(regex)
self.trunks = filter(re_filter.search, self.trunks)
def get_list(self):
return self.trunks
def get_active_lacp_state(self):
return self.api.Networking.Trunk.get_active_lacp_state(self.trunks)
def get_configured_member_count(self):
return self.api.Networking.Trunk.get_configured_member_count(self.trunks)
def get_description(self):
return self.api.Networking.Trunk.get_description(self.trunks)
def get_distribution_hash_option(self):
return self.api.Networking.Trunk.get_distribution_hash_option(self.trunks)
def get_interface(self):
return self.api.Networking.Trunk.get_interface(self.trunks)
def get_lacp_enabled_state(self):
return self.api.Networking.Trunk.get_lacp_enabled_state(self.trunks)
def get_lacp_timeout_option(self):
return self.api.Networking.Trunk.get_lacp_timeout_option(self.trunks)
def get_link_selection_policy(self):
return self.api.Networking.Trunk.get_link_selection_policy(self.trunks)
def get_media_speed(self):
return self.api.Networking.Trunk.get_media_speed(self.trunks)
def get_media_status(self):
return self.api.Networking.Trunk.get_media_status(self.trunks)
def get_operational_member_count(self):
return self.api.Networking.Trunk.get_operational_member_count(self.trunks)
def get_stp_enabled_state(self):
return self.api.Networking.Trunk.get_stp_enabled_state(self.trunks)
def get_stp_protocol_detection_reset_state(self):
return self.api.Networking.Trunk.get_stp_protocol_detection_reset_state(self.trunks)
class Vlans(object):
"""Vlans class.
F5 BIG-IP Vlans class.
Attributes:
api: iControl API instance.
vlans: List of VLANs.
"""
def __init__(self, api, regex=None):
self.api = api
self.vlans = api.Networking.VLAN.get_list()
if regex:
re_filter = re.compile(regex)
self.vlans = filter(re_filter.search, self.vlans)
def get_list(self):
return self.vlans
def get_auto_lasthop(self):
return self.api.Networking.VLAN.get_auto_lasthop(self.vlans)
def get_cmp_hash_algorithm(self):
return self.api.Networking.VLAN.get_cmp_hash_algorithm(self.vlans)
def get_description(self):
return self.api.Networking.VLAN.get_description(self.vlans)
def get_dynamic_forwarding(self):
return self.api.Networking.VLAN.get_dynamic_forwarding(self.vlans)
def get_failsafe_action(self):
return self.api.Networking.VLAN.get_failsafe_action(self.vlans)
def get_failsafe_state(self):
return self.api.Networking.VLAN.get_failsafe_state(self.vlans)
def get_failsafe_timeout(self):
return self.api.Networking.VLAN.get_failsafe_timeout(self.vlans)
def get_if_index(self):
return self.api.Networking.VLAN.get_if_index(self.vlans)
def get_learning_mode(self):
return self.api.Networking.VLAN.get_learning_mode(self.vlans)
def get_mac_masquerade_address(self):
return self.api.Networking.VLAN.get_mac_masquerade_address(self.vlans)
def get_member(self):
return self.api.Networking.VLAN.get_member(self.vlans)
def get_mtu(self):
return self.api.Networking.VLAN.get_mtu(self.vlans)
def get_sflow_poll_interval(self):
return self.api.Networking.VLAN.get_sflow_poll_interval(self.vlans)
def get_sflow_poll_interval_global(self):
return self.api.Networking.VLAN.get_sflow_poll_interval_global(self.vlans)
def get_sflow_sampling_rate(self):
return self.api.Networking.VLAN.get_sflow_sampling_rate(self.vlans)
def get_sflow_sampling_rate_global(self):
return self.api.Networking.VLAN.get_sflow_sampling_rate_global(self.vlans)
def get_source_check_state(self):
return self.api.Networking.VLAN.get_source_check_state(self.vlans)
def get_true_mac_address(self):
return self.api.Networking.VLAN.get_true_mac_address(self.vlans)
def get_vlan_id(self):
return self.api.Networking.VLAN.get_vlan_id(self.vlans)
class Software(object):
"""Software class.
F5 BIG-IP software class.
Attributes:
api: iControl API instance.
"""
def __init__(self, api):
self.api = api
def get_all_software_status(self):
return self.api.System.SoftwareManagement.get_all_software_status()
class VirtualServers(object):
"""Virtual servers class.
F5 BIG-IP virtual servers class.
Attributes:
api: iControl API instance.
virtual_servers: List of virtual servers.
"""
def __init__(self, api, regex=None):
self.api = api
self.virtual_servers = api.LocalLB.VirtualServer.get_list()
if regex:
re_filter = re.compile(regex)
self.virtual_servers = filter(re_filter.search, self.virtual_servers)
def get_list(self):
return self.virtual_servers
def get_actual_hardware_acceleration(self):
return self.api.LocalLB.VirtualServer.get_actual_hardware_acceleration(self.virtual_servers)
def get_authentication_profile(self):
return self.api.LocalLB.VirtualServer.get_authentication_profile(self.virtual_servers)
def get_auto_lasthop(self):
return self.api.LocalLB.VirtualServer.get_auto_lasthop(self.virtual_servers)
def get_bw_controller_policy(self):
return self.api.LocalLB.VirtualServer.get_bw_controller_policy(self.virtual_servers)
def get_clone_pool(self):
return self.api.LocalLB.VirtualServer.get_clone_pool(self.virtual_servers)
def get_cmp_enable_mode(self):
return self.api.LocalLB.VirtualServer.get_cmp_enable_mode(self.virtual_servers)
def get_connection_limit(self):
return self.api.LocalLB.VirtualServer.get_connection_limit(self.virtual_servers)
def get_connection_mirror_state(self):
return self.api.LocalLB.VirtualServer.get_connection_mirror_state(self.virtual_servers)
def get_default_pool_name(self):
return self.api.LocalLB.VirtualServer.get_default_pool_name(self.virtual_servers)
def get_description(self):
return self.api.LocalLB.VirtualServer.get_description(self.virtual_servers)
def get_destination(self):
return self.api.LocalLB.VirtualServer.get_destination_v2(self.virtual_servers)
def get_enabled_state(self):
return self.api.LocalLB.VirtualServer.get_enabled_state(self.virtual_servers)
def get_enforced_firewall_policy(self):
return self.api.LocalLB.VirtualServer.get_enforced_firewall_policy(self.virtual_servers)
def get_fallback_persistence_profile(self):
return self.api.LocalLB.VirtualServer.get_fallback_persistence_profile(self.virtual_servers)
def get_fw_rule(self):
return self.api.LocalLB.VirtualServer.get_fw_rule(self.virtual_servers)
def get_gtm_score(self):
return self.api.LocalLB.VirtualServer.get_gtm_score(self.virtual_servers)
def get_last_hop_pool(self):
return self.api.LocalLB.VirtualServer.get_last_hop_pool(self.virtual_servers)
def get_nat64_state(self):
return self.api.LocalLB.VirtualServer.get_nat64_state(self.virtual_servers)
def get_object_status(self):
return self.api.LocalLB.VirtualServer.get_object_status(self.virtual_servers)
def get_persistence_profile(self):
return self.api.LocalLB.VirtualServer.get_persistence_profile(self.virtual_servers)
def get_profile(self):
return self.api.LocalLB.VirtualServer.get_profile(self.virtual_servers)
def get_protocol(self):
return self.api.LocalLB.VirtualServer.get_protocol(self.virtual_servers)
def get_rate_class(self):
return self.api.LocalLB.VirtualServer.get_rate_class(self.virtual_servers)
def get_rate_limit(self):
return self.api.LocalLB.VirtualServer.get_rate_limit(self.virtual_servers)
def get_rate_limit_destination_mask(self):
return self.api.LocalLB.VirtualServer.get_rate_limit_destination_mask(self.virtual_servers)
def get_rate_limit_mode(self):
return self.api.LocalLB.VirtualServer.get_rate_limit_mode(self.virtual_servers)
def get_rate_limit_source_mask(self):
return self.api.LocalLB.VirtualServer.get_rate_limit_source_mask(self.virtual_servers)
def get_related_rule(self):
return self.api.LocalLB.VirtualServer.get_related_rule(self.virtual_servers)
def get_rule(self):
return self.api.LocalLB.VirtualServer.get_rule(self.virtual_servers)
def get_security_log_profile(self):
return self.api.LocalLB.VirtualServer.get_security_log_profile(self.virtual_servers)
def get_snat_pool(self):
return self.api.LocalLB.VirtualServer.get_snat_pool(self.virtual_servers)
def get_snat_type(self):
return self.api.LocalLB.VirtualServer.get_snat_type(self.virtual_servers)
def get_source_address(self):
return self.api.LocalLB.VirtualServer.get_source_address(self.virtual_servers)
def get_source_address_translation_lsn_pool(self):
return self.api.LocalLB.VirtualServer.get_source_address_translation_lsn_pool(self.virtual_servers)
def get_source_address_translation_snat_pool(self):
return self.api.LocalLB.VirtualServer.get_source_address_translation_snat_pool(self.virtual_servers)
def get_source_address_translation_type(self):
return self.api.LocalLB.VirtualServer.get_source_address_translation_type(self.virtual_servers)
def get_source_port_behavior(self):
return self.api.LocalLB.VirtualServer.get_source_port_behavior(self.virtual_servers)
def get_staged_firewall_policy(self):
return self.api.LocalLB.VirtualServer.get_staged_firewall_policy(self.virtual_servers)
def get_translate_address_state(self):
return self.api.LocalLB.VirtualServer.get_translate_address_state(self.virtual_servers)
def get_translate_port_state(self):
return self.api.LocalLB.VirtualServer.get_translate_port_state(self.virtual_servers)
def get_type(self):
return self.api.LocalLB.VirtualServer.get_type(self.virtual_servers)
def get_vlan(self):
return self.api.LocalLB.VirtualServer.get_vlan(self.virtual_servers)
def get_wildmask(self):
return self.api.LocalLB.VirtualServer.get_wildmask(self.virtual_servers)
class Pools(object):
"""Pools class.
F5 BIG-IP pools class.
Attributes:
api: iControl API instance.
pool_names: List of pool names.
"""
def __init__(self, api, regex=None):
self.api = api
self.pool_names = api.LocalLB.Pool.get_list()
if regex:
re_filter = re.compile(regex)
self.pool_names = filter(re_filter.search, self.pool_names)
def get_list(self):
return self.pool_names
def get_action_on_service_down(self):
return self.api.LocalLB.Pool.get_action_on_service_down(self.pool_names)
def get_active_member_count(self):
return self.api.LocalLB.Pool.get_active_member_count(self.pool_names)
def get_aggregate_dynamic_ratio(self):
return self.api.LocalLB.Pool.get_aggregate_dynamic_ratio(self.pool_names)
def get_allow_nat_state(self):
return self.api.LocalLB.Pool.get_allow_nat_state(self.pool_names)
def get_allow_snat_state(self):
return self.api.LocalLB.Pool.get_allow_snat_state(self.pool_names)
def get_client_ip_tos(self):
return self.api.LocalLB.Pool.get_client_ip_tos(self.pool_names)
def get_client_link_qos(self):
return self.api.LocalLB.Pool.get_client_link_qos(self.pool_names)
def get_description(self):
return self.api.LocalLB.Pool.get_description(self.pool_names)
def get_gateway_failsafe_device(self):
return self.api.LocalLB.Pool.get_gateway_failsafe_device(self.pool_names)
def get_ignore_persisted_weight_state(self):
return self.api.LocalLB.Pool.get_ignore_persisted_weight_state(self.pool_names)
def get_lb_method(self):
return self.api.LocalLB.Pool.get_lb_method(self.pool_names)
def get_member(self):
return self.api.LocalLB.Pool.get_member_v2(self.pool_names)
def get_minimum_active_member(self):
return self.api.LocalLB.Pool.get_minimum_active_member(self.pool_names)
def get_minimum_up_member(self):
return self.api.LocalLB.Pool.get_minimum_up_member(self.pool_names)
def get_minimum_up_member_action(self):
return self.api.LocalLB.Pool.get_minimum_up_member_action(self.pool_names)
def get_minimum_up_member_enabled_state(self):
return self.api.LocalLB.Pool.get_minimum_up_member_enabled_state(self.pool_names)
def get_monitor_association(self):
return self.api.LocalLB.Pool.get_monitor_association(self.pool_names)
def get_monitor_instance(self):
return self.api.LocalLB.Pool.get_monitor_instance(self.pool_names)
def get_object_status(self):
return self.api.LocalLB.Pool.get_object_status(self.pool_names)
def get_profile(self):
return self.api.LocalLB.Pool.get_profile(self.pool_names)
def get_queue_depth_limit(self):
return self.api.LocalLB.Pool.get_queue_depth_limit(self.pool_names)
def get_queue_on_connection_limit_state(self):
return self.api.LocalLB.Pool.get_queue_on_connection_limit_state(self.pool_names)
def get_queue_time_limit(self):
return self.api.LocalLB.Pool.get_queue_time_limit(self.pool_names)
def get_reselect_tries(self):
return self.api.LocalLB.Pool.get_reselect_tries(self.pool_names)
def get_server_ip_tos(self):
return self.api.LocalLB.Pool.get_server_ip_tos(self.pool_names)
def get_server_link_qos(self):
return self.api.LocalLB.Pool.get_server_link_qos(self.pool_names)
def get_simple_timeout(self):
return self.api.LocalLB.Pool.get_simple_timeout(self.pool_names)
def get_slow_ramp_time(self):
return self.api.LocalLB.Pool.get_slow_ramp_time(self.pool_names)
class Devices(object):
"""Devices class.
F5 BIG-IP devices class.
Attributes:
api: iControl API instance.
devices: List of devices.
"""
def __init__(self, api, regex=None):
self.api = api
self.devices = api.Management.Device.get_list()
if regex:
re_filter = re.compile(regex)
self.devices = filter(re_filter.search, self.devices)
def get_list(self):
return self.devices
def get_active_modules(self):
return self.api.Management.Device.get_active_modules(self.devices)
def get_base_mac_address(self):
return self.api.Management.Device.get_base_mac_address(self.devices)
def get_blade_addresses(self):
return self.api.Management.Device.get_blade_addresses(self.devices)
def get_build(self):
return self.api.Management.Device.get_build(self.devices)
def get_chassis_id(self):
return self.api.Management.Device.get_chassis_id(self.devices)
def get_chassis_type(self):
return self.api.Management.Device.get_chassis_type(self.devices)
def get_comment(self):
return self.api.Management.Device.get_comment(self.devices)
def get_configsync_address(self):
return self.api.Management.Device.get_configsync_address(self.devices)
def get_contact(self):
return self.api.Management.Device.get_contact(self.devices)
def get_description(self):
return self.api.Management.Device.get_description(self.devices)
def get_edition(self):
return self.api.Management.Device.get_edition(self.devices)
def get_failover_state(self):
return self.api.Management.Device.get_failover_state(self.devices)
def get_local_device(self):
return self.api.Management.Device.get_local_device()
def get_hostname(self):
return self.api.Management.Device.get_hostname(self.devices)
def get_inactive_modules(self):
return self.api.Management.Device.get_inactive_modules(self.devices)
def get_location(self):
return self.api.Management.Device.get_location(self.devices)
def get_management_address(self):
return self.api.Management.Device.get_management_address(self.devices)
def get_marketing_name(self):
return self.api.Management.Device.get_marketing_name(self.devices)
def get_multicast_address(self):
return self.api.Management.Device.get_multicast_address(self.devices)
def get_optional_modules(self):
return self.api.Management.Device.get_optional_modules(self.devices)
def get_platform_id(self):
return self.api.Management.Device.get_platform_id(self.devices)
def get_primary_mirror_address(self):
return self.api.Management.Device.get_primary_mirror_address(self.devices)
def get_product(self):
return self.api.Management.Device.get_product(self.devices)
def get_secondary_mirror_address(self):
return self.api.Management.Device.get_secondary_mirror_address(self.devices)
def get_software_version(self):
return self.api.Management.Device.get_software_version(self.devices)
def get_timelimited_modules(self):
return self.api.Management.Device.get_timelimited_modules(self.devices)
def get_timezone(self):
return self.api.Management.Device.get_timezone(self.devices)
def get_unicast_addresses(self):
return self.api.Management.Device.get_unicast_addresses(self.devices)
class DeviceGroups(object):
"""Device groups class.
F5 BIG-IP device groups class.
Attributes:
api: iControl API instance.
device_groups: List of device groups.
"""
def __init__(self, api, regex=None):
self.api = api
self.device_groups = api.Management.DeviceGroup.get_list()
if regex:
re_filter = re.compile(regex)
self.device_groups = filter(re_filter.search, self.device_groups)
def get_list(self):
return self.device_groups
def get_all_preferred_active(self):
return self.api.Management.DeviceGroup.get_all_preferred_active(self.device_groups)
def get_autosync_enabled_state(self):
return self.api.Management.DeviceGroup.get_autosync_enabled_state(self.device_groups)
def get_description(self):
return self.api.Management.DeviceGroup.get_description(self.device_groups)
def get_device(self):
return self.api.Management.DeviceGroup.get_device(self.device_groups)
def get_full_load_on_sync_state(self):
return self.api.Management.DeviceGroup.get_full_load_on_sync_state(self.device_groups)
def get_incremental_config_sync_size_maximum(self):
return self.api.Management.DeviceGroup.get_incremental_config_sync_size_maximum(self.device_groups)
def get_network_failover_enabled_state(self):
return self.api.Management.DeviceGroup.get_network_failover_enabled_state(self.device_groups)
def get_sync_status(self):
return self.api.Management.DeviceGroup.get_sync_status(self.device_groups)
def get_type(self):
return self.api.Management.DeviceGroup.get_type(self.device_groups)
class TrafficGroups(object):
"""Traffic groups class.
F5 BIG-IP traffic groups class.
Attributes:
api: iControl API instance.
traffic_groups: List of traffic groups.
"""
def __init__(self, api, regex=None):
self.api = api
self.traffic_groups = api.Management.TrafficGroup.get_list()
if regex:
re_filter = re.compile(regex)
self.traffic_groups = filter(re_filter.search, self.traffic_groups)
def get_list(self):
return self.traffic_groups
def get_auto_failback_enabled_state(self):
return self.api.Management.TrafficGroup.get_auto_failback_enabled_state(self.traffic_groups)
def get_auto_failback_time(self):
return self.api.Management.TrafficGroup.get_auto_failback_time(self.traffic_groups)
def get_default_device(self):
return self.api.Management.TrafficGroup.get_default_device(self.traffic_groups)
def get_description(self):
return self.api.Management.TrafficGroup.get_description(self.traffic_groups)
def get_ha_load_factor(self):
return self.api.Management.TrafficGroup.get_ha_load_factor(self.traffic_groups)
def get_ha_order(self):
return self.api.Management.TrafficGroup.get_ha_order(self.traffic_groups)
def get_is_floating(self):
return self.api.Management.TrafficGroup.get_is_floating(self.traffic_groups)
def get_mac_masquerade_address(self):
return self.api.Management.TrafficGroup.get_mac_masquerade_address(self.traffic_groups)
def get_unit_id(self):
return self.api.Management.TrafficGroup.get_unit_id(self.traffic_groups)
class Rules(object):
"""Rules class.
F5 BIG-IP iRules class.
Attributes:
api: iControl API instance.
rules: List of iRules.
"""
def __init__(self, api, regex=None):
self.api = api
self.rules = api.LocalLB.Rule.get_list()
if regex:
re_filter = re.compile(regex)
self.rules = filter(re_filter.search, self.rules)
def get_list(self):
return self.rules
def get_description(self):
return self.api.LocalLB.Rule.get_description(rule_names=self.rules)
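# note: the 'vertification' spelling in the next two methods matches the
# iControl API method name invoked below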
def get_ignore_vertification(self):
return self.api.LocalLB.Rule.get_ignore_vertification(rule_names=self.rules)
def get_verification_status(self):
return self.api.LocalLB.Rule.get_verification_status_v2(rule_names=self.rules)
def get_definition(self):
return [x['rule_definition'] for x in self.api.LocalLB.Rule.query_rule(rule_names=self.rules)]
class Nodes(object):
"""Nodes class.
F5 BIG-IP nodes class.
Attributes:
api: iControl API instance.
nodes: List of nodes.
"""
def __init__(self, api, regex=None):
self.api = api
self.nodes = api.LocalLB.NodeAddressV2.get_list()
if regex:
re_filter = re.compile(regex)
self.nodes = filter(re_filter.search, self.nodes)
def get_list(self):
return self.nodes
def get_address(self):
return self.api.LocalLB.NodeAddressV2.get_address(nodes=self.nodes)
def get_connection_limit(self):
return self.api.LocalLB.NodeAddressV2.get_connection_limit(nodes=self.nodes)
def get_description(self):
return self.api.LocalLB.NodeAddressV2.get_description(nodes=self.nodes)
def get_dynamic_ratio(self):
return self.api.LocalLB.NodeAddressV2.get_dynamic_ratio_v2(nodes=self.nodes)
def get_monitor_instance(self):
return self.api.LocalLB.NodeAddressV2.get_monitor_instance(nodes=self.nodes)
def get_monitor_rule(self):
return self.api.LocalLB.NodeAddressV2.get_monitor_rule(nodes=self.nodes)
def get_monitor_status(self):
return self.api.LocalLB.NodeAddressV2.get_monitor_status(nodes=self.nodes)
def get_object_status(self):
return self.api.LocalLB.NodeAddressV2.get_object_status(nodes=self.nodes)
def get_rate_limit(self):
return self.api.LocalLB.NodeAddressV2.get_rate_limit(nodes=self.nodes)
def get_ratio(self):
return self.api.LocalLB.NodeAddressV2.get_ratio(nodes=self.nodes)
def get_session_status(self):
return self.api.LocalLB.NodeAddressV2.get_session_status(nodes=self.nodes)
class VirtualAddresses(object):
"""Virtual addresses class.
F5 BIG-IP virtual addresses class.
Attributes:
api: iControl API instance.
virtual_addresses: List of virtual addresses.
"""
def __init__(self, api, regex=None):
self.api = api
self.virtual_addresses = api.LocalLB.VirtualAddressV2.get_list()
if regex:
re_filter = re.compile(regex)
self.virtual_addresses = filter(re_filter.search, self.virtual_addresses)
def get_list(self):
return self.virtual_addresses
def get_address(self):
return self.api.LocalLB.VirtualAddressV2.get_address(self.virtual_addresses)
def get_arp_state(self):
return self.api.LocalLB.VirtualAddressV2.get_arp_state(self.virtual_addresses)
def get_auto_delete_state(self):
return self.api.LocalLB.VirtualAddressV2.get_auto_delete_state(self.virtual_addresses)
def get_connection_limit(self):
return self.api.LocalLB.VirtualAddressV2.get_connection_limit(self.virtual_addresses)
def get_description(self):
return self.api.LocalLB.VirtualAddressV2.get_description(self.virtual_addresses)
def get_enabled_state(self):
return self.api.LocalLB.VirtualAddressV2.get_enabled_state(self.virtual_addresses)
def get_icmp_echo_state(self):
return self.api.LocalLB.VirtualAddressV2.get_icmp_echo_state(self.virtual_addresses)
def get_is_floating_state(self):
return self.api.LocalLB.VirtualAddressV2.get_is_floating_state(self.virtual_addresses)
def get_netmask(self):
return self.api.LocalLB.VirtualAddressV2.get_netmask(self.virtual_addresses)
def get_object_status(self):
return self.api.LocalLB.VirtualAddressV2.get_object_status(self.virtual_addresses)
def get_route_advertisement_state(self):
return self.api.LocalLB.VirtualAddressV2.get_route_advertisement_state(self.virtual_addresses)
def get_traffic_group(self):
return self.api.LocalLB.VirtualAddressV2.get_traffic_group(self.virtual_addresses)
class AddressClasses(object):
"""Address group/class class.
F5 BIG-IP address group/class class.
Attributes:
api: iControl API instance.
address_classes: List of address classes.
"""
def __init__(self, api, regex=None):
self.api = api
self.address_classes = api.LocalLB.Class.get_address_class_list()
if regex:
re_filter = re.compile(regex)
self.address_classes = filter(re_filter.search, self.address_classes)
def get_list(self):
return self.address_classes
def get_address_class(self):
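# pairs each address class's member records with their data values:
# zip(members_i, values_i) per class, yielding (member, value) tuples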
key = self.api.LocalLB.Class.get_address_class(self.address_classes)
value = self.api.LocalLB.Class.get_address_class_member_data_value(key)
result = list(map(zip, [x['members'] for x in key], value))
return result
def get_description(self):
return self.api.LocalLB.Class.get_description(self.address_classes)
class Certificates(object):
"""Certificates class.
F5 BIG-IP certificates class.
Attributes:
api: iControl API instance.
certificates: List of certificate identifiers.
certificate_list: List of certificate information structures.
"""
def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"):
self.api = api
self.certificate_list = api.Management.KeyCertificate.get_certificate_list(mode=mode)
self.certificates = [x['certificate']['cert_info']['id'] for x in self.certificate_list]
if regex:
re_filter = re.compile(regex)
self.certificates = filter(re_filter.search, self.certificates)
self.certificate_list = [x for x in self.certificate_list if x['certificate']['cert_info']['id'] in self.certificates]
def get_list(self):
return self.certificates
def get_certificate_list(self):
return self.certificate_list
class Keys(object):
"""Keys class.
F5 BIG-IP keys class.
Attributes:
api: iControl API instance.
keys: List of key identifiers.
key_list: List of key information structures.
"""
def __init__(self, api, regex=None, mode="MANAGEMENT_MODE_DEFAULT"):
self.api = api
self.key_list = api.Management.KeyCertificate.get_key_list(mode=mode)
self.keys = [x['key_info']['id'] for x in self.key_list]
if regex:
re_filter = re.compile(regex)
self.keys = filter(re_filter.search, self.keys)
self.key_list = [x for x in self.key_list if x['key_info']['id'] in self.keys]
def get_list(self):
return self.keys
def get_key_list(self):
return self.key_list
class ProfileClientSSL(object):
"""Client SSL profiles class.
F5 BIG-IP client SSL profiles class.
Attributes:
api: iControl API instance.
profiles: List of client SSL profiles.
"""
def __init__(self, api, regex=None):
self.api = api
self.profiles = api.LocalLB.ProfileClientSSL.get_list()
if regex:
re_filter = re.compile(regex)
self.profiles = filter(re_filter.search, self.profiles)
def get_list(self):
return self.profiles
def get_alert_timeout(self):
return self.api.LocalLB.ProfileClientSSL.get_alert_timeout(self.profiles)
def get_allow_nonssl_state(self):
return self.api.LocalLB.ProfileClientSSL.get_allow_nonssl_state(self.profiles)
def get_authenticate_depth(self):
return self.api.LocalLB.ProfileClientSSL.get_authenticate_depth(self.profiles)
def get_authenticate_once_state(self):
return self.api.LocalLB.ProfileClientSSL.get_authenticate_once_state(self.profiles)
def get_ca_file(self):
return self.api.LocalLB.ProfileClientSSL.get_ca_file_v2(self.profiles)
def get_cache_size(self):
return self.api.LocalLB.ProfileClientSSL.get_cache_size(self.profiles)
def get_cache_timeout(self):
return self.api.LocalLB.ProfileClientSSL.get_cache_timeout(self.profiles)
def get_certificate_file(self):
return self.api.LocalLB.ProfileClientSSL.get_certificate_file_v2(self.profiles)
def get_chain_file(self):
return self.api.LocalLB.ProfileClientSSL.get_chain_file_v2(self.profiles)
def get_cipher_list(self):
return self.api.LocalLB.ProfileClientSSL.get_cipher_list(self.profiles)
def get_client_certificate_ca_file(self):
return self.api.LocalLB.ProfileClientSSL.get_client_certificate_ca_file_v2(self.profiles)
def get_crl_file(self):
return self.api.LocalLB.ProfileClientSSL.get_crl_file_v2(self.profiles)
def get_default_profile(self):
return self.api.LocalLB.ProfileClientSSL.get_default_profile(self.profiles)
def get_description(self):
return self.api.LocalLB.ProfileClientSSL.get_description(self.profiles)
def get_forward_proxy_ca_certificate_file(self):
return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_certificate_file(self.profiles)
def get_forward_proxy_ca_key_file(self):
return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_key_file(self.profiles)
def get_forward_proxy_ca_passphrase(self):
return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_ca_passphrase(self.profiles)
def get_forward_proxy_certificate_extension_include(self):
return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_extension_include(self.profiles)
def get_forward_proxy_certificate_lifespan(self):
return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_certificate_lifespan(self.profiles)
def get_forward_proxy_enabled_state(self):
return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_enabled_state(self.profiles)
def get_forward_proxy_lookup_by_ipaddr_port_state(self):
return self.api.LocalLB.ProfileClientSSL.get_forward_proxy_lookup_by_ipaddr_port_state(self.profiles)
def get_handshake_timeout(self):
return self.api.LocalLB.ProfileClientSSL.get_handshake_timeout(self.profiles)
def get_key_file(self):
return self.api.LocalLB.ProfileClientSSL.get_key_file_v2(self.profiles)
def get_modssl_emulation_state(self):
return self.api.LocalLB.ProfileClientSSL.get_modssl_emulation_state(self.profiles)
def get_passphrase(self):
return self.api.LocalLB.ProfileClientSSL.get_passphrase(self.profiles)
def get_peer_certification_mode(self):
return self.api.LocalLB.ProfileClientSSL.get_peer_certification_mode(self.profiles)
def get_profile_mode(self):
return self.api.LocalLB.ProfileClientSSL.get_profile_mode(self.profiles)
def get_renegotiation_maximum_record_delay(self):
return self.api.LocalLB.ProfileClientSSL.get_renegotiation_maximum_record_delay(self.profiles)
def get_renegotiation_period(self):
return self.api.LocalLB.ProfileClientSSL.get_renegotiation_period(self.profiles)
def get_renegotiation_state(self):
return self.api.LocalLB.ProfileClientSSL.get_renegotiation_state(self.profiles)
def get_renegotiation_throughput(self):
return self.api.LocalLB.ProfileClientSSL.get_renegotiation_throughput(self.profiles)
def get_retain_certificate_state(self):
return self.api.LocalLB.ProfileClientSSL.get_retain_certificate_state(self.profiles)
def get_secure_renegotiation_mode(self):
return self.api.LocalLB.ProfileClientSSL.get_secure_renegotiation_mode(self.profiles)
def get_server_name(self):
return self.api.LocalLB.ProfileClientSSL.get_server_name(self.profiles)
def get_session_ticket_state(self):
return self.api.LocalLB.ProfileClientSSL.get_session_ticket_state(self.profiles)
def get_sni_default_state(self):
return self.api.LocalLB.ProfileClientSSL.get_sni_default_state(self.profiles)
def get_sni_require_state(self):
return self.api.LocalLB.ProfileClientSSL.get_sni_require_state(self.profiles)
def get_ssl_option(self):
return self.api.LocalLB.ProfileClientSSL.get_ssl_option(self.profiles)
def get_strict_resume_state(self):
return self.api.LocalLB.ProfileClientSSL.get_strict_resume_state(self.profiles)
def get_unclean_shutdown_state(self):
return self.api.LocalLB.ProfileClientSSL.get_unclean_shutdown_state(self.profiles)
def get_is_base_profile(self):
return self.api.LocalLB.ProfileClientSSL.is_base_profile(self.profiles)
def get_is_system_profile(self):
return self.api.LocalLB.ProfileClientSSL.is_system_profile(self.profiles)
class SystemInfo(object):
"""System information class.
F5 BIG-IP system information class.
Attributes:
api: iControl API instance.
"""
def __init__(self, api):
self.api = api
def get_base_mac_address(self):
return self.api.System.SystemInfo.get_base_mac_address()
def get_blade_temperature(self):
return self.api.System.SystemInfo.get_blade_temperature()
def get_chassis_slot_information(self):
return self.api.System.SystemInfo.get_chassis_slot_information()
def get_globally_unique_identifier(self):
return self.api.System.SystemInfo.get_globally_unique_identifier()
def get_group_id(self):
return self.api.System.SystemInfo.get_group_id()
def get_hardware_information(self):
return self.api.System.SystemInfo.get_hardware_information()
def get_marketing_name(self):
return self.api.System.SystemInfo.get_marketing_name()
def get_product_information(self):
return self.api.System.SystemInfo.get_product_information()
def get_pva_version(self):
return self.api.System.SystemInfo.get_pva_version()
def get_system_id(self):
return self.api.System.SystemInfo.get_system_id()
def get_system_information(self):
return self.api.System.SystemInfo.get_system_information()
def get_time(self):
return self.api.System.SystemInfo.get_time()
def get_time_zone(self):
return self.api.System.SystemInfo.get_time_zone()
def get_uptime(self):
return self.api.System.SystemInfo.get_uptime()
class ProvisionInfo(object):
"""Provision information class.
F5 BIG-IP provision information class.
Attributes:
api: iControl API instance.
"""
def __init__(self, api):
self.api = api
def get_list(self):
result = []
modules = self.api.Management.Provision.get_list()
for item in modules:
item = item.lower().replace('tmos_module_', '')
result.append(item)
return result
def get_provisioned_list(self):
result = []
provisioned = self.api.Management.Provision.get_provisioned_list()
for item in provisioned:
item = item.lower().replace('tmos_module_', '')
result.append(item)
return result
def generate_dict(api_obj, fields):
result_dict = {}
lists = []
supported_fields = []
if api_obj.get_list():
for field in fields:
try:
api_response = getattr(api_obj, "get_" + field)()
except (MethodNotFound, WebFault):
pass
else:
lists.append(api_response)
supported_fields.append(field)
for i, j in enumerate(api_obj.get_list()):
temp = {}
temp.update([(item[0], item[1][i]) for item in zip(supported_fields, lists)])
result_dict[j] = temp
return result_dict
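# Sketch of the shape generate_dict returns (values are illustrative, not
# taken from a real device): with api_obj.get_list() == ['/Common/pool_a']
# and fields == ['lb_method'], the result is roughly
# {'/Common/pool_a': {'lb_method': 'LB_METHOD_ROUND_ROBIN'}}; fields whose
# getters raise MethodNotFound or WebFault are silently skipped.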
def generate_simple_dict(api_obj, fields):
result_dict = {}
for field in fields:
try:
api_response = getattr(api_obj, "get_" + field)()
except (MethodNotFound, WebFault):
pass
else:
result_dict[field] = api_response
return result_dict
def generate_interface_dict(f5, regex):
interfaces = Interfaces(f5.get_api(), regex)
fields = ['active_media', 'actual_flow_control', 'bundle_state',
'description', 'dual_media_state', 'enabled_state', 'if_index',
'learning_mode', 'lldp_admin_status', 'lldp_tlvmap',
'mac_address', 'media', 'media_option', 'media_option_sfp',
'media_sfp', 'media_speed', 'media_status', 'mtu',
'phy_master_slave_mode', 'prefer_sfp_state', 'flow_control',
'sflow_poll_interval', 'sflow_poll_interval_global',
'sfp_media_state', 'stp_active_edge_port_state',
'stp_enabled_state', 'stp_link_type',
'stp_protocol_detection_reset_state']
return generate_dict(interfaces, fields)
def generate_self_ip_dict(f5, regex):
self_ips = SelfIPs(f5.get_api(), regex)
fields = ['address', 'allow_access_list', 'description',
'enforced_firewall_policy', 'floating_state', 'fw_rule',
'netmask', 'staged_firewall_policy', 'traffic_group',
'vlan', 'is_traffic_group_inherited']
return generate_dict(self_ips, fields)
def generate_trunk_dict(f5, regex):
trunks = Trunks(f5.get_api(), regex)
fields = ['active_lacp_state', 'configured_member_count', 'description',
'distribution_hash_option', 'interface', 'lacp_enabled_state',
'lacp_timeout_option', 'link_selection_policy', 'media_speed',
'media_status', 'operational_member_count', 'stp_enabled_state',
'stp_protocol_detection_reset_state']
return generate_dict(trunks, fields)
def generate_vlan_dict(f5, regex):
vlans = Vlans(f5.get_api(), regex)
fields = ['auto_lasthop', 'cmp_hash_algorithm', 'description',
'dynamic_forwarding', 'failsafe_action', 'failsafe_state',
'failsafe_timeout', 'if_index', 'learning_mode',
'mac_masquerade_address', 'member', 'mtu',
'sflow_poll_interval', 'sflow_poll_interval_global',
'sflow_sampling_rate', 'sflow_sampling_rate_global',
'source_check_state', 'true_mac_address', 'vlan_id']
return generate_dict(vlans, fields)
def generate_vs_dict(f5, regex):
virtual_servers = VirtualServers(f5.get_api(), regex)
fields = ['actual_hardware_acceleration', 'authentication_profile',
'auto_lasthop', 'bw_controller_policy', 'clone_pool',
'cmp_enable_mode', 'connection_limit', 'connection_mirror_state',
'default_pool_name', 'description', 'destination',
'enabled_state', 'enforced_firewall_policy',
'fallback_persistence_profile', 'fw_rule', 'gtm_score',
'last_hop_pool', 'nat64_state', 'object_status',
'persistence_profile', 'profile', 'protocol',
'rate_class', 'rate_limit', 'rate_limit_destination_mask',
'rate_limit_mode', 'rate_limit_source_mask', 'related_rule',
'rule', 'security_log_profile', 'snat_pool', 'snat_type',
'source_address', 'source_address_translation_lsn_pool',
'source_address_translation_snat_pool',
'source_address_translation_type', 'source_port_behavior',
'staged_firewall_policy', 'translate_address_state',
'translate_port_state', 'type', 'vlan', 'wildmask']
return generate_dict(virtual_servers, fields)
def generate_pool_dict(f5, regex):
pools = Pools(f5.get_api(), regex)
fields = ['action_on_service_down', 'active_member_count',
'aggregate_dynamic_ratio', 'allow_nat_state',
'allow_snat_state', 'client_ip_tos', 'client_link_qos',
'description', 'gateway_failsafe_device',
'ignore_persisted_weight_state', 'lb_method', 'member',
'minimum_active_member', 'minimum_up_member',
'minimum_up_member_action', 'minimum_up_member_enabled_state',
'monitor_association', 'monitor_instance', 'object_status',
'profile', 'queue_depth_limit',
'queue_on_connection_limit_state', 'queue_time_limit',
'reselect_tries', 'server_ip_tos', 'server_link_qos',
'simple_timeout', 'slow_ramp_time']
return generate_dict(pools, fields)
def generate_device_dict(f5, regex):
devices = Devices(f5.get_api(), regex)
fields = ['active_modules', 'base_mac_address', 'blade_addresses',
'build', 'chassis_id', 'chassis_type', 'comment',
'configsync_address', 'contact', 'description', 'edition',
'failover_state', 'hostname', 'inactive_modules', 'location',
'management_address', 'marketing_name', 'multicast_address',
'optional_modules', 'platform_id', 'primary_mirror_address',
'product', 'secondary_mirror_address', 'software_version',
'timelimited_modules', 'timezone', 'unicast_addresses']
return generate_dict(devices, fields)
def generate_device_group_dict(f5, regex):
device_groups = DeviceGroups(f5.get_api(), regex)
fields = ['all_preferred_active', 'autosync_enabled_state', 'description',
'device', 'full_load_on_sync_state',
'incremental_config_sync_size_maximum',
'network_failover_enabled_state', 'sync_status', 'type']
return generate_dict(device_groups, fields)
def generate_traffic_group_dict(f5, regex):
traffic_groups = TrafficGroups(f5.get_api(), regex)
fields = ['auto_failback_enabled_state', 'auto_failback_time',
'default_device', 'description', 'ha_load_factor',
'ha_order', 'is_floating', 'mac_masquerade_address',
'unit_id']
return generate_dict(traffic_groups, fields)
def generate_rule_dict(f5, regex):
rules = Rules(f5.get_api(), regex)
fields = ['definition', 'description', 'ignore_vertification',
'verification_status']
return generate_dict(rules, fields)
def generate_node_dict(f5, regex):
nodes = Nodes(f5.get_api(), regex)
fields = ['address', 'connection_limit', 'description', 'dynamic_ratio',
'monitor_instance', 'monitor_rule', 'monitor_status',
'object_status', 'rate_limit', 'ratio', 'session_status']
return generate_dict(nodes, fields)
def generate_virtual_address_dict(f5, regex):
virtual_addresses = VirtualAddresses(f5.get_api(), regex)
fields = ['address', 'arp_state', 'auto_delete_state', 'connection_limit',
'description', 'enabled_state', 'icmp_echo_state',
'is_floating_state', 'netmask', 'object_status',
'route_advertisement_state', 'traffic_group']
return generate_dict(virtual_addresses, fields)
def generate_address_class_dict(f5, regex):
address_classes = AddressClasses(f5.get_api(), regex)
fields = ['address_class', 'description']
return generate_dict(address_classes, fields)
def generate_certificate_dict(f5, regex):
certificates = Certificates(f5.get_api(), regex)
return dict(zip(certificates.get_list(), certificates.get_certificate_list()))
def generate_key_dict(f5, regex):
keys = Keys(f5.get_api(), regex)
return dict(zip(keys.get_list(), keys.get_key_list()))
def generate_client_ssl_profile_dict(f5, regex):
profiles = ProfileClientSSL(f5.get_api(), regex)
fields = ['alert_timeout', 'allow_nonssl_state', 'authenticate_depth',
'authenticate_once_state', 'ca_file', 'cache_size',
'cache_timeout', 'certificate_file', 'chain_file',
'cipher_list', 'client_certificate_ca_file', 'crl_file',
'default_profile', 'description',
'forward_proxy_ca_certificate_file', 'forward_proxy_ca_key_file',
'forward_proxy_ca_passphrase',
'forward_proxy_certificate_extension_include',
'forward_proxy_certificate_lifespan',
'forward_proxy_enabled_state',
'forward_proxy_lookup_by_ipaddr_port_state', 'handshake_timeout',
'key_file', 'modssl_emulation_state', 'passphrase',
'peer_certification_mode', 'profile_mode',
'renegotiation_maximum_record_delay', 'renegotiation_period',
'renegotiation_state', 'renegotiation_throughput',
'retain_certificate_state', 'secure_renegotiation_mode',
'server_name', 'session_ticket_state', 'sni_default_state',
'sni_require_state', 'ssl_option', 'strict_resume_state',
'unclean_shutdown_state', 'is_base_profile', 'is_system_profile']
return generate_dict(profiles, fields)
def generate_system_info_dict(f5):
system_info = SystemInfo(f5.get_api())
fields = ['base_mac_address',
'blade_temperature', 'chassis_slot_information',
'globally_unique_identifier', 'group_id',
'hardware_information',
'marketing_name',
'product_information', 'pva_version', 'system_id',
'system_information', 'time',
'time_zone', 'uptime']
return generate_simple_dict(system_info, fields)
def generate_software_list(f5):
software = Software(f5.get_api())
software_list = software.get_all_software_status()
return software_list
def generate_provision_dict(f5):
provisioned = ProvisionInfo(f5.get_api())
fields = ['list', 'provisioned_list']
return generate_simple_dict(provisioned, fields)
def main():
argument_spec = f5_argument_spec()
meta_args = dict(
session=dict(type='bool', default=False),
include=dict(type='list', required=True),
filter=dict(type='str', required=False),
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec
)
if not bigsuds_found:
module.fail_json(msg="the python suds and bigsuds modules are required")
server = module.params['server']
server_port = module.params['server_port']
user = module.params['user']
password = module.params['password']
validate_certs = module.params['validate_certs']
session = module.params['session']
fact_filter = module.params['filter']
if validate_certs:
import ssl
if not hasattr(ssl, 'SSLContext'):
module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
if fact_filter:
regex = fnmatch.translate(fact_filter)
else:
regex = None
include = [x.lower() for x in module.params['include']]
valid_includes = ('address_class', 'certificate', 'client_ssl_profile',
'device', 'device_group', 'interface', 'key', 'node',
'pool', 'provision', 'rule', 'self_ip', 'software',
'system_info', 'traffic_group', 'trunk',
'virtual_address', 'virtual_server', 'vlan')
include_test = map(lambda x: x in valid_includes, include)
if not all(include_test):
module.fail_json(msg="value of include must be one or more of: %s, got: %s" % (",".join(valid_includes), ",".join(include)))
try:
facts = {}
if len(include) > 0:
f5 = F5(server, user, password, session, validate_certs, server_port)
saved_active_folder = f5.get_active_folder()
saved_recursive_query_state = f5.get_recursive_query_state()
if saved_active_folder != "/":
f5.set_active_folder("/")
if saved_recursive_query_state != "STATE_ENABLED":
f5.enable_recursive_query_state()
if 'interface' in include:
facts['interface'] = generate_interface_dict(f5, regex)
if 'self_ip' in include:
facts['self_ip'] = generate_self_ip_dict(f5, regex)
if 'trunk' in include:
facts['trunk'] = generate_trunk_dict(f5, regex)
if 'vlan' in include:
facts['vlan'] = generate_vlan_dict(f5, regex)
if 'virtual_server' in include:
facts['virtual_server'] = generate_vs_dict(f5, regex)
if 'pool' in include:
facts['pool'] = generate_pool_dict(f5, regex)
if 'provision' in include:
facts['provision'] = generate_provision_dict(f5)
if 'device' in include:
facts['device'] = generate_device_dict(f5, regex)
if 'device_group' in include:
facts['device_group'] = generate_device_group_dict(f5, regex)
if 'traffic_group' in include:
facts['traffic_group'] = generate_traffic_group_dict(f5, regex)
if 'rule' in include:
facts['rule'] = generate_rule_dict(f5, regex)
if 'node' in include:
facts['node'] = generate_node_dict(f5, regex)
if 'virtual_address' in include:
facts['virtual_address'] = generate_virtual_address_dict(f5, regex)
if 'address_class' in include:
facts['address_class'] = generate_address_class_dict(f5, regex)
if 'software' in include:
facts['software'] = generate_software_list(f5)
if 'certificate' in include:
facts['certificate'] = generate_certificate_dict(f5, regex)
if 'key' in include:
facts['key'] = generate_key_dict(f5, regex)
if 'client_ssl_profile' in include:
facts['client_ssl_profile'] = generate_client_ssl_profile_dict(f5, regex)
if 'system_info' in include:
facts['system_info'] = generate_system_info_dict(f5)
# restore saved state
if saved_active_folder and saved_active_folder != "/":
f5.set_active_folder(saved_active_folder)
if saved_recursive_query_state and \
saved_recursive_query_state != "STATE_ENABLED":
f5.set_recursive_query_state(saved_recursive_query_state)
result = {'ansible_facts': facts}
except Exception as e:
module.fail_json(msg="received exception: %s\ntraceback: %s" % (e, traceback.format_exc()))
module.exit_json(**result)
# include magic from lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.f5_utils import *
if __name__ == '__main__':
main()
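# Illustrative task usage (host and credentials are placeholders; the
# parameter names come from the argument spec above):
#
#   - bigip_facts:
#       server: lb.example.net
#       user: admin
#       password: mysecret
#       include: ['pool', 'virtual_server']
#       filter: 'web*'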
|
{
"content_hash": "ba56ee73f1fba555c1891cb9e12a3022",
"timestamp": "",
"source": "github",
"line_count": 1710,
"max_line_length": 166,
"avg_line_length": 35.79941520467836,
"alnum_prop": 0.6694055572798405,
"repo_name": "mcheo/ansible_f5",
"id": "76269dc933020baebc58e4fb9941ae9cd248222a",
"size": "61958",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "library/bigip_facts.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "986457"
},
{
"name": "Shell",
"bytes": "5016"
},
{
"name": "Tcl",
"bytes": "2161"
}
],
"symlink_target": ""
}
|
import logging
from pip._internal.cli.base_command import Command
from pip._internal.cli.status_codes import ERROR, SUCCESS
from pip._internal.operations.check import (
check_package_set,
create_package_set_from_installed,
)
from pip._internal.utils.misc import write_output
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
logger = logging.getLogger(__name__)
if MYPY_CHECK_RUNNING:
from typing import List, Any
from optparse import Values
class CheckCommand(Command):
"""Verify installed packages have compatible dependencies."""
usage = """
%prog [options]"""
def run(self, options, args):
# type: (Values, List[Any]) -> int
package_set, parsing_probs = create_package_set_from_installed()
missing, conflicting = check_package_set(package_set)
for project_name in missing:
version = package_set[project_name].version
for dependency in missing[project_name]:
write_output(
"%s %s requires %s, which is not installed.",
project_name, version, dependency[0],
)
for project_name in conflicting:
version = package_set[project_name].version
for dep_name, dep_version, req in conflicting[project_name]:
write_output(
"%s %s has requirement %s, but you have %s %s.",
project_name, version, req, dep_name, dep_version,
)
if missing or conflicting or parsing_probs:
return ERROR
else:
write_output("No broken requirements found.")
return SUCCESS
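# Illustrative output (hypothetical package names), following the format
# strings used in run() above:
#   pkg-a 1.0 requires pkg-b, which is not installed.
#   pkg-c 2.0 has requirement pkg-d>=2.0, but you have pkg-d 1.5.
# or, when every dependency is satisfied:
#   No broken requirements found.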
|
{
"content_hash": "cc860721b48df18cd14e6ce4c6a9a0d3",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 72,
"avg_line_length": 32.88235294117647,
"alnum_prop": 0.616577221228384,
"repo_name": "TeamSPoon/logicmoo_workspace",
"id": "b557ca64113c8948b2144f3a6acbed93e9bf8424",
"size": "1677",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "packs_web/butterfly/lib/python3.7/site-packages/pip/_internal/commands/check.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "342"
},
{
"name": "C",
"bytes": "1"
},
{
"name": "C++",
"bytes": "1"
},
{
"name": "CSS",
"bytes": "126627"
},
{
"name": "HTML",
"bytes": "839172"
},
{
"name": "Java",
"bytes": "11116"
},
{
"name": "JavaScript",
"bytes": "238700"
},
{
"name": "PHP",
"bytes": "42253"
},
{
"name": "Perl 6",
"bytes": "23"
},
{
"name": "Prolog",
"bytes": "440882"
},
{
"name": "PureBasic",
"bytes": "1334"
},
{
"name": "Rich Text Format",
"bytes": "3436542"
},
{
"name": "Roff",
"bytes": "42"
},
{
"name": "Shell",
"bytes": "61603"
},
{
"name": "TeX",
"bytes": "99504"
}
],
"symlink_target": ""
}
|
from azure.identity import DefaultAzureCredential
from azure.mgmt.apimanagement import ApiManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-apimanagement
# USAGE
python api_management_create_certificate_with_key_vault.py
Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get these values, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = ApiManagementClient(
credential=DefaultAzureCredential(),
subscription_id="subid",
)
response = client.certificate.create_or_update(
resource_group_name="rg1",
service_name="apimService1",
certificate_id="templateCertkv",
parameters={
"properties": {
"keyVault": {
"identityClientId": "ceaa6b06-c00f-43ef-99ac-f53d1fe876a0",
"secretIdentifier": "https://rpbvtkeyvaultintegration.vault-int.azure-int.net/secrets/msitestingCert",
}
}
},
)
print(response)
# x-ms-original-file: specification/apimanagement/resource-manager/Microsoft.ApiManagement/stable/2021-08-01/examples/ApiManagementCreateCertificateWithKeyVault.json
if __name__ == "__main__":
main()
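# Illustrative environment setup assumed by DefaultAzureCredential above
# (placeholder values, not real identifiers):
#   export AZURE_CLIENT_ID="<app-client-id>"
#   export AZURE_TENANT_ID="<tenant-id>"
#   export AZURE_CLIENT_SECRET="<client-secret>"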
|
{
"content_hash": "8e8397c31f5c32d79340a5734be18d55",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 165,
"avg_line_length": 35.523809523809526,
"alnum_prop": 0.685656836461126,
"repo_name": "Azure/azure-sdk-for-python",
"id": "1c781480b49e6d2b0b0511be30132076c151add5",
"size": "1960",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/apimanagement/azure-mgmt-apimanagement/generated_samples/api_management_create_certificate_with_key_vault.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import ConfigParser
import pickle
import os
import glob
import sys
import hashlib
def get_param(param, default=""):
v = raw_input(param+" ["+default+"]: ")
if v == "":
return default
return v
if not (os.path.exists("FunBot.py") and os.path.exists("games")):
print "Please run me from the directory with the FunBot.py file and games directory."
exit()
print "Welcome to the FunBot configurator. I will guide you through steps to generate a FunBot.ini configuration file and user database."
print "Prompts will be displayed like this: parameter [defaultvalue]:.\nIf you do not enter anything and press enter, the default value will be used."
print "Scanning games directory..."
if not os.path.exists("games/__init__.py"):
print "The games directory doesn't appear to have an __init__.py file."
exit()
found_games = []
for file in glob.iglob("games/[a-zA-Z0-9]*.py"):
try:
__import__("games."+file[6:-3])
except:
continue
m = sys.modules["games."+file[6:-3]]
try:
gamename = m.__gamename__
except:
continue
try:
help = m.show_help(None)
except:
help = None
print "Found", gamename, "["+file[6:]+"]"
if help is not None:
print "Help:", help
print
found_games.append((file[6:], gamename))
cfg = ConfigParser.ConfigParser()
cfg.add_section("config")
print "OK, let's start!"
print
userfilename = get_param("User database file name", "users.pickle")
cfg.set("config", "userfile", userfilename)
if get_param("Specify a log file? (Y/N)", "Y").upper() == "Y":
cfg.set("config", "logfile", get_param("Log file name", "FunBot.log"))
global_prefix = get_param("Global prefix", "!")
cfg.set("config", "prefix", global_prefix)
print
print "Which games would you like to add to your FunBot?"
add_games = []
for game in found_games:
if get_param("Add "+game[1]+" ["+game[0]+"]? (Y/N)", "Y").upper() == "Y":
add_games.append(game)
cfg.set("config", "games", ",".join([game[0][:-3] for game in add_games]))
print
print "Now, we need to add networks. If you don't specify any network name or server, I will stop asking you for networks."
net_names = []
while True:
net_name = get_param("Network section name")
if net_name == "":
break
net_server = get_param("Network server")
if net_server == "":
break
port = get_param("Port", "6667")
password = get_param("Connection password", "")
nick = get_param("Nick", "FunBot")
network_prefix = get_param("Network prefix", global_prefix)
if network_prefix == global_prefix:
network_prefix = ""
msgwait = get_param("Message delay", "50")
print
print "Let's add channels to your network. Again, if you don't specify any channel name or channel, I will stop asking you for channels."
channels = []
while True:
channel_name = get_param("Channel section name")
if channel_name == "":
break
channel = get_param("Channel")
if channel == "":
break
key = get_param("Channel keyword")
chan_prefix = get_param("Channel prefix", global_prefix if network_prefix == "" else network_prefix)
if chan_prefix == (global_prefix if network_prefix == "" else network_prefix):
chan_prefix = ""
channels.append((channel_name, channel, key, chan_prefix))
print
print "We're done with", net_name, "now!"
print
for channel in channels:
cfg.add_section(channel[0])
cfg.set(channel[0], "channel", channel[1])
if channel[2] != "":
cfg.set(channel[0], "key", channel[2])
if channel[3] != "":
cfg.set(channel[0], "prefix", channel[3])
cfg.add_section(net_name)
cfg.set(net_name, "server", net_server)
if port != "6667":
cfg.set(net_name, "port", port)
if password != "":
cfg.set(net_name, "pass", password)
cfg.set(net_name, "nick", nick)
if network_prefix != "":
cfg.set(net_name, "prefix", network_prefix)
cfg.set(net_name, "channels", ",".join([channel[0] for channel in channels]))
if msgwait != "50":
cfg.set(net_name, "msgwait", msgwait)
net_names.append(net_name)
cfg.set("config", "networks", ",".join(net_names))
print
print "Now, I need an admin username and password."
while True:
username = get_param("Admin username")
if username != "":
break
print "No, really, I need a username."
while True:
password = get_param("Admin password")
if password != "":
break
print "Seriously, I need a password for the admin account."
cfg.set("config", "admins", username)
print
print "Saving configuration file..."
while True:
try:
f = open("FunBot.ini", "w")
except:
print "FunBot.ini could not be opened."
v = get_param("Try again? (Y/N)", "Y").upper()
if v == "Y":
continue
print "OK then. Here's what the configuration file should look like:"
cfg.write(sys.stdout)
break
cfg.write(f)
f.close()
break
print "Creating the user database..."
d = [{"FunBot":[None,{}], username:[hashlib.sha1(password).hexdigest(),{}]}, {"FunBot":"FunBot"}]
pickle.dump(d, open(userfilename, "w"))
print
print "We're all done! You can now run FunBot. NOTE: Once FunBot starts up, you will need to add your hostmask to the admin account."
|
{
"content_hash": "8c424f5424046c174ee5b66d6fed9438",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 150,
"avg_line_length": 30.3109756097561,
"alnum_prop": 0.6708911687789177,
"repo_name": "tpwrules/FunBot",
"id": "1ab8b01c5687df7eab728ffaeab2d2d2d2307acf",
"size": "4971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FunBot/configurator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "79367"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Calendar'
db.create_table(u'calendar_calendar', (
(u'auditmodel_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['unobase.AuditModel'], unique=True)),
(u'tagmodel_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['unobase.TagModel'], unique=True, primary_key=True)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
('date_taken', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('view_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('crop_from', self.gf('django.db.models.fields.CharField')(default='center', max_length=10, blank=True)),
('effect', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='calendar_related', null=True, to=orm['photologue.PhotoEffect'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=255)),
('content', self.gf('ckeditor.fields.RichTextField')(null=True, blank=True)),
))
db.send_create_signal(u'calendar', ['Calendar'])
# Adding M2M table for field sites on 'Calendar'
m2m_table_name = db.shorten_name(u'calendar_calendar_sites')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('calendar', models.ForeignKey(orm[u'calendar.calendar'], null=False)),
('site', models.ForeignKey(orm[u'sites.site'], null=False))
))
db.create_unique(m2m_table_name, ['calendar_id', 'site_id'])
# Adding model 'Venue'
db.create_table(u'calendar_venue', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('address', self.gf('django.db.models.fields.CharField')(max_length=512)),
))
db.send_create_signal(u'calendar', ['Venue'])
# Adding model 'Event'
db.create_table(u'calendar_event', (
(u'auditmodel_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['unobase.AuditModel'], unique=True)),
(u'tagmodel_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['unobase.TagModel'], unique=True, primary_key=True)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
('date_taken', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('view_count', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
('crop_from', self.gf('django.db.models.fields.CharField')(default='center', max_length=10, blank=True)),
('effect', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='event_related', null=True, to=orm['photologue.PhotoEffect'])),
('title', self.gf('django.db.models.fields.CharField')(max_length=200)),
('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('slug', self.gf('django.db.models.fields.SlugField')(max_length=255)),
('content', self.gf('ckeditor.fields.RichTextField')(null=True, blank=True)),
('venue', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['calendar.Venue'], null=True, blank=True)),
('start', self.gf('django.db.models.fields.DateTimeField')(db_index=True)),
('end', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('repeat', self.gf('django.db.models.fields.CharField')(default='does_not_repeat', max_length=64)),
('repeat_until', self.gf('django.db.models.fields.DateField')(null=True, blank=True)),
('external_link', self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True)),
))
db.send_create_signal(u'calendar', ['Event'])
# Adding M2M table for field sites on 'Event'
m2m_table_name = db.shorten_name(u'calendar_event_sites')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('event', models.ForeignKey(orm[u'calendar.event'], null=False)),
('site', models.ForeignKey(orm[u'sites.site'], null=False))
))
db.create_unique(m2m_table_name, ['event_id', 'site_id'])
def backwards(self, orm):
# Deleting model 'Calendar'
db.delete_table(u'calendar_calendar')
# Removing M2M table for field sites on 'Calendar'
db.delete_table(db.shorten_name(u'calendar_calendar_sites'))
# Deleting model 'Venue'
db.delete_table(u'calendar_venue')
# Deleting model 'Event'
db.delete_table(u'calendar_event')
# Removing M2M table for field sites on 'Event'
db.delete_table(db.shorten_name(u'calendar_event_sites'))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'calendar.calendar': {
'Meta': {'object_name': 'Calendar'},
u'auditmodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['unobase.AuditModel']", 'unique': 'True'}),
'content': ('ckeditor.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'calendar_related'", 'null': 'True', 'to': u"orm['photologue.PhotoEffect']"}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
u'tagmodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['unobase.TagModel']", 'unique': 'True', 'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'calendar.event': {
'Meta': {'ordering': "('start',)", 'object_name': 'Event'},
u'auditmodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['unobase.AuditModel']", 'unique': 'True'}),
'content': ('ckeditor.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_related'", 'null': 'True', 'to': u"orm['photologue.PhotoEffect']"}),
'end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'external_link': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'repeat': ('django.db.models.fields.CharField', [], {'default': "'does_not_repeat'", 'max_length': '64'}),
'repeat_until': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'start': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
u'tagmodel_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['unobase.TagModel']", 'unique': 'True', 'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'venue': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['calendar.Venue']", 'null': 'True', 'blank': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
u'calendar.venue': {
'Meta': {'object_name': 'Venue'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'downloads.download': {
'Meta': {'object_name': 'Download'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'downloads.downloadversion': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'DownloadVersion'},
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'download': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'instances'", 'to': u"orm['downloads.Download']"}),
'eula_required': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'download_versions'", 'null': 'True', 'to': u"orm['eula.EULAVersion']"}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'eula.eula': {
'Meta': {'object_name': 'EULA'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'})
},
u'eula.eulaversion': {
'Meta': {'ordering': "['-date_created']", 'object_name': 'EULAVersion'},
'content': ('ckeditor.fields.RichTextField', [], {}),
'date_created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'eula': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'instances'", 'to': u"orm['eula.EULA']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'photologue.photoeffect': {
'Meta': {'object_name': 'PhotoEffect'},
'background_color': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '7'}),
'brightness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'color': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'contrast': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'filters': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'reflection_size': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'reflection_strength': ('django.db.models.fields.FloatField', [], {'default': '0.6'}),
'sharpness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'transpose_method': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'tagging.tag': {
'Meta': {'ordering': "['-publish_date_time']", 'object_name': 'Tag'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'publish_date_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'retract_date_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'state': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
u'unobase.auditmodel': {
'Meta': {'object_name': 'AuditModel'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'created_objects'", 'null': 'True', 'to': u"orm['user.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'leaf_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modified_objects'", 'null': 'True', 'to': u"orm['user.User']"})
},
u'unobase.tagmodel': {
'Meta': {'object_name': 'TagModel'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'leaf_content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tag_models'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['tagging.Tag']"})
},
u'user.user': {
'Meta': {'object_name': 'User'},
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'company': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'd_and_b': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'user_related'", 'null': 'True', 'to': u"orm['photologue.PhotoEffect']"}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'eulas_accepted': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users_accepted'", 'to': u"orm['eula.EULAVersion']", 'through': u"orm['user.UserEULA']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'files_downloaded': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users_downloaded'", 'to': u"orm['downloads.DownloadVersion']", 'through': u"orm['user.UserDownload']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_admin': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_console_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'job_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'legal_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'login_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'login_token_init_vector': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'maturation_score': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'newsletter_recipient': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'phone_number': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'role': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'}),
'salesforce_account_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'salesforce_contact_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'salesforce_lead_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'salesforce_subscription_level': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'state_province': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'street_address': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'tax_id_vat_gst_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'web_address': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'zip_postal_code': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
},
u'user.userdownload': {
'Meta': {'object_name': 'UserDownload'},
'download_version': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['downloads.DownloadVersion']"}),
'downloaded_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['user.User']"})
},
u'user.usereula': {
'Meta': {'object_name': 'UserEULA'},
'eula': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['eula.EULAVersion']"}),
'eula_content': ('ckeditor.fields.RichTextField', [], {}),
'file_signed': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['downloads.DownloadVersion']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'signed_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['user.User']"})
}
}
complete_apps = ['calendar']
|
{
"content_hash": "f21b78eee059123a2c2326607b612171",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 268,
"avg_line_length": 82.30847457627118,
"alnum_prop": 0.5679749598451465,
"repo_name": "unomena/unobase",
"id": "57808a02d5116fae0310001a13b9fc5d57670a3a",
"size": "24305",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unobase/calendar/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "307418"
}
],
"symlink_target": ""
}
|
from account import Account
from accountteams import AccountTeams
from accountuser import AccountUser
from contact import Contact
from contactlist import ContactList
from emailmessage import EmailMessage
from survey import Survey
from surveycampaign import SurveyCampaign
from surveyoption import SurveyOption
from surveypage import SurveyPage
from surveyquestion import SurveyQuestion
from surveyreport import SurveyReport
from surveyresponse import SurveyResponse
from surveystatistic import SurveyStatistic
__all__ = [
'Account',
'AccountTeams',
'AccountUser',
'Contact',
'ContactList',
'EmailMessage',
'Survey',
'SurveyCampaign',
'SurveyOption',
'SurveyPage',
'SurveyQuestion',
'SurveyReport',
'SurveyResponse',
'SurveyStatistic',
]
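# Usage sketch (illustrative, assuming this file lives at surveygizmo/api/__init__.py
# as the repo metadata below indicates):
#   from surveygizmo.api import Survey, SurveyResponse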
|
{
"content_hash": "f6a1d00f6f99c15eb206a40a0e49161f",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 43,
"avg_line_length": 24.84375,
"alnum_prop": 0.7723270440251573,
"repo_name": "infoscout/SurveyGizmo",
"id": "d30bed6090f0eeed6ad7c5f3845a02049d50e893",
"size": "796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "surveygizmo/api/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "33508"
}
],
"symlink_target": ""
}
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
import codecs
import os
import re
import sys
here = os.path.abspath(os.path.dirname(__file__))
# Get the long description from the relevant file
with codecs.open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
def read(*parts):
    # intentionally *not* adding an encoding option to open; see:
# https://github.com/pypa/virtualenv/issues/201#issuecomment-3145690
return codecs.open(os.path.join(here, *parts), 'r').read()
def find_version(*file_paths):
version_file = read(*file_paths)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]",
version_file, re.M)
if version_match:
return version_match.group(1)
raise RuntimeError("Unable to find version string.")
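# For reference (illustrative): find_version expects the target __init__.py to
# contain a line such as
#   __version__ = '0.1.0'
# from which the regex above extracts the version string.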
setup(
name='django-tastypie-simple-api-doc',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=find_version('tastypie_api_doc','__init__.py'),
description='A Django app to generate a simple and automatic api documentation with Tastypie',
long_description=long_description,
# The project's main homepage.
url='https://github.com/matheuscas/django-tastypie-simple-api-doc',
# Author details
author='Matheus Cardoso',
author_email='matheus.mcas@gmail.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: System :: Systems Administration :: Authentication/Directory',
'Environment :: Web Environment',
'Framework :: Django',
'Framework :: Django :: 1.8',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
],
# What does your project relate to?
keywords='django tastypie api documentation automatic',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(exclude=['contrib', 'docs', 'tests*']),
# packages=['tastypie_api_doc'],
package_data={'tastypie_api_doc': ['templates/index.html',
'static/tastypie_api_doc/bower_components/jquery/dist/*.js',
'static/tastypie_api_doc/bower_components/semantic/dist/*.js',
'static/tastypie_api_doc/bower_components/semantic/dist/*.css',
'static/tastypie_api_doc/bower_components/semantic/dist/components/*.js',
'static/tastypie_api_doc/bower_components/semantic/dist/components/*.css',
'static/tastypie_api_doc/bower_components/semantic/dist/themes/default/assets/fonts/*.ttf',
'static/tastypie_api_doc/bower_components/semantic/dist/themes/default/assets/fonts/*.woff',
'static/tastypie_api_doc/bower_components/semantic/dist/themes/default/assets/fonts/*.svg',
'static/tastypie_api_doc/bower_components/semantic/dist/themes/default/assets/fonts/*.eot'
'static/tastypie_api_doc/bower_components/semantic/dist/themes/default/assets/fonts/*.otf'
'static/tastypie_api_doc/bower_components/semantic/dist/themes/default/assets/fonts/*.woff2',
'static/tastypie_api_doc/*.css']},
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['django-tastypie>=0.12.1','django>=1.8.6',
'django-markup', 'textile','smartypants','docutils','markdown','python-creole'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'sample=sample:main',
],
},
zip_safe=False
)
|
{
"content_hash": "4d331509b43ab5e8b17bc531e8e275fd",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 125,
"avg_line_length": 42.744360902255636,
"alnum_prop": 0.6309586631486368,
"repo_name": "matheuscas/django-tastypie-simple-api-doc",
"id": "5153e082414e483b811bfcd33dabc5b876c4b782",
"size": "5685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1291"
},
{
"name": "HTML",
"bytes": "9554"
},
{
"name": "Python",
"bytes": "8186"
}
],
"symlink_target": ""
}
|
from azure.identity import DefaultAzureCredential
from azure.mgmt.synapse import SynapseManagementClient
"""
# PREREQUISITES
pip install azure-identity
pip install azure-mgmt-synapse
# USAGE
python integration_runtime_nodes_update.py
Before running the sample, please set the values of the client ID, tenant ID and client secret
of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""
def main():
client = SynapseManagementClient(
credential=DefaultAzureCredential(),
subscription_id="12345678-1234-1234-1234-12345678abc",
)
response = client.integration_runtime_nodes.update(
resource_group_name="exampleResourceGroup",
workspace_name="exampleWorkspace",
integration_runtime_name="exampleIntegrationRuntime",
node_name="Node_1",
update_integration_runtime_node_request={"concurrentJobsLimit": 2},
)
print(response)
# x-ms-original-file: specification/synapse/resource-manager/Microsoft.Synapse/preview/2021-06-01-preview/examples/IntegrationRuntimeNodes_Update.json
if __name__ == "__main__":
main()
|
{
"content_hash": "289fd078a5bf11005b10647711dc74a1",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 150,
"avg_line_length": 36.5,
"alnum_prop": 0.7374429223744292,
"repo_name": "Azure/azure-sdk-for-python",
"id": "1f2fccf7edb7b62ea207c6b27c6825ec477b3919",
"size": "1782",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/synapse/azure-mgmt-synapse/generated_samples/integration_runtime_nodes_update.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import bee
from bee.segments import *
import libcontext
from libcontext.socketclasses import *
from libcontext.pluginclasses import *
from .matrix import matrix
import Spyder
matrix0 = matrix(Spyder.AxisSystem(), "AxisSystem")
class spawn_actor_or_entity(bee.worker):
actorclassname = antenna("pull", "id")
b_actorclassname = buffer("pull", "id")
connect(actorclassname, b_actorclassname)
v_actorname = variable("id")
v_matrix = variable(("object", "matrix"))
@modifier
def do_spawn(self):
try:
self.actorspawnfunc(self.b_actorclassname, self.v_actorname)
except KeyError:
self.entspawnfunc(self.b_actorclassname, self.v_actorname)
axis = self.v_matrix.get_copy("AxisSystem")
ent = self.get_entity(self.v_actorname)
ent.set_axissystem(axis)
ent.commit()
spawn_matrix = antenna("push", ("id", ("object", "matrix")))
uw = unweaver(("id", ("object", "matrix")), v_actorname, v_matrix)
connect(spawn_matrix, uw)
trigger(v_actorname, b_actorclassname, "input")
trigger(v_matrix, do_spawn, "input")
spawn = antenna("push", "id")
b_spawn = buffer("push", "id")
@modifier
def set_identity_matrix(self):
self.v_matrix = matrix0
connect(spawn, b_spawn)
connect(b_spawn, v_actorname)
trigger(b_spawn, set_identity_matrix, "input")
trigger(b_spawn, b_spawn, "input")
trigger(b_spawn, do_spawn, "input")
def set_actorspawnfunc(self, spawnfunc):
self.actorspawnfunc = spawnfunc
def set_entspawnfunc(self, spawnfunc):
self.entspawnfunc = spawnfunc
def set_get_entity(self, get_entity):
self.get_entity = get_entity
def place(self):
libcontext.socket(("get_entity", "AxisSystem"), socket_single_required(self.set_get_entity))
libcontext.socket(("spawn", "actor"), socket_single_required(self.set_actorspawnfunc))
libcontext.socket(("spawn", "entity"), socket_single_required(self.set_entspawnfunc))
|
{
"content_hash": "1d13e83ee313f44d2f465ff2ceadb3f4",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 100,
"avg_line_length": 31.71875,
"alnum_prop": 0.6586206896551724,
"repo_name": "agoose77/hivesystem",
"id": "e56c6f4417afcb6af1932ce2290f281ea953172f",
"size": "2030",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dragonfly/scene/spawn_actor_or_entity.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "2491478"
},
{
"name": "Shell",
"bytes": "1164"
}
],
"symlink_target": ""
}
|
"""
This file contains some utility functions for using ImageNet dataset and L-OBS
Author: Chen Shangyu (schen025@e.ntu.edu.sg)
"""
import os
import sys
import time
import math
import torch
import torch.nn as nn
import torch.nn.init as init
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
import torch.optim as optim
import collections
from datetime import datetime
import numpy as np
import tensorflow as tf
import torch.nn.functional as F
from torch.autograd import Variable
import torch
import pickle
use_cuda = torch.cuda.is_available()
def get_error(theta_B, hessian, theta_0):
"""
Calculate \delta \theta^T H \delta \theta
:param theta_B:
:param hessian:
:param theta_0:
:param alpha:
:param sigma:
:return:
"""
delta = theta_B - theta_0
error = np.trace(np.dot(np.dot(delta.T, hessian), delta))
return error
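# Minimal numeric sketch (illustrative, not part of the original module): with
# H = I and delta = [1, 2]^T, trace(delta^T H delta) = 1 + 4 = 5.
def _example_get_error():
    hessian = np.eye(2)
    theta_0 = np.zeros((2, 1))
    theta_B = np.array([[1.0], [2.0]])
    assert abs(get_error(theta_B, hessian, theta_0) - 5.0) < 1e-12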
def unfold_kernel(kernel):
"""
In pytorch format, kernel is stored as [out_channel, in_channel, height, width]
Unfold kernel into a 2-dimension weights: [height * width * in_channel, out_channel]
:param kernel: numpy ndarray
:return:
"""
k_shape = kernel.shape
weight = np.zeros([k_shape[1] * k_shape[2] * k_shape[3], k_shape[0]])
for i in range(k_shape[0]):
weight[:, i] = np.reshape(kernel[i, :, :, :], [-1])
return weight
def fold_weights(weights, kernel_shape):
"""
In pytorch format, kernel is stored as [out_channel, in_channel, width, height]
Fold weights into a 4-dimensional tensor as [out_channel, in_channel, width, height]
:param weights:
:param kernel_shape:
:return:
"""
kernel = np.zeros(shape=kernel_shape)
for i in range(kernel_shape[0]):
kernel[i,:,:,:] = weights[:, i].reshape([kernel_shape[1], kernel_shape[2], kernel_shape[3]])
return kernel
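# Illustrative round-trip sketch (not part of the original module); the kernel
# shape below is a hypothetical example.
def _example_unfold_fold_roundtrip():
    kernel = np.random.randn(64, 3, 3, 3)  # [out_channel, in_channel, height, width]
    weight = unfold_kernel(kernel)         # -> [3 * 3 * 3, 64]
    assert weight.shape == (27, 64)
    restored = fold_weights(weight, kernel.shape)
    assert np.allclose(restored, kernel)   # fold_weights inverts unfold_kernel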
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
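# Minimal usage sketch (illustrative): top-1/top-5 precision on random logits.
def _example_accuracy():
    output = torch.randn(8, 1000)      # logits for a batch of 8, 1000 classes
    target = torch.randperm(1000)[:8]  # 8 random ground-truth class indices
    prec1, prec5 = accuracy(output, target, topk=(1, 5))
    return prec1, prec5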
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
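# Minimal usage sketch (illustrative): running average over two mini-batches.
def _example_average_meter():
    meter = AverageMeter()
    meter.update(0.5, n=32)  # e.g. batch loss 0.5 over 32 samples
    meter.update(0.7, n=32)
    assert abs(meter.avg - 0.6) < 1e-8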
def validate(model, val_loader, val_record, train_record, n_batch_used = 100, use_cuda = True):
monitor_freq = int(n_batch_used / 5)
# batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
criterion = nn.CrossEntropyLoss()
# switch to evaluate mode
model.eval()
# end = time.time()
for i, (input, target) in enumerate(val_loader):
if use_cuda:
target = target.cuda()
input = input.cuda()
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
if i % monitor_freq == 0:
print('Test: [{0}/{1}]\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), loss=losses,
top1=top1, top5=top5))
if i == n_batch_used:
break
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
if val_record != None:
val_record.write('Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}\n'
.format(top1=top1, top5=top5))
if train_record != None:
train_record.write('Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}\n'
.format(top1=top1, top5=top5))
model.train()
return top1.avg, top5.avg
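# Usage sketch (illustrative; `model` and `val_loader` are assumptions):
#   top1, top5 = validate(model, val_loader, val_record=None,
#                         train_record=None, n_batch_used=100)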
def adjust_mean_var(net, train_loader, train_file, n_batch_used = 500, use_cuda = True):
monitor_freq = int(n_batch_used / 5)
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
criterion = nn.CrossEntropyLoss()
net.train()
# end = time.time()
for i, (input, target) in enumerate(train_loader):
if use_cuda:
target = target.cuda()
input = input.cuda()
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = net(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
        if i % monitor_freq == 0:
print('Train: [{0}/{1}]\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, n_batch_used, loss=losses,
top1=top1, top5=top5))
if train_file != None:
train_file.write('[%d/%d] Loss: %f, Prec@1: %f, Prec@5: %f\n' %\
(i, n_batch_used, losses.avg, top1.avg, top5.avg))
        if i == n_batch_used:
break
def create_prune_graph(input_dimension, output_dimension):
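    # Reading of the graph below (comments added for clarity): for a pruned
    # weight w_q in row q, L-OBS compensates the remaining weights with
    # delta_w = -w_q / ((H^-1)_qq + eps) * H^-1 e_q, where e_q is the one-hot
    # selector built from prune_row_idx_holder and eps = 10e-6; the op adds
    # this delta to the current weights wb_holder.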
pruned_weight_holder = tf.placeholder(tf.float32, shape=None)
hessian_inv_diag_holder = tf.placeholder(tf.float32, shape=None)
hessian_inv_holder = tf.placeholder(tf.float32, shape=[input_dimension, input_dimension])
    prune_row_idx_holder = tf.placeholder(tf.int32, shape=None)  # tf.one_hot requires integer indices
mask_holder = tf.placeholder(tf.float32, shape=[input_dimension, output_dimension])
wb_holder = tf.placeholder(tf.float32, shape=[input_dimension, output_dimension])
selection_q = tf.one_hot(indices = prune_row_idx_holder, depth = input_dimension)
get_sparse_wb_op = -pruned_weight_holder / (hessian_inv_diag_holder + 10e-6) \
* tf.matmul(a = hessian_inv_holder, b = selection_q) + wb_holder
return pruned_weight_holder, hessian_inv_diag_holder, hessian_inv_holder, prune_row_idx_holder,\
mask_holder, wb_holder, get_sparse_wb_op
def create_sparse_mul_graph(input_dimension, output_dimension):
"""
This function perform element-wise multiplication between weights matrix and mask matrix
by tensorflow backend to speed up
args:
input_dimension: first dimension of weights (mask) matrix
output_dimension: second dimension of weights (mask) matrix
Output:
mask_holder: tf holder for mask matrix
wb_holder: tf holder for weight matrix
get_sparse_wb_op: tf op for generating sparse wb
"""
mask_holder = tf.placeholder(tf.float32, shape=[input_dimension, output_dimension])
wb_holder = tf.placeholder(tf.float32, shape=[input_dimension, output_dimension])
get_sparse_wb_op = tf.multiply(wb_holder, mask_holder)
return mask_holder, wb_holder, get_sparse_wb_op
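# Minimal usage sketch (illustrative, TF1-style session API; `weights` and
# `mask` are assumed to be numpy arrays of the same shape):
def _example_sparse_mul(weights, mask):
    m_holder, w_holder, sparse_op = create_sparse_mul_graph(
        weights.shape[0], weights.shape[1])
    with tf.Session() as sess:
        return sess.run(sparse_op, feed_dict={w_holder: weights,
                                              m_holder: mask})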
def generate_layer_list(param):
pass
|
{
"content_hash": "0d2f32a7a4502e6a9ff8dacd0d4deb7e",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 97,
"avg_line_length": 27.791338582677167,
"alnum_prop": 0.6821079473013175,
"repo_name": "csyhhu/L-OBS",
"id": "cf6fabaa6eabb0a4a608dbdbd8384fec7b09f6c1",
"size": "7059",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyTorch/ImageNet/utils.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "220221"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import warnings
import sys
import traceback
import inspect
import pickle
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import check_skip_travis
from sklearn.base import (clone, ClusterMixin)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.utils.validation import DataConversionWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if "n_iter" in params:
estimator.set_params(n_iter=5)
if "max_iter" in params:
# NMF
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if isinstance(estimator, BaseRandomProjection):
        # Due to the Johnson-Lindenstrauss lemma and the often very small
        # number of samples, the number of components of the random matrix
        # projection will probably be greater than the number of features.
        # So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
        # SelectKBest has a default of k=10,
        # which is more features than we have in most cases.
estimator.set_params(k=1)
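# Minimal usage sketch (illustrative; RandomForestClassifier is an arbitrary
# choice, not tied to this module): shrink an estimator before a smoke test.
def _example_set_fast_parameters():
    from sklearn.ensemble import RandomForestClassifier
    clf = RandomForestClassifier()
    set_fast_parameters(clf)
    assert clf.n_estimators <= 5  # capped by the n_estimators branch above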
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
def check_regressors_classifiers_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
estimator.predict(X)
if hasattr(estimator, 'predict_proba'):
estimator.predict_proba(X)
except TypeError as e:
        if 'sparse' not in repr(e):
            print("Estimator %s doesn't seem to fail gracefully on "
                  "sparse data: the error message should state explicitly "
                  "that sparse input is not supported if this is not the "
                  "case." % name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_transformer(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
if name == "KernelPCA":
transformer.remove_zero_eig = False
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
def check_transformer_sparse_data(name, Transformer):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
if name in ['Scaler', 'StandardScaler']:
transformer = Transformer(with_mean=False)
else:
transformer = Transformer()
set_fast_parameters(transformer)
# fit
try:
transformer.fit(X, y)
except TypeError as e:
        if 'sparse' not in repr(e):
            print("Estimator %s doesn't seem to fail gracefully on "
                  "sparse data: the error message should state explicitly "
                  "that sparse input is not supported if this is not the "
                  "case." % name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
if issubclass(Estimator, ClusterMixin):
estimator.fit(X_train)
else:
estimator.fit(X_train, y)
except ValueError as e:
                if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
if issubclass(Estimator, ClusterMixin):
# All estimators except clustering algorithm
# support fitting with (optional) y
estimator.fit(X_train_finite)
else:
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
                    if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
                    if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_transformer_pickle(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
n_samples, n_features = X.shape
X = StandardScaler().fit_transform(X)
X -= X.min()
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
if not hasattr(transformer, 'transform'):
return
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
random_state = np.random.RandomState(seed=12345)
y_ = np.vstack([y, 2 * y + random_state.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit(X, y_).transform(X)
pickled_transformer = pickle.dumps(transformer)
unpickled_transformer = pickle.loads(pickled_transformer)
pickled_X_pred = unpickled_transformer.transform(X)
assert_array_almost_equal(pickled_X_pred, X_pred)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
            if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.85)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict:
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
                        and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict:
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_classifiers_input_shapes(name, Classifier):
iris = load_iris()
X, y = iris.data, iris.target
X, y = shuffle(X, y, random_state=1)
X = StandardScaler().fit_transform(X)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y)
y_pred = classifier.predict(X)
set_random_state(classifier)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
classifier.fit(X, y[:, np.newaxis])
assert_equal(len(w), 1)
assert_array_equal(y_pred, classifier.predict(X))
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_classifiers_pickle(name, Classifier):
X, y = make_blobs(random_state=0)
X, y = shuffle(X, y, random_state=7)
X -= X.min()
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
y_pred = classifier.predict(X)
pickled_classifier = pickle.dumps(classifier)
unpickled_classifier = pickle.loads(pickled_classifier)
pickled_y_pred = unpickled_classifier.predict(X)
assert_array_almost_equal(pickled_y_pred, y_pred)
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
if name == 'OrthogonalMatchingPursuitCV':
# FIXME: This test is unstable on Travis, see issue #3190.
check_skip_travis()
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
if name == 'OrthogonalMatchingPursuitCV':
# FIXME: This test is unstable on Travis, see issue #3190.
check_skip_travis()
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
regressor.predict(X)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
assert_greater(regressor.score(X, y_), 0.5)
def check_regressors_pickle(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
if name == 'OrthogonalMatchingPursuitCV':
# FIXME: This test is unstable on Travis, see issue #3190.
check_skip_travis()
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
regressor.fit(X, y_)
y_pred = regressor.predict(X)
# store old predictions
pickled_regressor = pickle.dumps(regressor)
unpickled_regressor = pickle.loads(pickled_regressor)
pickled_y_pred = unpickled_regressor.predict(X)
assert_array_almost_equal(pickled_y_pred, y_pred)
def check_class_weight_classifiers(name, Classifier):
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.9)
def check_class_weight_auto_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='auto')
classifier.fit(X_train, y_train)
y_pred_auto = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_auto),
f1_score(y_test, y_pred))
def check_class_weight_auto_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
            # This is a very small dataset; the default n_iter is likely too
            # low for convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='auto')
coef_auto = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
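        # Class 1 occurs 3 times and class -1 twice, so these are the inverse
        # class frequencies normalised by their mean.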
mean_weight = (1. / 3 + 1. / 2) / 2
class_weight = {
1: 1. / 3 / mean_weight,
-1: 1. / 2 / mean_weight,
}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_auto, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
if name == 'MiniBatchDictLearning' or name == 'MiniBatchSparsePCA':
# FIXME
# for MiniBatchDictLearning and MiniBatchSparsePCA
estimator.batch_size = 1
set_fast_parameters(estimator)
set_random_state(estimator)
params = estimator.get_params()
estimator.fit(X, y)
new_params = estimator.get_params()
for k, v in params.items():
assert_false(np.any(new_params[k] != v),
"Estimator %s changes its parameter %s"
" from %s to %s during fit."
% (name, k, v, new_params[k]))
def check_cluster_overwrite_params(name, Clustering):
X, y = make_blobs(random_state=0, n_samples=9)
with warnings.catch_warnings(record=True):
# catch deprecation warnings
clustering = Clustering()
set_fast_parameters(clustering)
params = clustering.get_params()
clustering.fit(X)
new_params = clustering.get_params()
for k, v in params.items():
assert_false(np.any(new_params[k] != v),
"Estimator %s changes its parameter %s"
" from %s to %s during fit."
% (name, k, v, new_params[k]))
def check_sparsify_multiclass_classifier(name, Classifier):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Classifier()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_sparsify_binary_classifier(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(isinstance(estimator.set_params(), Estimator))
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
    # The getattr below unwraps any deprecation decorator around __init__.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D,
    # so convert y to 2-D for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
def check_non_transformer_estimators_n_iter(name, estimator, multi_output=False):
    # Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
        X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
# These return a n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
|
{
"content_hash": "069ea26e23543ef8f4333063de4b39b3",
"timestamp": "",
"source": "github",
"line_count": 980,
"max_line_length": 81,
"avg_line_length": 35.813265306122446,
"alnum_prop": 0.6018748041143117,
"repo_name": "Garrett-R/scikit-learn",
"id": "acdf451648830f780d8325f2fb64c1ff45b96a74",
"size": "35097",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sklearn/utils/estimator_checks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "18219273"
},
{
"name": "C++",
"bytes": "1808975"
},
{
"name": "CSS",
"bytes": "0"
},
{
"name": "JavaScript",
"bytes": "22298"
},
{
"name": "PowerShell",
"bytes": "13427"
},
{
"name": "Python",
"bytes": "5488186"
},
{
"name": "Shell",
"bytes": "8730"
}
],
"symlink_target": ""
}
|
from model.group import Group
from timeit import timeit
def test_group_list(app, db):
print(timeit(lambda: app.group.get_group_list(), number=1))
def clean(group):
return Group(id=group.id, name=group.name.strip())
    print(timeit(lambda: list(map(clean, db.get_group_list())), number=1000))
assert False # sorted(ui_list, key=Group.id_or_max) == sorted(db_list, key=Group.id_or_max)
|
{
"content_hash": "33046f6881ccab70f9e70175ec6b38a9",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 95,
"avg_line_length": 36.54545454545455,
"alnum_prop": 0.6890547263681592,
"repo_name": "sabinaczopik/python_training",
"id": "ce6424b15ff1c2e1432af5342ddd985ef93dce61",
"size": "402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_db_match_ui.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1243"
},
{
"name": "C",
"bytes": "423609"
},
{
"name": "C++",
"bytes": "137854"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "31715"
},
{
"name": "Tcl",
"bytes": "1295069"
}
],
"symlink_target": ""
}
|
"""MIT License
Copyright (c) 2017 Uka Osim
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE."""
import sys
import re
def count_words(lines):
    # accumulate every cleaned, lower-cased word into new_list
    new_list = []
    # accept either a list of lines or a single whitespace-separated string
if not isinstance(lines, list):
lines = lines.split()
for line in lines:
new_list += [word for word in regex.sub("", line).lower().split()]
    # build a set of (<word>, <count>) tuples, then sort it into a list
    # ordered by descending count, with ties broken alphabetically
    sorted_list = sorted({(word, new_list.count(word)) for word in new_list},
                         key=lambda tup: (-tup[1], tup[0]))
    # the highest word counts come first
    return sorted_list
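# Illustrative example (hypothetical input):
#     count_words(["the cat the"]) -> [("the", 2), ("cat", 1)]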
# strip characters that are not letters, word characters, whitespace or ':'
regex = re.compile(r"[^a-zA-Z\w\s:]")
if __name__ == '__main__':
stream = sys.stdin
lines = stream.readlines()
for word, count in count_words(lines):
print(word, count)
|
{
"content_hash": "04f76e9306a7f0afcd464ca3f2aa0283",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 90,
"avg_line_length": 39.464285714285715,
"alnum_prop": 0.7036199095022625,
"repo_name": "osimuka/samplecode",
"id": "69092b371b7638c72c9bcc1296208d7c756beda1",
"size": "2210",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "word_count.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26872"
}
],
"symlink_target": ""
}
|
"""Test error messages for 'getaddressinfo' and 'validateaddress' RPC commands."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
BECH32_VALID = 'bcrt1qtmp74ayg7p24uslctssvjm06q5phz4yrxucgnv'
BECH32_INVALID_BECH32 = 'bcrt1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqdmchcc'
BECH32_INVALID_BECH32M = 'bcrt1qw508d6qejxtdg4y5r3zarvary0c5xw7k35mrzd'
BECH32_INVALID_VERSION = 'bcrt130xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqynjegk'
BECH32_INVALID_SIZE = 'bcrt1s0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7v8n0nx0muaewav25430mtr'
BECH32_INVALID_V0_SIZE = 'bcrt1qw508d6qejxtdg4y5r3zarvary0c5xw7kqqq5k3my'
BECH32_INVALID_PREFIX = 'bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7k7grplx'
BASE58_VALID = 'mipcBbFg9gMiCh81Kj8tqqdgoZub1ZJRfn'
BASE58_INVALID_PREFIX = '17VZNX1SN5NtKa8UQFxwQbFeFc3iqRYhem'
INVALID_ADDRESS = 'asfah14i8fajz0123f'
class InvalidAddressErrorMessageTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def test_validateaddress(self):
node = self.nodes[0]
# Bech32
info = node.validateaddress(BECH32_INVALID_SIZE)
assert not info['isvalid']
assert_equal(info['error'], 'Invalid Bech32 address data size')
info = node.validateaddress(BECH32_INVALID_PREFIX)
assert not info['isvalid']
assert_equal(info['error'], 'Invalid prefix for Bech32 address')
info = node.validateaddress(BECH32_INVALID_BECH32)
assert not info['isvalid']
assert_equal(info['error'], 'Version 1+ witness address must use Bech32m checksum')
info = node.validateaddress(BECH32_INVALID_BECH32M)
assert not info['isvalid']
assert_equal(info['error'], 'Version 0 witness address must use Bech32 checksum')
info = node.validateaddress(BECH32_INVALID_V0_SIZE)
assert not info['isvalid']
assert_equal(info['error'], 'Invalid Bech32 v0 address data size')
info = node.validateaddress(BECH32_VALID)
assert info['isvalid']
assert 'error' not in info
info = node.validateaddress(BECH32_INVALID_VERSION)
assert not info['isvalid']
assert_equal(info['error'], 'Invalid Bech32 address witness version')
# Base58
info = node.validateaddress(BASE58_INVALID_PREFIX)
assert not info['isvalid']
assert_equal(info['error'], 'Invalid prefix for Base58-encoded address')
info = node.validateaddress(BASE58_VALID)
assert info['isvalid']
assert 'error' not in info
# Invalid address format
info = node.validateaddress(INVALID_ADDRESS)
assert not info['isvalid']
assert_equal(info['error'], 'Invalid address format')
def test_getaddressinfo(self):
node = self.nodes[0]
assert_raises_rpc_error(-5, "Invalid Bech32 address data size", node.getaddressinfo, BECH32_INVALID_SIZE)
assert_raises_rpc_error(-5, "Invalid prefix for Bech32 address", node.getaddressinfo, BECH32_INVALID_PREFIX)
assert_raises_rpc_error(-5, "Invalid prefix for Base58-encoded address", node.getaddressinfo, BASE58_INVALID_PREFIX)
assert_raises_rpc_error(-5, "Invalid address format", node.getaddressinfo, INVALID_ADDRESS)
def run_test(self):
self.test_validateaddress()
if self.is_wallet_compiled():
self.init_wallet(node=0)
self.test_getaddressinfo()
if __name__ == '__main__':
InvalidAddressErrorMessageTest().main()
|
{
"content_hash": "3c4d1cd0f49c582b8df14a6f073e8a4e",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 124,
"avg_line_length": 38.90425531914894,
"alnum_prop": 0.7128794093519278,
"repo_name": "prusnak/bitcoin",
"id": "8f9b2c3312d654109b6084102ee8a3f48a85f867",
"size": "3871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/rpc_invalid_address_message.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28178"
},
{
"name": "C",
"bytes": "1230499"
},
{
"name": "C++",
"bytes": "9236078"
},
{
"name": "CMake",
"bytes": "29132"
},
{
"name": "Cap'n Proto",
"bytes": "1256"
},
{
"name": "Dockerfile",
"bytes": "1721"
},
{
"name": "HTML",
"bytes": "21833"
},
{
"name": "Java",
"bytes": "695"
},
{
"name": "M4",
"bytes": "257675"
},
{
"name": "Makefile",
"bytes": "131646"
},
{
"name": "Objective-C++",
"bytes": "5497"
},
{
"name": "Python",
"bytes": "2609560"
},
{
"name": "QMake",
"bytes": "438"
},
{
"name": "Sage",
"bytes": "56850"
},
{
"name": "Scheme",
"bytes": "25953"
},
{
"name": "Shell",
"bytes": "210746"
}
],
"symlink_target": ""
}
|
import os
import sublime
from sublime_plugin import WindowCommand, TextCommand
from ..git_command import GitCommand
COMMIT_HELP_TEXT = """
## To make a commit, type your commit message and press {key}-ENTER. To cancel
## the commit, close the window. To sign off the commit press {key}-S.
## You may also reference or close a GitHub issue with this commit. To do so,
## type `#` followed by the `tab` key. You will be shown a list of issues
## related to the current repo. You may also type `owner/repo#` plus the `tab`
## key to reference an issue in a different GitHub repo.
""".format(key="CTRL" if os.name == "nt" else "SUPER")
COMMIT_SIGN_TEXT = """
Signed-off-by: {name} <{email}>
"""
COMMIT_TITLE = "COMMIT"
class GsCommitCommand(WindowCommand, GitCommand):
"""
Display a transient window to capture the user's desired commit message.
If the user is amending the previous commit, pre-populate the commit
message area with the previous commit message.
"""
def run(self, **kwargs):
sublime.set_timeout_async(lambda: self.run_async(**kwargs), 0)
def run_async(self, repo_path=None, include_unstaged=False, amend=False):
repo_path = repo_path or self.repo_path
view = self.window.new_file()
view.settings().set("git_savvy.get_long_text_view", True)
view.settings().set("git_savvy.commit_view.include_unstaged", include_unstaged)
view.settings().set("git_savvy.commit_view.amend", amend)
view.settings().set("git_savvy.repo_path", repo_path)
view.set_syntax_file("Packages/GitSavvy/syntax/make_commit.tmLanguage")
view.set_name(COMMIT_TITLE)
view.set_scratch(True)
view.run_command("gs_commit_initialize_view")
class GsCommitInitializeViewCommand(TextCommand, GitCommand):
"""
Fill the view with the commit view help message, and optionally
the previous commit message if amending.
"""
def run(self, edit):
merge_msg_path = os.path.join(self.repo_path, ".git", "MERGE_MSG")
if self.view.settings().get("git_savvy.commit_view.amend"):
last_commit_message = self.git("log", "-1", "--pretty=%B")
initial_text = last_commit_message + COMMIT_HELP_TEXT
elif os.path.exists(merge_msg_path):
with open(merge_msg_path, "r") as f:
initial_text = f.read() + COMMIT_HELP_TEXT
else:
initial_text = COMMIT_HELP_TEXT
if sublime.load_settings("GitSavvy.sublime-settings").get("show_commit_diff"):
stdout = self.git("diff", "--cached")
initial_text = initial_text + stdout
self.view.run_command("gs_replace_view_text", {
"text": initial_text,
"nuke_cursors": True
})
class GsCommitViewDoCommitCommand(TextCommand, GitCommand):
"""
Take the text of the current view (minus the help message text) and
make a commit using the text for the commit message.
"""
def run(self, edit):
sublime.set_timeout_async(self.run_async, 0)
def run_async(self):
view_text = self.view.substr(sublime.Region(0, self.view.size()))
commit_message = view_text.split(COMMIT_HELP_TEXT)[0]
if self.view.settings().get("git_savvy.commit_view.include_unstaged"):
self.add_all_tracked_files()
if self.view.settings().get("git_savvy.commit_view.amend"):
self.git("commit", "-q", "--amend", "-F", "-", stdin=commit_message)
else:
self.git("commit", "-q", "-F", "-", stdin=commit_message)
self.view.window().focus_view(self.view)
self.view.window().run_command("close_file")
class GsCommitViewSignCommand(TextCommand, GitCommand):
"""
Sign off the commit with full name and email
"""
def run(self, edit):
view_text = self.view.substr(sublime.Region(0, self.view.size()))
view_text_list = view_text.split(COMMIT_HELP_TEXT)
config_name = self.git("config", "user.name").strip()
config_email = self.git("config", "user.email").strip()
sign_text = COMMIT_SIGN_TEXT.format(name=config_name, email=config_email)
view_text_list[0] += sign_text
self.view.run_command("gs_replace_view_text", {
"text": COMMIT_HELP_TEXT.join(view_text_list),
"nuke_cursors": True
})
|
{
"content_hash": "9284e492af9e971b82af36c6279f0a70",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 87,
"avg_line_length": 34.817460317460316,
"alnum_prop": 0.6366537497150673,
"repo_name": "ypersyntelykos/GitSavvy",
"id": "b425b0b555b2df4e958693dc90b11477a5c9c77b",
"size": "4387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/commands/commit.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "22782"
},
{
"name": "Python",
"bytes": "145608"
}
],
"symlink_target": ""
}
|
from typing import Optional, List, Tuple, Sequence, Union, cast, TypeVar
from typing import Iterator, overload
import numpy
import itertools
from .. import registry
from ..types import Xp, Shape, DTypes, DTypesInt, DTypesFloat, List2d, ArrayXd
from ..types import Array3d, Floats1d, Floats2d, Floats3d, Floats4d
from ..types import FloatsXd, Ints1d, Ints2d, Ints3d, Ints4d, IntsXd, _Floats
from ..types import DeviceTypes, Generator, Padded, Batchable, SizedGenerator
from ..util import get_array_module, is_xp_array, to_numpy
ArrayT = TypeVar("ArrayT", bound=ArrayXd)
FloatsT = TypeVar("FloatsT", bound=_Floats)
FloatsType = TypeVar("FloatsType", bound=FloatsXd)
class Ops:
name: str = "base"
xp: Xp = numpy
def __init__(
self, device_type: DeviceTypes = "cpu", device_id: int = -1, **kwargs
) -> None:
self.device_type = device_type
self.device_id = device_id
def to_numpy(self, data): # pragma: no cover
if isinstance(data, numpy.ndarray):
return data
else:
raise ValueError("Cannot convert non-numpy from base Ops class")
def minibatch(
self,
size: Union[int, Generator],
sequence: Batchable,
*,
shuffle: bool = False,
buffer: int = 1,
) -> SizedGenerator:
"""Iterate slices from a sequence, optionally shuffled. Slices
may be either views or copies of the underlying data.
The `size` argument may be either an integer, or a sequence of integers.
If a sequence, a new size is drawn before every output.
If shuffle is True, shuffled batches are produced by first generating
an index array, shuffling it, and then using it to slice into the
sequence.
        An internal queue of `buffer` items is accumulated before being
        output. Buffering is useful for some devices, to allow the
        network to run asynchronously without blocking on every batch.
"""
if not hasattr(sequence, "__len__"):
err = f"Can't minibatch data. Expected sequence, got {type(sequence)}"
raise ValueError(err)
sizes = self._get_batch_sizes(
len(sequence), itertools.repeat(size) if isinstance(size, int) else size
)
indices = numpy.arange(len(sequence))
# This is a bit convoluted, but it's a time where convenience makes
# trickery worthwhile: instead of being an actual generator, we
# return our SizedGenerator object, which provides a __len__.
def _iter_items():
if shuffle:
numpy.random.shuffle(indices)
queue = []
i = 0
for size in sizes:
size = int(size)
queue.append(self._get_batch(sequence, indices[i : i + size]))
if len(queue) >= buffer:
yield from queue
queue = []
i += size
yield from queue
return SizedGenerator(_iter_items, len(sizes))
def multibatch(
self,
size: Union[int, Generator],
sequence: Batchable,
*others: Batchable,
shuffle: bool = False,
buffer: int = 1,
) -> SizedGenerator:
"""Minibatch one or more sequences of data, and yield
lists with one batch per sequence. See ops.minibatch.
"""
# You'd think we could just do this by calling into minibatch and zip...
# But the shuffling makes it really hard.
sequences = (sequence,) + tuple(others)
if not all(hasattr(seq, "__len__") for seq in sequences):
values = ", ".join([f"{type(seq)}" for seq in sequences])
err = f"Can't multibatch data. Expected sequences, got {values}"
raise ValueError(err)
sizes = self._get_batch_sizes(
len(sequence), itertools.repeat(size) if isinstance(size, int) else size
)
indices = numpy.arange(len(sequence))
def _iter_items():
if shuffle:
numpy.random.shuffle(indices)
queue = []
i = 0
for size in sizes:
size = int(size)
idx_batch = indices[i : i + size]
queue.append([])
for sequence in sequences:
queue[-1].append(self._get_batch(sequence, idx_batch))
if len(queue) >= buffer:
yield from queue
queue = []
i += size
yield from queue
return SizedGenerator(_iter_items, len(sizes))
def _get_batch(self, sequence, indices):
if isinstance(sequence, list):
subseq = [sequence[i] for i in indices]
elif isinstance(sequence, tuple):
subseq = tuple(sequence[i] for i in indices) # type: ignore
else:
subseq = sequence[indices] # type: ignore
if is_xp_array(subseq):
subseq = self.as_contig(
cast(ArrayXd, self.xp.asarray(subseq))
) # type: ignore
return subseq
def _get_batch_sizes(self, length: int, sizes: Iterator[int]):
output = []
i = 0
while i < length:
output.append(next(sizes))
i += output[-1]
return output
def seq2col(self, seq: Floats2d, nW: int) -> Floats2d:
"""Given an (M, N) sequence of vectors, return an (M, N*(nW*2+1))
sequence. The new sequence is constructed by concatenating nW preceding
and succeeding vectors onto each column in the sequence, to extract a
window of features.
"""
# This is a test implementation that only supports nW=1
assert nW == 1
B = seq.shape[0]
I = seq.shape[1]
cols = self.alloc3f(B, (nW * 2 + 1), I)
# Copy left contexts. The last words aren't the left-context for anything.
cols[nW:, :nW] = self.reshape3f(seq[:-nW], -1, nW, I)
cols[:, nW] = seq
cols[:-nW, nW + 1 :] = self.reshape3f(seq[nW:], -1, nW, I)
return self.reshape2f(cols, B, I * (2 * nW + 1))
def backprop_seq2col(self, dY: Floats2d, nW: int) -> Floats2d:
"""The reverse/backward operation of the `seq2col` function: calculate
the gradient of the original `(M, N)` sequence, as a function of the
gradient of the output `(M, N*(nW*2+1))` sequence.
"""
# This is a test implementation that only supports nW=1
assert nW == 1
nF = nW * 2 + 1
B = dY.shape[0]
I = dY.shape[1] // nF
# Having trouble getting the kernel to work...
dX = self.alloc2f(B, I)
dY3d = self.reshape3f(dY, B, nF, I)
dX[:-nW] += self.reshape2f(dY3d[nW:, :nW], -1, I)
dX += dY3d[:, nW]
dX[nW:] += self.reshape2f(dY3d[:-nW, nW + 1 :], -1, I)
return dX
def gemm(
self,
x: Floats2d,
y: Floats2d,
out: Optional[Floats2d] = None,
trans1: bool = False,
trans2: bool = False,
) -> Floats2d:
"""Perform General Matrix Multiplication (GeMM) and optionally store
the result in the specified output variable.
"""
if trans1:
x = x.T
if trans2:
y = y.T
if out is None:
return self.xp.dot(x, y)
else:
self.xp.dot(x, y, out=out)
return out
def tile(self, X: Floats2d, reps: int) -> Floats2d:
return self.xp.tile(X, reps)
def affine(self, X: Floats2d, W: Floats2d, b: Floats1d) -> Floats2d:
"""Apply a weights layer and a bias to some inputs, i.e.
Y = X @ W.T + b
"""
Y = self.gemm(X, W, trans2=True)
Y += b
return Y
def flatten(
self,
X: Sequence[ArrayT],
dtype: Optional[DTypes] = None,
pad: int = 0,
ndim_if_empty: int = 2,
) -> ArrayT:
"""Flatten a list of arrays into one large array."""
if X is None or len(X) == 0:
return self.alloc((0,) * ndim_if_empty, dtype=dtype or "f")
xp = get_array_module(X[0])
shape_if_empty = X[0].shape
X = [x for x in X if x.size != 0]
if len(X) == 0:
return self.alloc(shape_if_empty, dtype=dtype or "f")
if int(pad) >= 1:
padded = []
for x in X:
padded.append(xp.zeros((pad,) + x.shape[1:], dtype=x.dtype))
padded.append(x)
padded.append(xp.zeros((pad,) + x.shape[1:], dtype=x.dtype))
X = padded
result = xp.concatenate(X)
if dtype is not None:
result = xp.asarray(result, dtype=dtype)
return result
def unflatten(self, X: Floats2d, lengths: Ints1d, pad: int = 0) -> List[Floats2d]:
"""The reverse/backward operation of the `flatten` function: unflatten
a large array into a list of arrays according to the given lengths.
"""
unflat = []
pad = int(pad)
for length in lengths:
length = int(length)
if pad >= 1 and length != 0:
X = X[pad:]
unflat.append(X[:length])
X = X[length:]
if pad >= 1:
X = X[pad:]
assert len(X) == 0
assert len(unflat) == len(lengths)
return unflat
@overload
def pad(self, seqs: List[Ints2d], round_to=1) -> Ints3d:
...
@overload # noqa: F811
def pad(self, seqs: List[Floats2d], round_to=1) -> Floats3d:
...
def pad( # noqa: F811
self, seqs: Union[List[Ints2d], List[Floats2d]], round_to=1
) -> Array3d:
"""Perform padding on a list of arrays so that they each have the same
length, by taking the maximum dimension across each axis. This only
works on non-empty sequences with the same `ndim` and `dtype`.
"""
# TODO: This should be generalized to handle different ranks
if not seqs:
raise ValueError("Cannot pad empty sequence")
if len(set(seq.ndim for seq in seqs)) != 1:
raise ValueError("Cannot pad sequences with different ndims")
if len(set(seq.dtype for seq in seqs)) != 1:
raise ValueError("Cannot pad sequences with different dtypes")
if len(set(seq.shape[1:] for seq in seqs)) != 1:
raise ValueError("Cannot pad sequences that differ on other dimensions")
# Find the maximum dimension along each axis. That's what we'll pad to.
length = max(len(seq) for seq in seqs)
# Round the length to nearest bucket -- helps on GPU, to make similar
# array sizes.
length = (length + (round_to - 1)) // round_to * round_to
final_shape = (len(seqs), length) + seqs[0].shape[1:]
output: Array3d = self.alloc(final_shape, dtype=seqs[0].dtype)
for i, arr in enumerate(seqs):
            # It's difficult to convince the type checker that the dtypes match.
output[i, : arr.shape[0]] = arr # type: ignore
return output
def unpad(self, padded: Array3d, lengths: List[int]) -> List2d:
"""The reverse/backward operation of the `pad` function: transform an
array back into a list of arrays, each with their original length.
"""
output = []
for i, length in enumerate(lengths):
output.append(padded[i, :length])
return cast(List2d, output)
def list2padded(self, seqs: List[Floats2d]) -> Padded:
"""Pack a sequence of 2d arrays into a Padded datatype."""
if not seqs:
return Padded(
self.alloc3f(0, 0, 0), self.alloc1i(0), self.alloc1i(0), self.alloc1i(0)
)
elif len(seqs) == 1:
data = self.reshape3f(seqs[0], seqs[0].shape[0], 1, seqs[0].shape[1])
size_at_t = self.asarray1i([1] * data.shape[0])
lengths = self.asarray1i([data.shape[0]])
indices = self.asarray1i([0])
return Padded(data, size_at_t, lengths, indices)
lengths_indices = [(len(seq), i) for i, seq in enumerate(seqs)]
lengths_indices.sort(reverse=True)
indices_ = [i for length, i in lengths_indices]
lengths_ = [length for length, i in lengths_indices]
nS = max([seq.shape[0] for seq in seqs])
nB = len(seqs)
nO = seqs[0].shape[1]
# Reorder the sequences, by length. This looks the same in either
# direction: you're swapping elements between their original and sorted
# position.
seqs = [seqs[i] for i in indices_]
arr: Floats3d = self.pad(seqs)
assert arr.shape == (nB, nS, nO), (nB, nS, nO)
arr = self.as_contig(arr.transpose((1, 0, 2)))
assert arr.shape == (nS, nB, nO)
# Build a lookup table so we can find how big the batch is at point t.
batch_size_at_t_ = [0 for _ in range(nS)]
current_size = len(lengths_)
for t in range(nS):
while current_size and t >= lengths_[current_size - 1]:
current_size -= 1
batch_size_at_t_[t] = current_size
assert sum(lengths_) == sum(batch_size_at_t_)
return Padded(
cast(Floats3d, arr),
self.asarray1i(batch_size_at_t_),
self.asarray1i(lengths_),
self.asarray1i(indices_),
)
def padded2list(self, padded: Padded) -> List2d:
"""Unpack a Padded datatype to a list of 2-dimensional arrays."""
data = padded.data
indices = to_numpy(padded.indices)
lengths = to_numpy(padded.lengths)
unpadded: List[Optional[Floats2d]] = [None] * len(lengths)
# Transpose from (length, batch, data) to (batch, length, data)
data = self.as_contig(data.transpose((1, 0, 2)))
for i in range(data.shape[0]):
unpadded[indices[i]] = data[i, : int(lengths[i])]
return cast(List2d, unpadded)
def get_dropout_mask(self, shape: Shape, drop: Optional[float]) -> FloatsXd:
"""Create a random mask for applying dropout, with a certain percent of
the mask (defined by `drop`) will contain zeros. The neurons at those
positions will be deactivated during training, resulting in a more
robust network and less overfitting.
"""
if drop is None or drop <= 0:
return self.xp.ones(shape, dtype="f")
elif drop >= 1.0:
return self.alloc(shape)
coinflips = self.xp.random.uniform(0.0, 1.0, shape)
mask = (coinflips >= drop) / (1.0 - drop)
return cast(FloatsXd, self.asarray(mask, dtype="float32"))
def alloc1f(self, d0: int, *, dtype: Optional[DTypesFloat] = "float32") -> Floats1d:
return self.alloc((d0,), dtype=dtype)
def alloc2f(
self, d0: int, d1: int, *, dtype: Optional[DTypesFloat] = "float32"
) -> Floats2d:
return self.alloc((d0, d1), dtype=dtype)
def alloc3f(
self, d0: int, d1: int, d2: int, *, dtype: Optional[DTypesFloat] = "float32"
) -> Floats3d:
return self.alloc((d0, d1, d2), dtype=dtype)
def alloc4f(
self,
d0: int,
d1: int,
d2: int,
d3: int,
*,
dtype: Optional[DTypesFloat] = "float32",
) -> Floats4d:
return self.alloc((d0, d1, d2, d3), dtype=dtype)
def alloc_f(
self, shape: Shape, *, dtype: Optional[DTypesFloat] = "float32"
) -> FloatsXd:
return self.alloc(shape, dtype=dtype)
def alloc1i(self, d0: int, *, dtype: Optional[DTypesInt] = "int32") -> Ints1d:
return self.alloc((d0,), dtype=dtype)
def alloc2i(
self, d0: int, d1: int, *, dtype: Optional[DTypesInt] = "int32"
) -> Ints2d:
return self.alloc((d0, d1), dtype=dtype)
def alloc3i(
self, d0: int, d1: int, d2: int, *, dtype: Optional[DTypesInt] = "int32"
) -> Ints3d:
return self.alloc((d0, d1, d2), dtype=dtype)
def alloc4i(
self,
d0: int,
d1: int,
d2: int,
d3: int,
*,
dtype: Optional[DTypesInt] = "int32",
) -> Ints4d:
return self.alloc((d0, d1, d2, d3), dtype=dtype)
def alloc_i(self, shape: Shape, *, dtype: Optional[DTypesInt] = "int32") -> IntsXd:
return self.alloc(shape, dtype=dtype)
def alloc(self, shape: Shape, *, dtype: Optional[DTypes] = "float32") -> ArrayT:
"""Allocate an array of a certain shape."""
if isinstance(shape, int):
shape = (shape,)
return self.xp.zeros(shape, dtype=dtype)
def reshape1f(self, array: FloatsXd, d0: int) -> Floats1d:
return cast(Floats1d, self.reshape(array, (d0,)))
def reshape2f(self, array: FloatsXd, d0: int, d1: int) -> Floats2d:
return cast(Floats2d, self.reshape(array, (d0, d1)))
def reshape3f(self, array: FloatsXd, d0: int, d1: int, d2: int) -> Floats3d:
return cast(Floats3d, self.reshape(array, (d0, d1, d2)))
def reshape4f(
self, array: FloatsXd, d0: int, d1: int, d2: int, d3: int
) -> Floats4d:
return cast(Floats4d, self.reshape(array, (d0, d1, d2, d3)))
def reshape_f(self, array: FloatsXd, shape: Shape) -> FloatsXd:
return self.reshape(array, shape)
def reshape1i(self, array: IntsXd, d0: int) -> Ints1d:
return cast(Ints1d, self.reshape(array, (d0,)))
def reshape2i(self, array: IntsXd, d0: int, d1: int) -> Ints2d:
return cast(Ints2d, self.reshape(array, (d0, d1)))
def reshape3i(self, array: IntsXd, d0: int, d1: int, d2: int) -> Ints3d:
return cast(Ints3d, self.reshape(array, (d0, d1, d2)))
def reshape4i(self, array: IntsXd, d0: int, d1: int, d2: int, d3: int) -> Ints4d:
return cast(Ints4d, self.reshape(array, (d0, d1, d2, d3)))
def reshape_i(self, array: IntsXd, shape: Shape) -> IntsXd:
return self.reshape(array, shape)
def reshape(self, array: ArrayT, shape: Shape) -> ArrayT:
"""Reshape an array."""
if isinstance(shape, int):
shape = (shape,)
return cast(ArrayT, array.reshape(shape))
def asarray4f(
self,
data: Union[Floats4d, Sequence[int]],
*,
dtype: Optional[DTypes] = "float32",
) -> Floats4d:
return cast(Floats4d, self.asarray(data, dtype=dtype))
def asarray3f(
self,
data: Union[Floats3d, Sequence[int]],
*,
dtype: Optional[DTypes] = "float32",
) -> Floats3d:
return cast(Floats3d, self.asarray(data, dtype=dtype))
def asarray2f(
self,
data: Union[Floats2d, Sequence[int]],
*,
dtype: Optional[DTypes] = "float32",
) -> Floats2d:
return cast(Floats2d, self.asarray(data, dtype=dtype))
def asarray1f(
self,
data: Union[Floats1d, Sequence[int]],
*,
dtype: Optional[DTypes] = "float32",
) -> Floats1d:
return cast(Floats1d, self.asarray(data, dtype=dtype))
def asarray_f(
self,
data: Union[FloatsXd, Sequence[float]],
*,
dtype: Optional[DTypes] = "float32",
) -> FloatsXd:
return cast(FloatsXd, self.asarray(data, dtype=dtype))
def asarray1i(
self, data: Union[Ints1d, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
) -> Ints1d:
return cast(Ints1d, self.asarray(data, dtype=dtype))
def asarray2i(
self, data: Union[Ints2d, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
) -> Ints2d:
return cast(Ints2d, self.asarray(data, dtype=dtype))
def asarray3i(
self, data: Union[Ints3d, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
) -> Ints3d:
return cast(Ints3d, self.asarray(data, dtype=dtype))
def asarray4i(
self, data: Union[Ints4d, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
) -> Ints4d:
return cast(Ints4d, self.asarray(data, dtype=dtype))
def asarray_i(
self, data: Union[IntsXd, Sequence[int]], *, dtype: Optional[DTypes] = "int32"
) -> IntsXd:
return cast(IntsXd, self.asarray(data, dtype=dtype))
def asarray(
self,
data: Union[ArrayXd, Sequence[ArrayXd], Sequence[float], Sequence[int]],
*,
dtype: Optional[DTypes] = None,
) -> ArrayXd:
"""Ensure a given array is of the correct type."""
if isinstance(data, self.xp.ndarray):
if dtype is None:
return data
elif data.dtype == dtype:
return data
else:
return self.xp.asarray(data, dtype=dtype)
elif hasattr(data, "numpy"):
# Handles PyTorch Tensor
return data.numpy() # type: ignore
elif dtype is not None:
return self.xp.array(data, dtype=dtype)
else:
return self.xp.array(data)
def as_contig(self, data: ArrayT, dtype: Optional[DTypes] = None) -> ArrayT:
"""Allow the backend to make a contiguous copy of an array.
Implementations of `Ops` do not have to make a copy or make it
contiguous if that would not improve efficiency for the execution engine.
"""
if data.flags["C_CONTIGUOUS"] and dtype in (None, data.dtype):
return data
kwargs = {"dtype": dtype} if dtype is not None else {}
return self.xp.ascontiguousarray(data, **kwargs)
def sigmoid(self, X: FloatsType, *, inplace: bool = False) -> FloatsType:
if inplace:
self.xp.exp(-X, out=X)
X += 1.0 # type: ignore
X **= -1.0 # type: ignore
return cast(FloatsType, X)
else:
return cast(FloatsType, 1.0 / (1.0 + self.xp.exp(-X)))
def dsigmoid(self, Y: FloatsType, *, inplace: bool = False) -> FloatsType:
if inplace:
Y *= 1 - Y
return Y
else:
return Y * (1.0 - Y)
def dtanh(self, Y: FloatsT, *, inplace: bool = False) -> FloatsT:
if inplace:
Y **= 2
Y *= -1.0
Y += 1.0
return Y
else:
return 1 - Y ** 2
def softmax(self, x: FloatsT, *, inplace: bool = False, axis: int = -1) -> FloatsT:
maxes = self.xp.max(x, axis=axis, keepdims=True)
shifted = x - maxes
new_x = self.xp.exp(shifted)
new_x /= new_x.sum(axis=axis, keepdims=True)
return new_x
def softmax_sequences(
self, Xs: Floats2d, lengths: Ints1d, *, inplace: bool = False, axis: int = -1
) -> Floats2d:
if Xs.ndim >= 3:
err = f"Softmax currently only supports 2d. Got: {Xs.ndim}"
raise NotImplementedError(err)
        # This loses almost no fidelity, and helps numerical stability.
Xs = self.xp.clip(Xs, -20.0, 20.0)
new_x = self.xp.exp(Xs)
summed = self.backprop_reduce_sum(self.reduce_sum(new_x, lengths), lengths)
new_x /= summed
return new_x
def backprop_softmax(self, Y: FloatsT, dY: FloatsT, *, axis: int = -1) -> FloatsT:
dX = Y * dY
dX -= Y * dX.sum(axis=axis, keepdims=True)
return dX
def backprop_softmax_sequences(
self, dY: Floats2d, Y: Floats2d, lengths: Ints1d
) -> Floats2d:
dX = Y * dY
sum_dX = self.backprop_reduce_sum(self.reduce_sum(dX, lengths), lengths)
dX -= Y * sum_dX
return dX
def lstm_forward_training(
self,
params: Floats1d,
H0: Floats3d,
C0: Floats3d,
X: Floats2d,
size_at_t: Ints1d,
) -> Tuple[Floats2d, Tuple]:
assert H0.shape == C0.shape
assert H0.shape[1] == C0.shape[1]
Y, fwd_state = lstm_forward_training(params, H0, C0, X, size_at_t)
return Y, fwd_state
def lstm_forward_inference(
self,
params: Floats1d,
H0: Floats3d,
C0: Floats3d,
X: Floats2d,
size_at_t: Ints1d,
) -> Floats2d:
Y, _ = lstm_forward_training(params, H0, C0, X, size_at_t)
return Y
def backprop_lstm(
self, dY: Floats2d, lengths: Ints1d, params: Floats1d, fwd_state: Tuple
) -> Tuple[Floats2d, Floats1d]:
dX, d_params = backprop_lstm(dY, lengths, params, fwd_state)
return dX, d_params
def maxout(self, X: Floats3d) -> Tuple[Floats2d, Ints2d]:
which = X.argmax(axis=-1, keepdims=False)
return X.max(axis=-1), which
def backprop_maxout(self, dY: Floats2d, which: Ints2d, P: int) -> Floats3d:
dX = self.alloc3f(dY.shape[0], dY.shape[1], P)
for b in range(dY.shape[0]):
for o in range(dY.shape[1]):
dX[b, o, which[b, o]] = dY[b, o]
return dX
def relu(self, X: Floats2d, inplace: bool = False) -> Floats2d:
if not inplace:
return X * (X > 0)
else:
X *= X > 0
return X
def backprop_relu(
self, dY: Floats2d, Y: Floats2d, inplace: bool = False
) -> Floats2d:
if not inplace:
return dY * (Y > 0)
dY *= Y > 0
return dY
def mish(self, X: Floats2d, threshold: float = 20.0) -> Floats2d:
Y = self.alloc2f(*X.shape, dtype=X.dtype)
tmp = X * self.xp.tanh(self.xp.log(1.0 + self.xp.exp(X)))
for i in range(X.shape[0]):
for j in range(X.shape[1]):
if X[i, j] >= threshold:
Y[i, j] = X[i, j]
else:
Y[i, j] = tmp[i, j]
return Y
def backprop_mish(
self,
dY: Floats2d,
X: Floats2d,
threshold: float = 20.0,
out: Optional[Floats2d] = None,
) -> Floats2d:
xp = get_array_module(X)
indices = X < threshold
Xsub = X[indices]
dYsub = dY[indices]
omega = 4.0 * (Xsub + 1.0)
omega += 4.0 * xp.exp(2.0 * Xsub)
omega += xp.exp(Xsub) * ((4.0 * Xsub) + 6.0)
delta = 2.0 * xp.exp(Xsub)
delta += xp.exp(2.0 * Xsub)
delta += 2.0
dXsub = dYsub * ((xp.exp(Xsub) * omega) / (delta ** 2))
if out is None:
out = xp.zeros(dY.shape, dtype="f")
# Gradient when above threshold will ignore softplus.
out[:] = dY + dY * self.dtanh(X)
out[indices] = dXsub
return out
def update_averages(
self, ema: FloatsT, weights: FloatsT, t: int, max_decay: float = 0.9999
) -> None:
# Internals for optimizer
decay = (1.0 + t) / (10.0 + t)
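        # The decay ramps from 0.1 at t=0 towards 1.0, capped by max_decay
        # below, so later updates move the running average less and less.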
if decay > max_decay:
decay = max_decay
ema -= (1 - decay) * (ema - weights)
def adam(
self,
weights: Floats1d,
gradient: Floats1d,
mom1: Floats1d,
mom2: Floats1d,
beta1: float,
beta2: float,
eps: float,
learn_rate: float,
mod_rate: float = 1.0,
) -> Tuple[Floats1d, Floats1d, Floats1d, Floats1d]:
# Internals for optimizer
mom1 *= beta1
mom2 *= beta2
mom1 += gradient * (1.0 - beta1)
mom2 += gradient * gradient * (1.0 - beta2)
# Here we assume learn rate is calculated by the caller.
# cdef weight_t a_t = learn_rate * sqrt(1-beta2**hp.t) / (1-beta1**hp.t);
weights -= learn_rate * (mom1 / (mod_rate * self.xp.sqrt(mom2) + eps))
return weights, gradient, mom1, mom2
def clip_gradient(self, gradient: FloatsT, threshold: float) -> FloatsT:
# Internals for optimizer
xp = get_array_module(gradient)
grad_norm = xp.linalg.norm(gradient)
if grad_norm >= threshold:
gradient *= threshold / grad_norm
return gradient
def logloss(self, y_true: FloatsT, y_pred: FloatsT) -> float:
# Currently not used
log_yp = self.xp.log(y_pred + 1e-8)
loss = (y_true * log_yp) + (1 - y_true) * self.xp.log((1 - y_pred) + 1e-8)
return -loss
def reduce_sum(self, X: Floats2d, lengths: Ints1d) -> Floats2d:
Y = self.alloc2f(lengths.shape[0], X.shape[1])
start = 0
for i, length in enumerate(lengths):
Y[i] = X[start : start + length].sum(axis=0)
start += length
return Y
def reduce_mean(self, X: Floats2d, lengths: Ints1d) -> Floats2d:
Y = self.alloc2f(lengths.shape[0], X.shape[1])
start = 0
for i, length in enumerate(lengths):
if length:
Y[i] = X[start : start + length].mean(axis=0)
start += length
return Y
def reduce_max(self, X: Floats2d, lengths: Ints1d) -> Tuple[Floats2d, Ints2d]:
Y = self.alloc2f(lengths.shape[0], X.shape[1])
which = self.alloc2i(lengths.shape[0], X.shape[1])
start = 0
for i, length in enumerate(lengths):
if length:
which[i] = X[start : start + length].argmax(axis=0)
Y[i] = X[start : start + length].max(axis=0)
start += length
return Y, which
def backprop_reduce_sum(self, d_sums: Floats2d, lengths: Ints1d) -> Floats2d:
dX = self.alloc2f(lengths.sum(), d_sums.shape[1])
start = 0
for i, length in enumerate(lengths):
dX[start : start + length] = d_sums[i]
start += length
return dX
def backprop_reduce_mean(self, d_means: Floats2d, lengths: Ints1d) -> Floats2d:
dX = self.alloc2f(lengths.sum(), d_means.shape[1])
start = 0
for i, length in enumerate(lengths):
dX[start : start + length] = d_means[i] / length
start += length
return dX
def backprop_reduce_max(
self, d_maxes: Floats2d, which: Ints2d, lengths: Ints1d
) -> Floats2d:
dX = self.alloc2f(lengths.sum(), d_maxes.shape[1])
start = 0
for i, length in enumerate(lengths):
            # Scatter each column's gradient to the row that produced the max.
            dX[start + which[i], self.xp.arange(d_maxes.shape[1])] = d_maxes[i]
start += length
return dX
def hash(self, ids: Ints1d, seed: int) -> Ints2d:
"""Hash a sequence of 64-bit keys into a table with 4 32-bit keys, using
murmurhash3.
"""
from .numpy_ops import NumpyOps
numpy_ops = NumpyOps()
return self.asarray2i(
numpy_ops.hash(numpy_ops.asarray(ids, dtype="uint64"), seed)
)
def ngrams(self, n: int, keys: Ints1d) -> Ints1d:
from .numpy_ops import NumpyOps
numpy_ops = NumpyOps()
return self.asarray1i(
numpy_ops.ngrams(n, numpy_ops.asarray(keys, dtype="uint64"))
)
def position_encode(
self, N: int, D: int, period: int = 10000, out: Optional[Floats2d] = None
) -> Floats2d:
# Currently internals only
from .numpy_ops import NumpyOps
numpy_ops = NumpyOps()
return self.asarray2f(numpy_ops.position_encode(N, D, period, out))
def scatter_add(
self, table: FloatsXd, indices: IntsXd, values: FloatsXd
) -> FloatsXd:
        # xp.add.at adds in place and returns None, so return the table itself.
        self.xp.add.at(table, indices, values)
        return table
def insert_into(self, shape, Xs):
"""Maybe don't need this? Just a quicky to get Jax working."""
output = self.alloc(shape, dtype=Xs[0].dtype)
for i, x in enumerate(Xs):
output[i, : x.shape[0]] = x
return output
"""
LSTM Notation (kind of involved, but made it a lot easier to write)
X: Inputs
Y: Outputs (aka hiddens)
C: Cells
G: Gates (output of the non-linearity, i.e. lstm_gates(X @ W.T))
A: Activations (X @ W.T, before non-linearity)
Imagine we have the input:
batch = [
["apple", "banana", "cantaloupe", "date", "elderberry"],
["aardvark", "bat", "capybara", "dingo", "elephant"]
]
The input variable X will have one vector per word, so X[0, 1] will be banana's
vector, X[0, 1, 0] will be a float, the first element of that vector.
We're computing an output variable Y of shape (nL, nB, nO), so that Y[0, 1] is
the output variable of banana.
A problem with variables for RNNs is keeping the timesteps straight. It's hard
to distinguish the current, previous, and next timesteps. To solve this problem,
we follow the convention that **we are at timestep 3**.
Additionally, the variables for Y and C are offset by one, as the 0th elements
have the initial hiddens and initial cells. So:
t=3
Xt3: The input vectors for 'dingo' and 'date', i.e. X[t]
Yt3: The output vectors for 'dingo' and 'date', i.e. Y[t+1] (Y is offset.)
Ct2: The cells calculated at 'c...', that are the input for 'd...'
Ct3: The cells calculated at 'd...', that are the input for 'e...'
At3: The activations at 'd...'
Gt3: The gates at 'd...'
"""
def lstm_forward_training(
params: Floats1d, c_init: Floats3d, h_init: Floats3d, X: Floats2d, lengths: Ints1d
) -> Tuple[Floats2d, Tuple]:
xp = get_array_module(params)
depth, dirs, nO = c_init.shape
N, nI = X.shape
batch_size = lengths[0]
# Preallocate these so we can pass them through for loop.
G = cast(Floats4d, xp.zeros((depth, dirs, X.shape[0], nO * 4), dtype="f"))
Y = cast(Floats4d, xp.zeros((depth, dirs, X.shape[0], nO), dtype="f"))
C = cast(Floats4d, xp.zeros((depth, dirs, X.shape[0], nO), dtype="f"))
Yt2 = cast(Floats2d, xp.zeros((batch_size, nO), dtype="f"))
Ct2 = cast(Floats2d, xp.zeros((batch_size, nO), dtype="f"))
# Compute the start and end indices first.
indices = []
start = 0
for batch_size in lengths:
indices.append((start, start + batch_size))
start += batch_size
params_i = 0
orig_X = X
for i in range(depth):
nI = X.shape[1]
for d in range(dirs):
            # The inits are shaped (depth, dirs, nO). We add a leading batch
            # dimension so they broadcast correctly.
Yt2 = h_init[i, d].reshape((1, nO)) # type: ignore
Ct2 = c_init[i, d].reshape((1, nO)) # type: ignore
layer_params, params_i = _split_weights(params, i, nO, nI, params_i)
Wx, Wh, bias = _transpose_weights(layer_params)
G[i, d] += xp.dot(X, Wx.T)
G[i, d] += bias
for start, end in indices if d == 0 else reversed(indices):
# When we iterate left-to-right, t2 might be longer than t3.
Yt2 = Yt2[: end - start]
Ct2 = Ct2[: end - start]
# But in right-to-left, it's the opposite: t3 can be longer.
Gt3 = G[i, d, start:end]
Gt3 = Gt3[: Yt2.shape[0]]
Gt3 += xp.dot(Yt2, Wh.T)
Gt3_ = cast(Floats3d, Gt3.reshape((-1, nO, 4)))
hf = sigmoid(Gt3_[:, :, 0])
hi = sigmoid(Gt3_[:, :, 1])
ho = sigmoid(Gt3_[:, :, 2])
hc = xp.tanh(Gt3_[:, :, 3])
Ct3 = hf * Ct2
Ct3 += hi * hc
# Store results
Gt3 = (
xp.hstack((hf, hi, ho, hc))
.reshape((-1, 4, nO))
.transpose((0, 2, 1))
.reshape((-1, nO * 4))
)
# Fix the endpoint to account for shorter slices when iterating
# reversed. Not 100% sure this is right. If there's a bug, look
# here?
end = min(end, start + ho.shape[0])
Y[i, d, start:end] = xp.tanh(Ct3) * ho
G[i, d, start:end] = Gt3
C[i, d, start:end] = Ct3
# Set the t2 variables to the current t3 variables.
Ct2 = Ct3
Yt2 = Y[i, d, start:end]
H = cast(Floats2d, Y[i].transpose((1, 0, 2)).reshape((N, -1)))
if dirs == 2:
H = xp.ascontiguousarray(H)
X = H
return H, (Y, G, C, orig_X)
def backprop_lstm(dY: Floats2d, lengths: Ints1d, params: Floats1d, fwd_state: Tuple):
xp = get_array_module(params)
Y: Floats4d
G: Floats4d
C: Floats4d
X: Floats2d
Wx: Floats2d
Wh: Floats2d
bias: Floats1d
dWx: Floats2d
dWh: Floats2d
d_bias: Floats1d
Y, G, C, X = fwd_state
depth, dirs, N, nO = C.shape
nI = X.shape[1]
batch_size = lengths[0]
# We don't need to store all the cells for all the layers.
dC = cast(Floats2d, xp.zeros((N, nO), dtype=C.dtype))
dG = cast(Floats2d, xp.zeros((N, nO * 4), dtype=C.dtype))
d_params = cast(Floats1d, xp.zeros((params.shape[0],), dtype=params.dtype))
# Collect the params and slices. It makes it a bit easier to get the indexing
# right, when we're iterating backwards.
params_i = 0
all_layer_params: List[List[Tuple[Tuple[Floats2d, Floats2d, Floats1d], int]]] = []
for i in range(depth):
all_layer_params.append([])
n_inputs = nI if i == 0 else (nO * dirs)
for d in range(dirs):
layer_params, params_i = _split_weights(params, i, nO, n_inputs, params_i)
layer_params = _transpose_weights(layer_params)
all_layer_params[-1].append((layer_params, params_i))
params_i = 0
all_layer_grads: List[List[Tuple[Tuple[Floats2d, Floats2d, Floats1d], int]]] = []
for i in range(depth):
all_layer_grads.append([])
n_inputs = nI if i == 0 else (nO * dirs)
for d in range(dirs):
layer_grads, params_i = _split_weights(d_params, i, nO, n_inputs, params_i)
layer_grads = _transpose_weights(layer_grads)
all_layer_grads[-1].append((layer_grads, params_i))
# Similarly, we want to compute the indices first
indices = []
start = 0
for batch_size in lengths:
indices.append((start, start + batch_size))
start += batch_size
Xs = [X] + [
cast(Floats2d, Y[i].transpose((1, 0, 2)).reshape((N, -1)))
for i in range(depth - 1)
]
dXs = [xp.zeros((X.shape[0], X.shape[1]), dtype=X.dtype) for X in Xs]
# Okay, now do the actual looping
for i in reversed(range(depth)):
dY3d = cast(Floats3d, dY.reshape((N, dirs, nO)).transpose((1, 0, 2)))
dX = dXs[i]
X = Xs[i]
if dirs >= 2:
dY3d = xp.ascontiguousarray(dY3d)
for d in range(dirs):
Wx, Wh, bias = all_layer_params[i][d][0]
dWx, dWh, d_bias = all_layer_grads[i][d][0]
if d == 0:
start_t3, end_t3 = indices[-1]
layer_indices = indices[:-1]
layer_indices.reverse()
else:
start_t3, end_t3 = indices[0]
layer_indices = indices[1:]
for start_t2, end_t2 in layer_indices:
size = min(end_t2 - start_t2, end_t3 - start_t3)
dGt3, dCt2 = backprop_lstm_gates(
dY3d[d, start_t3 : start_t3 + size],
dC[start_t3 : start_t3 + size],
G[i, d, start_t3 : start_t3 + size],
C[i, d, start_t3 : start_t3 + size],
C[i, d, start_t2 : start_t2 + size],
)
# Backprop hidden-to-hidden w.r.t. hidden.
dY3d[d, start_t2 : start_t2 + size] += dGt3 @ Wh
# Update iteration variables
dC[start_t2 : start_t2 + size] = dCt2
start_t3 = start_t2
end_t3 = end_t2
# Backprop input-to-hidden w.r.t. weights.
dWx += dG.T @ X
# Backprop hidden-to-hidden w.r.t. weights.
dWh += dG.T @ Y[i, d]
# Backprop bias
d_bias += dG.sum(axis=0)
# Backprop input-to-hidden w.r.t. input
dX += dG @ Wx
dY = dX
assert dX.shape[1] == X.shape[1]
grad_parts = []
for layer_grads in all_layer_grads:
for dir_grads, _ in layer_grads:
grad_parts.append(_untranspose_unsplit_weights(dir_grads))
return dX, xp.concatenate(grad_parts)
def _split_weights(params: Floats1d, i: int, nO: int, nI: int, params_i: int):
Wx_size = 4 * nO * nI
bx_size = 4 * nO
Wh_size = 4 * nO * nO
bh_size = 4 * nO
Wx = params[params_i : params_i + Wx_size].reshape((4 * nO, nI))
params_i += Wx_size
bx = params[params_i : params_i + bx_size].reshape((4 * nO,))
params_i += bx_size
Wh = params[params_i : params_i + Wh_size].reshape((4 * nO, nO))
params_i += Wh_size
bh = params[params_i : params_i + bh_size].reshape((4 * nO,))
params_i += bh_size
return ((Wx, bx), (Wh, bh)), params_i
def _transpose_weights(params):
# Transpose the parameters so that the gates are the last dimension. This
# makes it easier to fuse.
(Wx, bx), (Wh, bh) = params
xp = get_array_module(Wx)
Wx = Wx.reshape((4, -1, Wx.shape[-1]))
Wx = Wx.transpose((1, 0, 2)).reshape((-1, Wx.shape[-1]))
bx = bx.reshape((4, -1)).transpose((1, 0)).reshape((-1,))
Wh = Wh.reshape((4, -1, Wh.shape[-1]))
Wh = Wh.transpose((1, 0, 2)).reshape((-1, Wh.shape[-1]))
bh = bh.reshape((4, -1)).transpose((1, 0)).reshape((-1,))
ascontig = xp.ascontiguousarray
Wx = ascontig(Wx)
Wh = ascontig(Wh)
bias = ascontig(bx) + bh
return Wx, Wh, bias
def _untranspose_unsplit_weights(params):
Wx, Wh, bias = params
xp = get_array_module(Wx)
nO = Wh.shape[1]
nI = Wx.shape[1]
Wx = Wx.reshape((-1, 4, nI)).transpose((1, 0, 2)).reshape((-1, nI))
Wh = Wh.reshape((-1, 4, nO)).transpose((1, 0, 2)).reshape((-1, nO))
bias = bias.reshape((-1, 4)).transpose((1, 0)).reshape((-1,))
zeros = xp.zeros(bias.shape, dtype="f")
return xp.concatenate((Wx.ravel(), bias, Wh.ravel(), zeros))
def backprop_lstm_gates(
dYt3: Floats2d, dCt3: Floats2d, Gt3: Floats2d, Ct3: Floats2d, Ct2: Floats2d
) -> Tuple[Floats2d, Floats2d]:
    # See above for notation. Step numbering refers to the forward pass.
xp = get_array_module(dYt3)
hf, hi, ho, hc = xp.split(Gt3, 4, axis=-1)
assert hf.shape[0] == hi.shape[0] == ho.shape[0] == hc.shape[0]
assert hf.shape[0] == dYt3.shape[0] == dCt3.shape[0] == Ct3.shape[0] == Ct2.shape[0]
tanhCt3 = xp.tanh(Ct3)
# 3b: Yt3 = tanhCt3 * ho
d_ho = dYt3 * tanhCt3
d_tanhCt3 = dYt3 * ho
# 3a: tanhCt3 = tanh(Ct3)
dCt3 += d_tanhCt3 * dtanh(tanhCt3)
# 2b: Ct3 += hi * hc
d_hi = dCt3 * hc
d_hc = dCt3 * hi
# 2a: Ct3 = hf * Ct2
d_hf = dCt3 * Ct2
dCt2 = dCt3 * hf
d_At3_hc = d_hc * dtanh(hc) # 1d
d_At3_ho = d_ho * dsigmoid(ho) # 1c
d_At3_hi = d_hi * dsigmoid(hi) # 1b
d_At3_hf = d_hf * dsigmoid(hf) # 1a
dAt3 = xp.concatenate((d_At3_hf, d_At3_hi, d_At3_ho, d_At3_hc), axis=-1)
return dAt3, dCt2
def sigmoid(X, out=None):
xp = get_array_module(X)
return 1.0 / (1.0 + xp.exp(-X))
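# Note that dsigmoid and dtanh below take the *activation output* Y rather
# than the pre-activation input, matching how they are applied in
# backprop_lstm_gates above.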
def dsigmoid(Y: ArrayT) -> ArrayT:
return Y * (1.0 - Y)
def dtanh(Y: ArrayT) -> ArrayT:
return 1 - Y ** 2
|
{
"content_hash": "afbf687c05e0fa8c3bab20b267ed3e03",
"timestamp": "",
"source": "github",
"line_count": 1154,
"max_line_length": 88,
"avg_line_length": 37.14471403812825,
"alnum_prop": 0.5552082118278315,
"repo_name": "spacy-io/thinc",
"id": "c6d9f6b09acf047fffef7577849b8eb9467e493e",
"size": "42865",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thinc/backends/ops.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "259926"
},
{
"name": "C++",
"bytes": "5131"
},
{
"name": "Python",
"bytes": "135654"
}
],
"symlink_target": ""
}
|
"""
Utility functions to download and parse data from Orphanet
"""
from requests import post
from bs4 import BeautifulSoup
from biomart import BiomartServer
from lxml import etree
from collections import defaultdict
def search_orphanet(keyword, search_type='Gen'):
"""
    Returns the HTML of an Orphanet search. By default,
    it searches for a gene.
"""
search_url = r"http://www.orpha.net/consor/cgi-bin/Disease_Search_Simple.php?lng=EN"
search_data = {
'Disease_Disease_Search_diseaseGroup': keyword,
'Disease_Disease_Search_diseaseType': search_type
}
request = post(search_url, search_data)
if request.status_code == 200:
return request.text
else:
raise Exception("Search failed with status {}".format(request.status_code))
def get_first_result(orphanet_search):
"""
Returns the HTML link for the first result of a search
"""
soup = BeautifulSoup(orphanet_search, 'html.parser')
first = soup.find_all('div', class_="blockResults")
if not first:
return ''
else:
return first[0].a.get('href')+"\t"+first[0].a.getText()
def convert_entrez_to_gene_symbol(keyword_list):
"""
    Convert Entrez gene IDs to gene symbols and descriptions using Biomart
"""
#print("\nConverting Refseq to Gene Symbol\n")
    server = BiomartServer("http://mar2017.archive.ensembl.org/biomart")
#server.verbose = True
#server.show_databases()
#server.show_datasets()
hsapiens = server.datasets['hsapiens_gene_ensembl']
#hsapiens.show_filters()
#hsapiens.show_attributes()
# run a search with custom filters and attributes (no header)
response = hsapiens.search({
'filters': {
'entrezgene': keyword_list
},
'attributes': [
'external_gene_name', 'description', 'entrezgene'
]
})
return response
def load_orpha_genes(xml):
orpha_genes = {}
tree = etree.parse(xml)
symbols = tree.findall('//Symbol')
    for gene in symbols:
        # Each <Symbol> sits four levels below its <Disorder> element.
        disorder = gene.getparent().getparent().getparent().getparent()
        orpha_genes[gene.text] = [disorder.find("OrphaNumber").text,
                                  disorder.find("Name").text]
    return orpha_genes
def load_hyb_db_genes(hyb):
hyb_db = defaultdict(list)
hyb_file = open(hyb).read().splitlines()
for line in hyb_file:
fields = line.split(",")
hyb_db[fields[10]].append(fields[11])
hyb_db[fields[11]].append(fields[10])
return hyb_db
def load_biogrid_genes(biogrid):
biogrid_db = defaultdict(list)
biogrid_file = open(biogrid).read().splitlines()
for line in biogrid_file:
fields = line.split("\t")
biogrid_db[fields[1]].append(fields[2])
biogrid_db[fields[2]].append(fields[1])
return biogrid_db
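# Usage sketch (not part of the original module; requires network access and
# assumes the Orphanet endpoint above is still live):
#   html = search_orphanet('BRCA1')
#   print(get_first_result(html))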
|
{
"content_hash": "32f27dcd15f5b8893f16dac8a7b4b96e",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 184,
"avg_line_length": 30.967032967032967,
"alnum_prop": 0.6515259048970902,
"repo_name": "osvaldoreisss/orpha-tools",
"id": "ba69651c543e83e9ed9f2e905e6fa11f025aeda8",
"size": "2844",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5520"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import errno
import os
import sqlite3
import time
from contextlib import contextmanager
class CacheLockedError(Exception):
pass
class CacheLocker(object):
def __init__(self, lockfile, polltime=0.1):
self.lockfile = lockfile
self.polltime = polltime
self._initialize_lockfile()
def _initialize_lockfile(self):
db = sqlite3.connect(self.lockfile)
db.execute("""
CREATE TABLE IF NOT EXISTS cache_locks (
cache_name TEXT NOT NULL,
created REAL NOT NULL,
                pid INTEGER NOT NULL
);
""")
db.commit()
db.close()
@contextmanager
def _exclusive_db_cursor(self):
db = sqlite3.connect(self.lockfile, isolation_level="EXCLUSIVE")
db.row_factory = sqlite3.Row
cur = db.cursor()
try:
yield cur
finally:
db.commit()
db.close()
@contextmanager
def lock(self, cache_name, no_block=False):
pid = os.getpid()
while True:
with self._exclusive_db_cursor() as cur:
self._add_lock(cur, cache_name, pid)
if self._poll(cur, cache_name, pid):
break
elif no_block:
raise CacheLockedError()
time.sleep(self.polltime)
try:
yield
finally:
with self._exclusive_db_cursor() as cur:
self._remove_lock(cur, cache_name, pid)
    def _poll(self, cur, cache_name, pid):
        active_locks = False
        cur.execute("SELECT * from cache_locks where cache_name = ? ORDER BY created", (cache_name, ))
        # Fetch all rows up front: _remove_lock re-uses this cursor, and
        # executing a new statement while iterating would discard the
        # remaining rows.
        for lock in cur.fetchall():
            if not active_locks and lock['cache_name'] == cache_name and lock['pid'] == pid:
                # we are waiting and it is our turn
                return True
            if not is_running(lock['pid']):
                self._remove_lock(cur, lock['cache_name'], lock['pid'])
            else:
                active_locks = True
        return not active_locks
def _add_lock(self, cur, cache_name, pid):
cur.execute("SELECT count(*) from cache_locks WHERE cache_name = ? AND pid = ?", (cache_name, pid))
if cur.fetchone()[0] == 0:
cur.execute("INSERT INTO cache_locks (cache_name, pid, created) VALUES (?, ?, ?)", (cache_name, pid, time.time()))
def _remove_lock(self, cur, cache_name, pid):
cur.execute("DELETE FROM cache_locks WHERE cache_name = ? AND pid = ?", (cache_name, pid))
class DummyCacheLocker(object):
@contextmanager
def lock(self, cache_name, no_block=False):
yield
def is_running(pid):
try:
os.kill(pid, 0)
except OSError as err:
if err.errno == errno.ESRCH:
return False
elif err.errno == errno.EPERM:
return True
else:
raise err
else:
return True
if __name__ == '__main__':
locker = CacheLocker('/tmp/cachelock_test')
with locker.lock('foo'):
pass
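    # Hypothetical second acquisition to illustrate no_block: the lock above
    # has already been released, so this succeeds instead of raising
    # CacheLockedError.
    with locker.lock('foo', no_block=True):
        pass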
|
{
"content_hash": "47eedc669a47a1d6751552d053b61a71",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 126,
"avg_line_length": 29.102803738317757,
"alnum_prop": 0.5507385998715478,
"repo_name": "kaiCu/mapproxy",
"id": "212a749e9f8b67d9bc499eb7ff779f201ae79e23",
"size": "3758",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "mapproxy/seed/cachelock.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12401"
},
{
"name": "HTML",
"bytes": "18665"
},
{
"name": "Makefile",
"bytes": "1045"
},
{
"name": "Python",
"bytes": "1590097"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import numpy as np
class mlp:
""" A Multi-Layer Perceptron"""
def __init__(self,inputs,targets,nhidden,beta=1,momentum=0.9,outtype='logistic'):
""" Constructor """
# Set up network size
self.nin = np.shape(inputs)[1]
self.nout = np.shape(targets)[1]
self.ndata = np.shape(inputs)[0]
self.nhidden = nhidden
self.beta = beta
self.momentum = momentum
self.outtype = outtype
# Initialise network
self.weights1 = (np.random.rand(self.nin+1,self.nhidden)-0.5)*2/np.sqrt(self.nin)
self.weights2 = (np.random.rand(self.nhidden+1,self.nout)-0.5)*2/np.sqrt(self.nhidden)
def earlystopping(self,inputs,targets,valid,validtargets,eta,niterations=100):
valid = np.concatenate((valid,-np.ones((np.shape(valid)[0],1))),axis=1)
old_val_error1 = 100002
old_val_error2 = 100001
new_val_error = 100000
count = 0
while (((old_val_error1 - new_val_error) > 0.001) or ((old_val_error2 - old_val_error1)>0.001)):
count+=1
            print(count)
self.mlptrain(inputs,targets,eta,niterations)
old_val_error2 = old_val_error1
old_val_error1 = new_val_error
validout = self.mlpfwd(valid)
new_val_error = 0.5*np.sum((validtargets-validout)**2)
print "Stopped", new_val_error,old_val_error1, old_val_error2
return new_val_error
def mlptrain(self,inputs,targets,eta,niterations):
""" Train the thing """
# Add the inputs that match the bias node
inputs = np.concatenate((inputs,-np.ones((self.ndata,1))),axis=1)
change = range(self.ndata)
updatew1 = np.zeros((np.shape(self.weights1)))
updatew2 = np.zeros((np.shape(self.weights2)))
for n in range(niterations):
self.outputs = self.mlpfwd(inputs)
error = 0.5*np.sum((self.outputs-targets)**2)
if (np.mod(n,100)==0):
print "Iteration: ",n, " Error: ",error
# Different types of output neurons
if self.outtype == 'linear':
deltao = (self.outputs-targets)/self.ndata
elif self.outtype == 'logistic':
deltao = self.beta*(self.outputs-targets)*self.outputs*(1.0-self.outputs)
elif self.outtype == 'softmax':
deltao = (self.outputs-targets)*(self.outputs*(-self.outputs)+self.outputs)/self.ndata
else:
print "error"
deltah = self.hidden*self.beta*(1.0-self.hidden)*(np.dot(deltao,np.transpose(self.weights2)))
updatew1 = eta*(np.dot(np.transpose(inputs),deltah[:,:-1])) + self.momentum*updatew1
updatew2 = eta*(np.dot(np.transpose(self.hidden),deltao)) + self.momentum*updatew2
self.weights1 -= updatew1
self.weights2 -= updatew2
# Randomise order of inputs (not necessary for matrix-based calculation)
#np.random.shuffle(change)
#inputs = inputs[change,:]
#targets = targets[change,:]
def mlpfwd(self,inputs):
""" Run the network forward """
        self.hidden = np.dot(inputs,self.weights1)
self.hidden = 1.0/(1.0+np.exp(-self.beta*self.hidden))
self.hidden = np.concatenate((self.hidden,-np.ones((np.shape(inputs)[0],1))),axis=1)
        outputs = np.dot(self.hidden,self.weights2)
# Different types of output neurons
if self.outtype == 'linear':
return outputs
elif self.outtype == 'logistic':
return 1.0/(1.0+np.exp(-self.beta*outputs))
elif self.outtype == 'softmax':
normalisers = np.sum(np.exp(outputs),axis=1)*np.ones((1,np.shape(outputs)[0]))
return np.transpose(np.transpose(np.exp(outputs))/normalisers)
else:
print "error"
def confmat(self,inputs,targets):
"""Confusion matrix"""
# Add the inputs that match the bias node
inputs = np.concatenate((inputs,-np.ones((np.shape(inputs)[0],1))),axis=1)
outputs = self.mlpfwd(inputs)
nclasses = np.shape(targets)[1]
if nclasses==1:
nclasses = 2
outputs = np.where(outputs>0.5,1,0)
else:
# 1-of-N encoding
outputs = np.argmax(outputs,1)
targets = np.argmax(targets,1)
cm = np.zeros((nclasses,nclasses))
for i in range(nclasses):
for j in range(nclasses):
cm[i,j] = np.sum(np.where(outputs==i,1,0)*np.where(targets==j,1,0))
print "Confusion matrix is:"
print cm
print "Percentage Correct: ",np.trace(cm)/np.sum(cm)*100
|
{
"content_hash": "ab2f2b9d6ceac8611c4113e70d3920aa",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 105,
"avg_line_length": 38.53174603174603,
"alnum_prop": 0.5631307929969104,
"repo_name": "Anderson-Lab/anderson-lab.github.io",
"id": "b1f24bef156c615c15b150dcafd052b3fd86cc16",
"size": "5231",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "csc_466_2020_winter/MLCode/Ch4/mlp.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "79604"
},
{
"name": "JavaScript",
"bytes": "53016"
},
{
"name": "Jupyter Notebook",
"bytes": "64098"
},
{
"name": "Python",
"bytes": "557510"
},
{
"name": "Ruby",
"bytes": "681"
},
{
"name": "SCSS",
"bytes": "64925"
},
{
"name": "Shell",
"bytes": "25"
}
],
"symlink_target": ""
}
|
from image_effects import *
import translations
filename = "test"
filename = "thames"
filename = "pumpkin"
filename = "me"
myImg = ImageEffects(filename, True)
#myImg.set_all_pixels([0xff, 0x88, 0x00, 0xff])
#myImg.pointillism(10, 3, 2) #(10, 0, 4)
#myImg.pixelate(15)
#myImg.blur(10)
#myImg.greyscale()
#myImg.colour_filter([0x00, 0xff, 0xff])
#myImg.translate(translations.waves(1000, 50))
myImg.greyscale().partition(50)
myImg.save(filename + myImg.effect)
myImg.open()
|
{
"content_hash": "af0edbf831ef21ae911d72640a5bcaef",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 47,
"avg_line_length": 20.782608695652176,
"alnum_prop": 0.7217573221757322,
"repo_name": "mft25/Images",
"id": "0c64ee24d65c1aa4dae508d14f0d5ba1e519ac69",
"size": "478",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "images/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5822"
}
],
"symlink_target": ""
}
|
"""Tests for checkpointing the _RebatchDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.python.data.experimental.ops import distribute
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import combinations
from tensorflow.python.platform import test
class LegacyRebatchDatasetCheckpointTest(
checkpoint_test_base.CheckpointTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testCore(self):
def build_dataset(num_elements, batch_size):
return distribute._LegacyRebatchDataset(
dataset_ops.Dataset.range(num_elements).batch(
4 * batch_size, drop_remainder=True),
num_replicas=4)
self.run_core_tests(lambda: build_dataset(64, 8), 8)
class RebatchDatasetCheckpointTest(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testCore(self):
def build_dataset(num_elements, batch_size):
return distribute._RebatchDataset(
dataset_ops.Dataset.range(num_elements).batch(
2 * batch_size, drop_remainder=True),
batch_sizes=[batch_size, batch_size])
self.run_core_tests(lambda: build_dataset(64, 8), 8)
if __name__ == "__main__":
test.main()
|
{
"content_hash": "a3d20130448ca01eb912620a48b188cf",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 75,
"avg_line_length": 34.12765957446808,
"alnum_prop": 0.7256857855361596,
"repo_name": "annarev/tensorflow",
"id": "1c50d80afa55ff9f2d5368fd237f915a55d494c9",
"size": "2293",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/experimental/kernel_tests/serialization/rebatch_dataset_serialization_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1286"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "341894"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "49343974"
},
{
"name": "CMake",
"bytes": "195286"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1253646"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "863222"
},
{
"name": "Jupyter Notebook",
"bytes": "2604741"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52734"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41289329"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "469612"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1alpha1RoleList(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, api_version=None, items=None, kind=None, metadata=None):
"""
V1alpha1RoleList - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'api_version': 'str',
'items': 'list[V1alpha1Role]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
self.attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
self._api_version = api_version
self._items = items
self._kind = kind
self._metadata = metadata
@property
def api_version(self):
"""
Gets the api_version of this V1alpha1RoleList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:return: The api_version of this V1alpha1RoleList.
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""
Sets the api_version of this V1alpha1RoleList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources
:param api_version: The api_version of this V1alpha1RoleList.
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""
Gets the items of this V1alpha1RoleList.
Items is a list of Roles
:return: The items of this V1alpha1RoleList.
:rtype: list[V1alpha1Role]
"""
return self._items
@items.setter
def items(self, items):
"""
Sets the items of this V1alpha1RoleList.
Items is a list of Roles
:param items: The items of this V1alpha1RoleList.
:type: list[V1alpha1Role]
"""
if items is None:
raise ValueError("Invalid value for `items`, must not be `None`")
self._items = items
@property
def kind(self):
"""
Gets the kind of this V1alpha1RoleList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:return: The kind of this V1alpha1RoleList.
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""
Sets the kind of this V1alpha1RoleList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
:param kind: The kind of this V1alpha1RoleList.
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""
Gets the metadata of this V1alpha1RoleList.
Standard object's metadata.
:return: The metadata of this V1alpha1RoleList.
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""
Sets the metadata of this V1alpha1RoleList.
Standard object's metadata.
:param metadata: The metadata of this V1alpha1RoleList.
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1alpha1RoleList):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
{
"content_hash": "52446fdcdcecbf691d886e9eb8130663",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 281,
"avg_line_length": 31.215384615384615,
"alnum_prop": 0.5850172498767866,
"repo_name": "djkonro/client-python",
"id": "da53e8f2ee54104bcee82ebca19e3a2350473931",
"size": "6104",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "kubernetes/client/models/v1alpha1_role_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6527154"
},
{
"name": "Shell",
"bytes": "16522"
}
],
"symlink_target": ""
}
|
import sys
def get_class_by_path(name):
    parts = name.split('.')
    module = import_module('.'.join(parts[0:-1]))
    return getattr(module, parts[-1])
def create_list_of_imported_objects(classlist):
return [get_class_by_path(name)() for name in classlist]
def create_dict_of_imported_objects(classdict):
objdict = {}
for name, classname in classdict.iteritems():
objdict[name] = get_class_by_path(classname)()
return objdict
# The following code was taken from importlib due to some strange problems with importing importlib directly
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in xrange(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
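# Usage sketch (hypothetical names): given a package `plugins` that defines a
# class `Foo`,
#   obj = get_class_by_path('plugins.Foo')()
# is equivalent to `from plugins import Foo; obj = Foo()`.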
|
{
"content_hash": "d9b9fc77e35404c5f72f5179712739e9",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 95,
"avg_line_length": 31.581818181818182,
"alnum_prop": 0.6229130685089235,
"repo_name": "Glucksistemi/EGS-DSM",
"id": "a091b4e3f3d400893481a66531b400d71ff8b011",
"size": "1737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/imports.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1112"
},
{
"name": "JavaScript",
"bytes": "62"
},
{
"name": "Python",
"bytes": "30770"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.db import migrations
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.db.migrations.state import StateApps
from mock import patch
from zerver.lib.utils import make_safe_digest
from zerver.lib.upload import upload_backend
from zerver.models import UserProfile
from typing import Text
import hashlib
# We hackishly patch this function in order to revert it to the state
# it had when this migration was first written. This is a balance
# between copying in a historical version of hundreds of lines of code
# from zerver.lib.upload (which would be pretty annoying to maintain)
# and just using the current version, which doesn't work
# since we rearranged the avatars in Zulip 1.6.
def patched_user_avatar_path(user_profile: UserProfile) -> Text:
email = user_profile.email
user_key = email.lower() + settings.AVATAR_SALT
return make_safe_digest(user_key, hashlib.sha1)
@patch('zerver.lib.upload.user_avatar_path', patched_user_avatar_path)
def verify_medium_avatar_image(apps: StateApps, schema_editor: DatabaseSchemaEditor) -> None:
user_profile_model = apps.get_model('zerver', 'UserProfile')
for user_profile in user_profile_model.objects.filter(avatar_source=u"U"):
upload_backend.ensure_medium_avatar_image(user_profile)
class Migration(migrations.Migration):
dependencies = [
('zerver', '0031_remove_system_avatar_source'),
]
operations = [
migrations.RunPython(verify_medium_avatar_image)
]
|
{
"content_hash": "02af2666a7b93948b4b083dddf322f9b",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 93,
"avg_line_length": 39.94871794871795,
"alnum_prop": 0.7612323491655969,
"repo_name": "Galexrt/zulip",
"id": "4531a42f1921e8b9d90274f26c0b8b76febfa923",
"size": "1583",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "zerver/migrations/0032_verify_all_medium_avatar_images.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "164"
},
{
"name": "CSS",
"bytes": "181865"
},
{
"name": "CoffeeScript",
"bytes": "18435"
},
{
"name": "Groovy",
"bytes": "5515"
},
{
"name": "HTML",
"bytes": "376447"
},
{
"name": "JavaScript",
"bytes": "1570488"
},
{
"name": "Nginx",
"bytes": "1228"
},
{
"name": "PHP",
"bytes": "18930"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "383634"
},
{
"name": "Puppet",
"bytes": "93562"
},
{
"name": "Python",
"bytes": "1830400"
},
{
"name": "Ruby",
"bytes": "255867"
},
{
"name": "Shell",
"bytes": "32366"
}
],
"symlink_target": ""
}
|
from registrasion.controllers.cart import CartController
from registrasion.controllers.credit_note import CreditNoteController
from registrasion.controllers.invoice import InvoiceController
from registrasion.models import commerce
from django.core.exceptions import ObjectDoesNotExist
class TestingCartController(CartController):
def set_quantity(self, product, quantity, batched=False):
''' Sets the _quantity_ of the given _product_ in the cart to the given
_quantity_. '''
self.set_quantities(((product, quantity),))
def add_to_cart(self, product, quantity):
''' Adds _quantity_ of the given _product_ to the cart. Raises
ValidationError if constraints are violated.'''
try:
product_item = commerce.ProductItem.objects.get(
cart=self.cart,
product=product)
old_quantity = product_item.quantity
except ObjectDoesNotExist:
old_quantity = 0
self.set_quantity(product, old_quantity + quantity)
def next_cart(self):
if self.cart.status == commerce.Cart.STATUS_ACTIVE:
self.cart.status = commerce.Cart.STATUS_PAID
self.cart.save()
class TestingInvoiceController(InvoiceController):
def pay(self, reference, amount, pre_validate=True):
        ''' Testing method for simulating an invoice payment by the given
        amount. '''
if pre_validate:
# Manual payments don't pre-validate; we should test that things
# still work if we do silly things.
self.validate_allowed_to_pay()
        # Add a payment record.
commerce.PaymentBase.objects.create(
invoice=self.invoice,
reference=reference,
amount=amount,
)
self.update_status()
class TestingCreditNoteController(CreditNoteController):
def refund(self):
commerce.CreditNoteRefund.objects.create(
parent=self.credit_note,
reference="Whoops."
)
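# Usage sketch (hypothetical test flow; assumes InvoiceController.for_cart
# exists as in registrasion proper):
#   invoice = TestingInvoiceController.for_cart(cart_controller.cart)
#   invoice.pay("test reference", invoice.invoice.value)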
|
{
"content_hash": "6237ba0368c718e03437347fe964031b",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 79,
"avg_line_length": 32.285714285714285,
"alnum_prop": 0.6529006882989183,
"repo_name": "chrisjrn/registrasion",
"id": "1684ea7563dd0c2801b707849dbdb9d3099ef9c9",
"size": "2034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "registrasion/tests/controller_helpers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "37061"
},
{
"name": "Python",
"bytes": "373937"
}
],
"symlink_target": ""
}
|
__copyright__ = "Copyright 2014, http://radical.rutgers.edu"
__license__ = "MIT"
__author__ = "Ole Weidner <ole.weidner@rutgers.edu>"
from md_task_description import MDTaskDescription, BoundMDTask
|
{
"content_hash": "63279ad034f0e4fdfe5135627ba429e0",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 62,
"avg_line_length": 40.4,
"alnum_prop": 0.7128712871287128,
"repo_name": "radical-cybertools/radical.ensemblemd.mdkernels",
"id": "4aba547be173898cd697a4813880b136117838fb",
"size": "202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/radical/ensemblemd/mdkernels/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "213"
},
{
"name": "Python",
"bytes": "19498"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import datetime
import math
import os
import re
import tokenize
import unittest
import custom_migration_operations.more_operations
import custom_migration_operations.operations
from django.conf import settings
from django.core.validators import EmailValidator, RegexValidator
from django.db import migrations, models
from django.db.migrations.writer import (
MigrationWriter, OperationWriter, SettingsReference,
)
from django.test import SimpleTestCase, TestCase, ignore_warnings
from django.utils import datetime_safe, six
from django.utils._os import upath
from django.utils.deconstruct import deconstructible
from django.utils.timezone import FixedOffset, get_default_timezone, utc
from django.utils.translation import ugettext_lazy as _
from .models import FoodManager, FoodQuerySet
class TestModel1(object):
def upload_to(self):
return "somewhere dynamic"
thing = models.FileField(upload_to=upload_to)
class OperationWriterTests(SimpleTestCase):
def test_empty_signature(self):
operation = custom_migration_operations.operations.TestOperation()
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.TestOperation(\n'
'),'
)
def test_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation(1, 2)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
'),'
)
def test_kwargs_signature(self):
operation = custom_migration_operations.operations.KwargsOperation(kwarg1=1)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=1,\n'
'),'
)
def test_args_kwargs_signature(self):
operation = custom_migration_operations.operations.ArgsKwargsOperation(1, 2, kwarg2=4)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsKwargsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
' kwarg2=4,\n'
'),'
)
def test_nested_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation(
custom_migration_operations.operations.ArgsOperation(1, 2),
custom_migration_operations.operations.KwargsOperation(kwarg1=3, kwarg2=4)
)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ArgsOperation(\n'
' arg1=custom_migration_operations.operations.ArgsOperation(\n'
' arg1=1,\n'
' arg2=2,\n'
' ),\n'
' arg2=custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=3,\n'
' kwarg2=4,\n'
' ),\n'
'),'
)
def test_multiline_args_signature(self):
operation = custom_migration_operations.operations.ArgsOperation("test\n arg1", "test\narg2")
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
"custom_migration_operations.operations.ArgsOperation(\n"
" arg1='test\\n arg1',\n"
" arg2='test\\narg2',\n"
"),"
)
def test_expand_args_signature(self):
operation = custom_migration_operations.operations.ExpandArgsOperation([1, 2])
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ExpandArgsOperation(\n'
' arg=[\n'
' 1,\n'
' 2,\n'
' ],\n'
'),'
)
def test_nested_operation_expand_args_signature(self):
operation = custom_migration_operations.operations.ExpandArgsOperation(
arg=[
custom_migration_operations.operations.KwargsOperation(
kwarg1=1,
kwarg2=2,
),
]
)
buff, imports = OperationWriter(operation, indentation=0).serialize()
self.assertEqual(imports, {'import custom_migration_operations.operations'})
self.assertEqual(
buff,
'custom_migration_operations.operations.ExpandArgsOperation(\n'
' arg=[\n'
' custom_migration_operations.operations.KwargsOperation(\n'
' kwarg1=1,\n'
' kwarg2=2,\n'
' ),\n'
' ],\n'
'),'
)
class WriterTests(TestCase):
"""
Tests the migration writer (makes migration files from Migration instances)
"""
def safe_exec(self, string, value=None):
l = {}
try:
exec(string, globals(), l)
except Exception as e:
if value:
self.fail("Could not exec %r (from value %r): %s" % (string.strip(), value, e))
else:
self.fail("Could not exec %r: %s" % (string.strip(), e))
        return l
def serialize_round_trip(self, value):
string, imports = MigrationWriter.serialize(value)
return self.safe_exec("%s\ntest_value_result = %s" % ("\n".join(imports), string), value)['test_value_result']
def assertSerializedEqual(self, value):
self.assertEqual(self.serialize_round_trip(value), value)
def assertSerializedResultEqual(self, value, target):
self.assertEqual(MigrationWriter.serialize(value), target)
def assertSerializedFieldEqual(self, value):
new_value = self.serialize_round_trip(value)
self.assertEqual(value.__class__, new_value.__class__)
self.assertEqual(value.max_length, new_value.max_length)
self.assertEqual(value.null, new_value.null)
self.assertEqual(value.unique, new_value.unique)
def test_serialize_numbers(self):
self.assertSerializedEqual(1)
self.assertSerializedEqual(1.2)
self.assertTrue(math.isinf(self.serialize_round_trip(float("inf"))))
self.assertTrue(math.isinf(self.serialize_round_trip(float("-inf"))))
self.assertTrue(math.isnan(self.serialize_round_trip(float("nan"))))
def test_serialize_constants(self):
self.assertSerializedEqual(None)
self.assertSerializedEqual(True)
self.assertSerializedEqual(False)
def test_serialize_strings(self):
self.assertSerializedEqual(b"foobar")
string, imports = MigrationWriter.serialize(b"foobar")
self.assertEqual(string, "b'foobar'")
self.assertSerializedEqual("föobár")
string, imports = MigrationWriter.serialize("foobar")
self.assertEqual(string, "'foobar'")
def test_serialize_multiline_strings(self):
self.assertSerializedEqual(b"foo\nbar")
string, imports = MigrationWriter.serialize(b"foo\nbar")
self.assertEqual(string, "b'foo\\nbar'")
self.assertSerializedEqual("föo\nbár")
string, imports = MigrationWriter.serialize("foo\nbar")
self.assertEqual(string, "'foo\\nbar'")
def test_serialize_collections(self):
self.assertSerializedEqual({1: 2})
self.assertSerializedEqual(["a", 2, True, None])
self.assertSerializedEqual({2, 3, "eighty"})
self.assertSerializedEqual({"lalalala": ["yeah", "no", "maybe"]})
self.assertSerializedEqual(_('Hello'))
def test_serialize_builtin_types(self):
self.assertSerializedEqual([list, tuple, dict, set, frozenset])
self.assertSerializedResultEqual(
[list, tuple, dict, set, frozenset],
("[list, tuple, dict, set, frozenset]", set())
)
def test_serialize_functions(self):
with six.assertRaisesRegex(self, ValueError, 'Cannot serialize function: lambda'):
self.assertSerializedEqual(lambda x: 42)
self.assertSerializedEqual(models.SET_NULL)
string, imports = MigrationWriter.serialize(models.SET(42))
self.assertEqual(string, 'models.SET(42)')
self.serialize_round_trip(models.SET(42))
def test_serialize_datetime(self):
self.assertSerializedEqual(datetime.datetime.utcnow())
self.assertSerializedEqual(datetime.datetime.utcnow)
self.assertSerializedEqual(datetime.datetime.today())
self.assertSerializedEqual(datetime.datetime.today)
self.assertSerializedEqual(datetime.date.today())
self.assertSerializedEqual(datetime.date.today)
self.assertSerializedEqual(datetime.datetime.now().time())
self.assertSerializedEqual(datetime.datetime(2014, 1, 1, 1, 1, tzinfo=get_default_timezone()))
self.assertSerializedEqual(datetime.datetime(2013, 12, 31, 22, 1, tzinfo=FixedOffset(180)))
self.assertSerializedResultEqual(
datetime.datetime(2014, 1, 1, 1, 1),
("datetime.datetime(2014, 1, 1, 1, 1)", {'import datetime'})
)
self.assertSerializedResultEqual(
datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc),
(
"datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc)",
{'import datetime', 'from django.utils.timezone import utc'},
)
)
def test_serialize_datetime_safe(self):
self.assertSerializedResultEqual(
datetime_safe.date(2014, 3, 31),
("datetime.date(2014, 3, 31)", {'import datetime'})
)
self.assertSerializedResultEqual(
datetime_safe.time(10, 25),
("datetime.time(10, 25)", {'import datetime'})
)
self.assertSerializedResultEqual(
datetime_safe.datetime(2014, 3, 31, 16, 4, 31),
("datetime.datetime(2014, 3, 31, 16, 4, 31)", {'import datetime'})
)
def test_serialize_fields(self):
self.assertSerializedFieldEqual(models.CharField(max_length=255))
self.assertSerializedResultEqual(
models.CharField(max_length=255),
("models.CharField(max_length=255)", {"from django.db import models"})
)
self.assertSerializedFieldEqual(models.TextField(null=True, blank=True))
self.assertSerializedResultEqual(
models.TextField(null=True, blank=True),
("models.TextField(blank=True, null=True)", {'from django.db import models'})
)
def test_serialize_settings(self):
self.assertSerializedEqual(SettingsReference(settings.AUTH_USER_MODEL, "AUTH_USER_MODEL"))
self.assertSerializedResultEqual(
SettingsReference("someapp.model", "AUTH_USER_MODEL"),
("settings.AUTH_USER_MODEL", {"from django.conf import settings"})
)
    def test_serialize_iterators(self):
        self.assertSerializedResultEqual(
            ((x, x * x) for x in range(3)),
            ("((0, 0), (1, 1), (2, 4))", set())
        )
def test_serialize_compiled_regex(self):
"""
Make sure compiled regex can be serialized.
"""
regex = re.compile(r'^\w+$', re.U)
self.assertSerializedEqual(regex)
def test_serialize_class_based_validators(self):
"""
Ticket #22943: Test serialization of class-based validators, including
compiled regexes.
"""
validator = RegexValidator(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator(message='hello')")
self.serialize_round_trip(validator)
# Test with a compiled regex.
validator = RegexValidator(regex=re.compile(r'^\w+$', re.U))
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator(regex=re.compile('^\\\\w+$', 32))")
self.serialize_round_trip(validator)
# Test a string regex with flag
validator = RegexValidator(r'^[0-9]+$', flags=re.U)
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator('^[0-9]+$', flags=32)")
self.serialize_round_trip(validator)
# Test message and code
validator = RegexValidator('^[-a-zA-Z0-9_]+$', 'Invalid', 'invalid')
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.RegexValidator('^[-a-zA-Z0-9_]+$', 'Invalid', 'invalid')")
self.serialize_round_trip(validator)
# Test with a subclass.
validator = EmailValidator(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "django.core.validators.EmailValidator(message='hello')")
self.serialize_round_trip(validator)
validator = deconstructible(path="migrations.test_writer.EmailValidator")(EmailValidator)(message="hello")
string = MigrationWriter.serialize(validator)[0]
self.assertEqual(string, "migrations.test_writer.EmailValidator(message='hello')")
validator = deconstructible(path="custom.EmailValidator")(EmailValidator)(message="hello")
with six.assertRaisesRegex(self, ImportError, "No module named '?custom'?"):
MigrationWriter.serialize(validator)
validator = deconstructible(path="django.core.validators.EmailValidator2")(EmailValidator)(message="hello")
with self.assertRaisesMessage(ValueError, "Could not find object EmailValidator2 in django.core.validators."):
MigrationWriter.serialize(validator)
def test_serialize_empty_nonempty_tuple(self):
"""
Ticket #22679: makemigrations generates invalid code for (an empty
tuple) default_permissions = ()
"""
empty_tuple = ()
one_item_tuple = ('a',)
many_items_tuple = ('a', 'b', 'c')
self.assertSerializedEqual(empty_tuple)
self.assertSerializedEqual(one_item_tuple)
self.assertSerializedEqual(many_items_tuple)
@unittest.skipUnless(six.PY2, "Only applies on Python 2")
def test_serialize_direct_function_reference(self):
"""
Ticket #22436: You cannot use a function straight from its body
(e.g. define the method and use it in the same body)
"""
with self.assertRaises(ValueError):
self.serialize_round_trip(TestModel1.thing)
def test_serialize_local_function_reference(self):
"""
Neither py2 or py3 can serialize a reference in a local scope.
"""
class TestModel2(object):
def upload_to(self):
return "somewhere dynamic"
thing = models.FileField(upload_to=upload_to)
with self.assertRaises(ValueError):
self.serialize_round_trip(TestModel2.thing)
def test_serialize_local_function_reference_message(self):
"""
Make sure user is seeing which module/function is the issue
"""
class TestModel2(object):
def upload_to(self):
return "somewhere dynamic"
thing = models.FileField(upload_to=upload_to)
with six.assertRaisesRegex(self, ValueError,
'^Could not find function upload_to in migrations.test_writer'):
self.serialize_round_trip(TestModel2.thing)
def test_serialize_managers(self):
self.assertSerializedEqual(models.Manager())
self.assertSerializedResultEqual(
FoodQuerySet.as_manager(),
('migrations.models.FoodQuerySet.as_manager()', {'import migrations.models'})
)
self.assertSerializedEqual(FoodManager('a', 'b'))
self.assertSerializedEqual(FoodManager('x', 'y', c=3, d=4))
def test_serialize_frozensets(self):
self.assertSerializedEqual(frozenset())
self.assertSerializedEqual(frozenset("let it go"))
def test_serialize_timedelta(self):
self.assertSerializedEqual(datetime.timedelta())
self.assertSerializedEqual(datetime.timedelta(minutes=42))
def test_simple_migration(self):
"""
Tests serializing a simple migration.
"""
fields = {
'charfield': models.DateTimeField(default=datetime.datetime.utcnow),
'datetimefield': models.DateTimeField(default=datetime.datetime.utcnow),
}
options = {
'verbose_name': 'My model',
'verbose_name_plural': 'My models',
}
migration = type(str("Migration"), (migrations.Migration,), {
"operations": [
migrations.CreateModel("MyModel", tuple(fields.items()), options, (models.Model,)),
migrations.CreateModel("MyModel2", tuple(fields.items()), bases=(models.Model,)),
migrations.CreateModel(name="MyModel3", fields=tuple(fields.items()), options=options, bases=(models.Model,)),
migrations.DeleteModel("MyModel"),
migrations.AddField("OtherModel", "datetimefield", fields["datetimefield"]),
],
"dependencies": [("testapp", "some_other_one")],
})
writer = MigrationWriter(migration)
output = writer.as_string()
# It should NOT be unicode.
self.assertIsInstance(output, six.binary_type, "Migration as_string returned unicode")
# We don't test the output formatting - that's too fragile.
# Just make sure it runs for now, and that things look alright.
result = self.safe_exec(output)
self.assertIn("Migration", result)
# In order to preserve compatibility with Python 3.2 unicode literals
# prefix shouldn't be added to strings.
tokens = tokenize.generate_tokens(six.StringIO(str(output)).readline)
for token_type, token_source, (srow, scol), __, line in tokens:
if token_type == tokenize.STRING:
self.assertFalse(
token_source.startswith('u'),
"Unicode literal prefix found at %d:%d: %r" % (
srow, scol, line.strip()
)
)
# Silence warning on Python 2: Not importing directory
# 'tests/migrations/migrations_test_apps/without_init_file/migrations':
# missing __init__.py
@ignore_warnings(category=ImportWarning)
def test_migration_path(self):
test_apps = [
'migrations.migrations_test_apps.normal',
'migrations.migrations_test_apps.with_package_model',
'migrations.migrations_test_apps.without_init_file',
]
base_dir = os.path.dirname(os.path.dirname(upath(__file__)))
for app in test_apps:
with self.modify_settings(INSTALLED_APPS={'append': app}):
migration = migrations.Migration('0001_initial', app.split('.')[-1])
expected_path = os.path.join(base_dir, *(app.split('.') + ['migrations', '0001_initial.py']))
writer = MigrationWriter(migration)
self.assertEqual(writer.path, expected_path)
def test_custom_operation(self):
migration = type(str("Migration"), (migrations.Migration,), {
"operations": [
custom_migration_operations.operations.TestOperation(),
custom_migration_operations.operations.CreateModel(),
migrations.CreateModel("MyModel", (), {}, (models.Model,)),
custom_migration_operations.more_operations.TestOperation()
],
"dependencies": []
})
writer = MigrationWriter(migration)
output = writer.as_string()
result = self.safe_exec(output)
self.assertIn("custom_migration_operations", result)
self.assertNotEqual(
result['custom_migration_operations'].operations.TestOperation,
result['custom_migration_operations'].more_operations.TestOperation
)
def test_sorted_imports(self):
"""
#24155 - Tests ordering of imports.
"""
migration = type(str("Migration"), (migrations.Migration,), {
"operations": [
migrations.AddField("mymodel", "myfield", models.DateTimeField(
default=datetime.datetime(2012, 1, 1, 1, 1, tzinfo=utc),
)),
]
})
writer = MigrationWriter(migration)
output = writer.as_string().decode('utf-8')
self.assertIn(
"import datetime\n"
"from django.db import migrations, models\n"
"from django.utils.timezone import utc\n",
output
)
def test_deconstruct_class_arguments(self):
# Yes, it doesn't make sense to use a class as a default for a
# CharField. It does make sense for custom fields though, for example
# an enumfield that takes the enum class as an argument.
class DeconstructableInstances(object):
def deconstruct(self):
return ('DeconstructableInstances', [], {})
string = MigrationWriter.serialize(models.CharField(default=DeconstructableInstances))[0]
self.assertEqual(string, "models.CharField(default=migrations.test_writer.DeconstructableInstances)")
|
{
"content_hash": "4005d38dcf82a851a6c0fddd099c3c7b",
"timestamp": "",
"source": "github",
"line_count": 523,
"max_line_length": 126,
"avg_line_length": 42.7112810707457,
"alnum_prop": 0.6256155430208613,
"repo_name": "marqueedev/django",
"id": "46531df791d762178afb6a01cc743d4ca1e51a9c",
"size": "22366",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/migrations/test_writer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "43000"
},
{
"name": "HTML",
"bytes": "171155"
},
{
"name": "JavaScript",
"bytes": "105066"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "10710867"
},
{
"name": "Shell",
"bytes": "3056"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from sqlalchemy_utils import IPAddressType, ChoiceType
from app import db
ROLE_USER = 0
ROLE_ADMIN = 1
projects_users = db.Table(
'project_user',
db.Column('user_id', db.Integer, db.ForeignKey('user.id')),
db.Column('project_id', db.Integer, db.ForeignKey('project.id')),
db.Column('is_owner', db.Boolean, default=False)
)
projects_deploys = db.Table(
'project_deploy',
db.Column('deploy_id', db.Integer, db.ForeignKey('deploy.id')),
db.Column('project_id', db.Integer, db.ForeignKey('project.id'))
)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
role = db.Column(db.SmallInteger, default=ROLE_USER)
email = db.Column(db.String(120), index=True, unique=True)
avatar_url = db.Column(db.String(255), unique=False)
    projects = db.relationship(
        'Project',
        secondary=projects_users,
        # Join conditions are inferred from the foreign keys declared on
        # the project_user association table.
        backref=db.backref('projects_users', lazy='dynamic'),
        lazy='dynamic'
    )
enabled = db.Column(db.Boolean, default=True)
def __init__(self, role, email, avatar_url, enabled):
self.role = role
self.email = email
self.avatar_url = avatar_url
self.enabled = enabled
def is_authenticated(self):
return True
def is_active(self):
return self.enabled
def is_anonymous(self):
return False
def get_id(self):
return unicode(self.id)
def __repr__(self):
return '<User %r>' % self.email
class Project(db.Model):
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
title = db.Column(db.String(255), unique=True)
branch = db.Column(db.String(255), unique=False)
repo_url = db.Column(db.String(255), unique=True)
deploy_at = db.Column(db.DateTime)
created_at = db.Column(db.DateTime)
updated_at = db.Column(db.DateTime)
servers = db.relationship(
'Server',
backref='project',
lazy='dynamic'
)
def __init__(self, user_id, title, branch, repo_url, deploy_at=None):
self.user_id = user_id
self.title = title
self.branch = branch
self.repo_url = repo_url
self.deploy_at = deploy_at
if self.created_at is None:
self.created_at = datetime.utcnow()
self.updated_at = datetime.utcnow()
def __repr__(self):
return '<Project %r>' % self.title
class Deploy(db.Model):
id = db.Column(db.Integer, primary_key=True)
project_id = db.Column(db.Integer, db.ForeignKey('project.id'))
number = db.Column(db.String(140))
timestamp = db.Column(db.DateTime)
    projects = db.relationship(
        'Project',
        secondary=projects_deploys,
        # Join conditions are inferred from the foreign keys declared on
        # the project_deploy association table.
        backref=db.backref('projects_deploys', lazy='dynamic'),
        lazy='dynamic'
    )
def __init__(self, project_id, number, timestamp):
self.project_id = project_id
self.number = number
self.timestamp = timestamp
def __repr__(self):
return '<Deploy %r>' % (self.number)
class Server(db.Model):
ROLES = [
(u'demo', u'Demo'),
(u'staging', u'Staging'),
(u'production', u'Production')
]
id = db.Column(db.Integer, primary_key=True)
role = db.Column(ChoiceType(ROLES))
name = db.Column(db.String(255), unique=True)
provider = db.Column(db.String(255))
ssh_login = db.Column(db.String(255), default="developer")
ip_address = db.Column(IPAddressType)
project_id = db.Column(db.Integer, db.ForeignKey('project.id'))
def __init__(self, role, name, provider, ssh_login, ip_address, project_id):
self.role = role
self.name = name
self.provider = provider
self.ssh_login = ssh_login
self.ip_address = ip_address
self.project_id = project_id
def __repr__(self):
return '<Server %r>' % (self.name)
|
{
"content_hash": "a22d16fb081dfd06fc6ae25acd94f637",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 80,
"avg_line_length": 30.255474452554743,
"alnum_prop": 0.6185765983112184,
"repo_name": "iniweb/deployCD",
"id": "f4776de8f603fbe163e6ca089ad1cab334083f07",
"size": "4145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6205"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "20361"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('zerver', '0139_fill_last_message_id_in_subscription_logs'),
]
operations = [
migrations.AddField(
model_name='realm',
name='send_welcome_emails',
field=models.BooleanField(default=True),
),
]
|
{
"content_hash": "264f3d3a3205ce89a6f34132301c054f",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 69,
"avg_line_length": 23.1875,
"alnum_prop": 0.5956873315363881,
"repo_name": "showell/zulip",
"id": "24276818ac95a553094e73129ee20441eec66dda",
"size": "421",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "zerver/migrations/0140_realm_send_welcome_emails.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "433235"
},
{
"name": "Dockerfile",
"bytes": "2941"
},
{
"name": "Emacs Lisp",
"bytes": "157"
},
{
"name": "HTML",
"bytes": "634357"
},
{
"name": "Handlebars",
"bytes": "235334"
},
{
"name": "JavaScript",
"bytes": "3341135"
},
{
"name": "Perl",
"bytes": "8594"
},
{
"name": "Puppet",
"bytes": "79720"
},
{
"name": "Python",
"bytes": "8120030"
},
{
"name": "Ruby",
"bytes": "8480"
},
{
"name": "Shell",
"bytes": "133132"
},
{
"name": "TypeScript",
"bytes": "20603"
}
],
"symlink_target": ""
}
|
from sys import maxsize
class Contact:
def __init__(self, first_name=None, last_name=None, address=None, home_phone=None, mobile_phone=None,
work_phone=None, secondary_phone=None, email=None, email2=None, email3=None, contact_id=None,
all_phones_from_home_page=None, all_emails_from_home_page=None):
self.first_name = first_name
self.last_name = last_name
self.address = address
self.home_phone = home_phone
self.mobile_phone = mobile_phone
self.work_phone = work_phone
self.secondary_phone = secondary_phone
self.email = email
self.email2 = email2
self.email3 = email3
self.contact_id = contact_id
self.all_phones_from_home_page = all_phones_from_home_page
self.all_emails_from_home_page = all_emails_from_home_page
def __repr__(self):
return "%s:%s:%s:%s:%s:%s:%s:%s:%s:%s:%s" % (
self.contact_id, self.first_name, self.last_name, self.mobile_phone, self.home_phone, self.work_phone,
self.secondary_phone, self.address, self.email, self.email2, self.email3)
def __eq__(self, other):
return (self.contact_id is None or other.contact_id is None or self.contact_id == other.contact_id) and (
self.first_name == other.first_name) and (self.last_name == other.last_name)
def id_or_max(self):
if self.contact_id:
return int(self.contact_id)
else:
return maxsize
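# Typical use of id_or_max: sort contacts so those without a database id
# come last, e.g. sorted(contacts, key=Contact.id_or_max).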
|
{
"content_hash": "c8f06c3f7aca1504de24e263bcd8abc1",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 114,
"avg_line_length": 43.114285714285714,
"alnum_prop": 0.6182902584493042,
"repo_name": "tkapriyan/python_training",
"id": "05ee731d018336afd779a8dc28a78112231ebe0b",
"size": "1533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "model/contact.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "29309"
}
],
"symlink_target": ""
}
|
"""房间进入接口"""
import clr, sys
from action import *
from lang import Lang
from cardAILogic import CardAILogic
clr.AddReference('ZyGames.Framework.Game')
clr.AddReference('ZyGames.Doudizhu.Lang')
clr.AddReference('ZyGames.Doudizhu.Model')
clr.AddReference('ZyGames.Doudizhu.Bll')
from System.Collections.Generic import *
from ZyGames.Framework.Game.Service import *
from ZyGames.Doudizhu.Lang import *
from ZyGames.Doudizhu.Model import *
from ZyGames.Doudizhu.Bll import *
from ZyGames.Doudizhu.Bll.Logic import *
from ZyGames.Doudizhu.Bll.Base import *
from ZyGames.Doudizhu.Bll.Com.Chat import *
class UrlParam(HttpParam):
def __init__(self):
HttpParam.__init__(self)
self.RoomId = 0
self.Op = 1
class ActionResult(DataResult):
def __init__(self):
DataResult.__init__(self)
self.GameCoin = 0
def getUrlElement(httpGet, parent):
    urlParam = UrlParam()
if httpGet.Contains("RoomId"):
urlParam.RoomId = httpGet.GetIntValue("RoomId")
urlParam.Op = httpGet.GetIntValue("Op")
else:
urlParam.Result = False
return urlParam
def takeAction(urlParam, parent):
actionResult = ActionResult()
    user = parent.Current.User
gameRoom = GameRoom.Current
roomInfo = gameRoom.GetRoom(urlParam.RoomId)
if not roomInfo or not user:
parent.ErrorCode = Lang.getLang("ErrorCode")
parent.ErrorInfo = Lang.getLang("LoadError")
actionResult.Result = False
return actionResult
#if not user.RemoteAddress or user.RemoteAddress=='':
# parent.ErrorCode = Lang.getLang("ErrorCode")
# parent.ErrorInfo = Lang.getLang("St2001_ConnectError")
# actionResult.Result = False
# return actionResult
if urlParam.Op == 2:
        # Continue to the next game round
GameRoom.Current.Exit(user)
else:
        # Daily gift of game coins
result = gameRoom.CheckDailyGiffCoin(user, roomInfo)
if result:
parent.ErrorCode = 3
parent.ErrorInfo = gameRoom.Tip( Lang.getLang("St2001_GiffCoin"), roomInfo.GiffCion)
pass
if user.GameCoin < roomInfo.MinGameCion:
parent.ErrorCode = 2
parent.ErrorInfo = gameRoom.Tip( Lang.getLang("St2001_CoinNotEnough"), user.GameCoin, roomInfo.MinGameCion)
actionResult.Result = False
return actionResult
table = GameRoom.Current.GetTableData(user)
actionResult.GameCoin = user.GameCoin
if table and table.IsStarting and user.Property.TableId > 0:
GameTable.Current.SyncNotifyAction(ActionIDDefine.Cst_Action2015, user, None, None)
return actionResult
else:
gameRoom.Enter(user, roomInfo)
return actionResult
def buildPacket(writer, urlParam, actionResult):
writer.PushIntoStack(actionResult.GameCoin)
return True
|
{
"content_hash": "4874d01af390a120dba600a4c83750d0",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 115,
"avg_line_length": 34.19047619047619,
"alnum_prop": 0.6685236768802229,
"repo_name": "wenhulove333/ScutServer",
"id": "7c9d357412a1e5e80583e31ecb48acfa45fea578",
"size": "2902",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Sample/Doudizhu/Server/src/ZyGames.Doudizhu.HostServer/bin/Debug/PyScript/Action/Action2001.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "150472"
},
{
"name": "ActionScript",
"bytes": "339184"
},
{
"name": "Batchfile",
"bytes": "60466"
},
{
"name": "C",
"bytes": "3976261"
},
{
"name": "C#",
"bytes": "9481083"
},
{
"name": "C++",
"bytes": "11640198"
},
{
"name": "CMake",
"bytes": "489"
},
{
"name": "CSS",
"bytes": "13478"
},
{
"name": "Groff",
"bytes": "16179"
},
{
"name": "HTML",
"bytes": "283997"
},
{
"name": "Inno Setup",
"bytes": "28931"
},
{
"name": "Java",
"bytes": "214263"
},
{
"name": "JavaScript",
"bytes": "2809"
},
{
"name": "Lua",
"bytes": "4667522"
},
{
"name": "Makefile",
"bytes": "166623"
},
{
"name": "Objective-C",
"bytes": "401654"
},
{
"name": "Objective-C++",
"bytes": "355347"
},
{
"name": "Python",
"bytes": "1633926"
},
{
"name": "Shell",
"bytes": "101770"
},
{
"name": "Visual Basic",
"bytes": "18764"
}
],
"symlink_target": ""
}
|
"""Blink IDL Intermediate Representation (IR) classes.
Classes are primarily constructors, which build an IdlDefinitions object
(and various contained objects) from an AST (produced by blink_idl_parser).
This is in two steps:
* Constructors walk the AST, creating objects.
* Typedef resolution.
Typedefs are all resolved here, and not stored in IR.
Typedef resolution uses some auxiliary classes and OOP techniques to make this
a generic call, via the resolve_typedefs() method.
Class hierarchy (mostly containment, '<' for inheritance):
IdlDefinitions
IdlCallbackFunction < TypedObject
IdlEnum :: FIXME: remove, just use a dict for enums
IdlInterface
IdlAttribute < TypedObject
IdlConstant < TypedObject
IdlLiteral
IdlOperation < TypedObject
IdlArgument < TypedObject
IdlStringifier
IdlException < IdlInterface
(same contents as IdlInterface)
TypedObject :: mixin for typedef resolution
Design doc: http://www.chromium.org/developers/design-documents/idl-compiler
"""
import abc
from idl_types import IdlType, IdlUnionType, IdlArrayType, IdlSequenceType, IdlNullableType
SPECIAL_KEYWORD_LIST = ['GETTER', 'SETTER', 'DELETER']
STANDARD_TYPEDEFS = {
# http://www.w3.org/TR/WebIDL/#common-DOMTimeStamp
'DOMTimeStamp': 'unsigned long long',
}
################################################################################
# TypedObject (mixin for typedef resolution)
################################################################################
class TypedObject(object):
"""Object with a type, such as an Attribute or Operation (return value).
The type can be an actual type, or can be a typedef, which must be resolved
before passing data to the code generator.
"""
__metaclass__ = abc.ABCMeta
idl_type = None
def resolve_typedefs(self, typedefs):
"""Resolve typedefs to actual types in the object."""
# Constructors don't have their own return type, because it's the
# interface itself.
if not self.idl_type:
return
# Need to re-assign self.idl_type, not just mutate idl_type,
# since type(idl_type) may change.
self.idl_type = self.idl_type.resolve_typedefs(typedefs)
################################################################################
# Definitions (main container class)
################################################################################
class IdlDefinitions(object):
def __init__(self, idl_name, node):
"""Args: node: AST root node, class == 'File'"""
self.callback_functions = {}
self.dictionaries = {}
self.enumerations = {}
self.interfaces = {}
self.idl_name = idl_name
node_class = node.GetClass()
if node_class != 'File':
raise ValueError('Unrecognized node class: %s' % node_class)
typedefs = dict((typedef_name, IdlType(type_name))
for typedef_name, type_name in
STANDARD_TYPEDEFS.iteritems())
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Interface':
interface = IdlInterface(idl_name, child)
self.interfaces[interface.name] = interface
elif child_class == 'Exception':
exception = IdlException(idl_name, child)
# For simplicity, treat exceptions as interfaces
self.interfaces[exception.name] = exception
elif child_class == 'Typedef':
type_name = child.GetName()
typedefs[type_name] = typedef_node_to_type(child)
elif child_class == 'Enum':
enumeration = IdlEnum(idl_name, child)
self.enumerations[enumeration.name] = enumeration
elif child_class == 'Callback':
callback_function = IdlCallbackFunction(idl_name, child)
self.callback_functions[callback_function.name] = callback_function
elif child_class == 'Implements':
# Implements is handled at the interface merging step
pass
elif child_class == 'Dictionary':
dictionary = IdlDictionary(idl_name, child)
self.dictionaries[dictionary.name] = dictionary
else:
raise ValueError('Unrecognized node class: %s' % child_class)
# Typedefs are not stored in IR:
# Resolve typedefs with the actual types and then discard the Typedefs.
# http://www.w3.org/TR/WebIDL/#idl-typedefs
self.resolve_typedefs(typedefs)
def resolve_typedefs(self, typedefs):
for callback_function in self.callback_functions.itervalues():
callback_function.resolve_typedefs(typedefs)
for interface in self.interfaces.itervalues():
interface.resolve_typedefs(typedefs)
def update(self, other):
"""Update with additional IdlDefinitions."""
for interface_name, new_interface in other.interfaces.iteritems():
if not new_interface.is_partial:
# Add as new interface
self.interfaces[interface_name] = new_interface
continue
# Merge partial to existing interface
try:
self.interfaces[interface_name].merge(new_interface)
except KeyError:
raise Exception('Tried to merge partial interface for {0}, '
'but no existing interface by that name'
.format(interface_name))
# Merge callbacks and enumerations
self.enumerations.update(other.enumerations)
self.callback_functions.update(other.callback_functions)
################################################################################
# Callback Functions
################################################################################
class IdlCallbackFunction(TypedObject):
def __init__(self, idl_name, node):
children = node.GetChildren()
num_children = len(children)
if num_children != 2:
raise ValueError('Expected 2 children, got %s' % num_children)
type_node, arguments_node = children
arguments_node_class = arguments_node.GetClass()
if arguments_node_class != 'Arguments':
raise ValueError('Expected Arguments node, got %s' % arguments_node_class)
self.idl_name = idl_name
self.name = node.GetName()
self.idl_type = type_node_to_type(type_node)
self.arguments = arguments_node_to_arguments(idl_name, arguments_node)
def resolve_typedefs(self, typedefs):
TypedObject.resolve_typedefs(self, typedefs)
for argument in self.arguments:
argument.resolve_typedefs(typedefs)
################################################################################
# Dictionary
################################################################################
class IdlDictionary(object):
def __init__(self, idl_name, node):
self.extended_attributes = {}
self.is_partial = node.GetProperty('Partial') or False
self.idl_name = idl_name
self.name = node.GetName()
self.members = []
self.parent = None
for child in node.GetChildren():
child_class = child.GetClass()
if child_class == 'Inherit':
self.parent = child.GetName()
elif child_class == 'Key':
self.members.append(IdlDictionaryMember(idl_name, child))
elif child_class == 'ExtAttributes':
self.extended_attributes = (
ext_attributes_node_to_extended_attributes(idl_name, child))
else:
raise ValueError('Unrecognized node class: %s' % child_class)
class IdlDictionaryMember(object):
def __init__(self, idl_name, node):
self.default_value = None
self.extended_attributes = {}
self.idl_type = None
self.idl_name = idl_name
self.name = node.GetName()
for child in node.GetChildren():
child_class = child.GetClass()
if child_class == 'Type':
self.idl_type = type_node_to_type(child)
elif child_class == 'Default':
self.default_value = default_node_to_idl_literal(child)
elif child_class == 'ExtAttributes':
self.extended_attributes = (
ext_attributes_node_to_extended_attributes(idl_name, child))
else:
raise ValueError('Unrecognized node class: %s' % child_class)
################################################################################
# Enumerations
################################################################################
class IdlEnum(object):
# FIXME: remove, just treat enums as a dictionary
def __init__(self, idl_name, node):
self.idl_name = idl_name
self.name = node.GetName()
self.values = []
for child in node.GetChildren():
self.values.append(child.GetName())
################################################################################
# Interfaces and Exceptions
################################################################################
class IdlInterface(object):
def __init__(self, idl_name, node=None):
self.attributes = []
self.constants = []
self.constructors = []
self.custom_constructors = []
self.extended_attributes = {}
self.operations = []
self.parent = None
self.stringifier = None
if not node: # Early exit for IdlException.__init__
return
self.is_callback = node.GetProperty('CALLBACK') or False
self.is_exception = False
# FIXME: uppercase 'Partial' => 'PARTIAL' in base IDL parser
self.is_partial = node.GetProperty('Partial') or False
self.idl_name = idl_name
self.name = node.GetName()
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Attribute':
self.attributes.append(IdlAttribute(idl_name, child))
elif child_class == 'Const':
self.constants.append(IdlConstant(idl_name, child))
elif child_class == 'ExtAttributes':
extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
self.constructors, self.custom_constructors = (
extended_attributes_to_constructors(idl_name, extended_attributes))
clear_constructor_attributes(extended_attributes)
self.extended_attributes = extended_attributes
elif child_class == 'Operation':
self.operations.append(IdlOperation(idl_name, child))
elif child_class == 'Inherit':
self.parent = child.GetName()
elif child_class == 'Stringifier':
self.stringifier = IdlStringifier(idl_name, child)
self.process_stringifier()
else:
raise ValueError('Unrecognized node class: %s' % child_class)
def resolve_typedefs(self, typedefs):
for attribute in self.attributes:
attribute.resolve_typedefs(typedefs)
for constant in self.constants:
constant.resolve_typedefs(typedefs)
for constructor in self.constructors:
constructor.resolve_typedefs(typedefs)
for custom_constructor in self.custom_constructors:
custom_constructor.resolve_typedefs(typedefs)
for operation in self.operations:
operation.resolve_typedefs(typedefs)
def process_stringifier(self):
"""Add the stringifier's attribute or named operation child, if it has
one, as a regular attribute/operation of this interface."""
if self.stringifier.attribute:
self.attributes.append(self.stringifier.attribute)
elif self.stringifier.operation:
self.operations.append(self.stringifier.operation)
def merge(self, other):
"""Merge in another interface's members (e.g., partial interface)"""
self.attributes.extend(other.attributes)
self.constants.extend(other.constants)
self.operations.extend(other.operations)
class IdlException(IdlInterface):
    # Properly, exceptions and interfaces are distinct, and thus should inherit
    # from a common base class (say, "IdlExceptionOrInterface").
# However, there is only one exception (DOMException), and new exceptions
# are not expected. Thus it is easier to implement exceptions as a
# restricted subclass of interfaces.
# http://www.w3.org/TR/WebIDL/#idl-exceptions
def __init__(self, idl_name, node):
# Exceptions are similar to Interfaces, but simpler
IdlInterface.__init__(self, idl_name)
self.is_callback = False
self.is_exception = True
self.is_partial = False
self.idl_name = idl_name
self.name = node.GetName()
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Attribute':
attribute = IdlAttribute(idl_name, child)
self.attributes.append(attribute)
elif child_class == 'Const':
self.constants.append(IdlConstant(idl_name, child))
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
elif child_class == 'ExceptionOperation':
self.operations.append(IdlOperation.from_exception_operation_node(idl_name, child))
else:
raise ValueError('Unrecognized node class: %s' % child_class)
################################################################################
# Attributes
################################################################################
class IdlAttribute(TypedObject):
def __init__(self, idl_name, node):
self.is_read_only = node.GetProperty('READONLY') or False
self.is_static = node.GetProperty('STATIC') or False
self.idl_name = idl_name
self.name = node.GetName()
# Defaults, overridden below
self.idl_type = None
self.extended_attributes = {}
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Type':
self.idl_type = type_node_to_type(child)
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
################################################################################
# Constants
################################################################################
class IdlConstant(TypedObject):
def __init__(self, idl_name, node):
children = node.GetChildren()
num_children = len(children)
if num_children < 2 or num_children > 3:
raise ValueError('Expected 2 or 3 children, got %s' % num_children)
type_node = children[0]
value_node = children[1]
value_node_class = value_node.GetClass()
if value_node_class != 'Value':
raise ValueError('Expected Value node, got %s' % value_node_class)
self.idl_name = idl_name
self.name = node.GetName()
# ConstType is more limited than Type, so subtree is smaller and
# we don't use the full type_node_to_type function.
self.idl_type = type_node_inner_to_type(type_node)
# FIXME: This code is unnecessarily complicated due to the rather
# inconsistent way the upstream IDL parser outputs default values.
# http://crbug.com/374178
if value_node.GetProperty('TYPE') == 'float':
self.value = value_node.GetProperty('VALUE')
else:
self.value = value_node.GetName()
if num_children == 3:
ext_attributes_node = children[2]
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, ext_attributes_node)
else:
self.extended_attributes = {}
################################################################################
# Literals
################################################################################
class IdlLiteral(object):
def __init__(self, idl_type, value):
self.idl_type = idl_type
self.value = value
self.is_null = False
def __str__(self):
if self.idl_type == 'DOMString':
return 'String("%s")' % self.value
if self.idl_type == 'integer':
return '%d' % self.value
if self.idl_type == 'float':
return '%g' % self.value
if self.idl_type == 'boolean':
return 'true' if self.value else 'false'
raise ValueError('Unsupported literal type: %s' % self.idl_type)
class IdlLiteralNull(IdlLiteral):
def __init__(self):
self.idl_type = 'NULL'
self.value = None
self.is_null = True
def __str__(self):
return 'nullptr'
def default_node_to_idl_literal(node):
# FIXME: This code is unnecessarily complicated due to the rather
# inconsistent way the upstream IDL parser outputs default values.
# http://crbug.com/374178
idl_type = node.GetProperty('TYPE')
if idl_type == 'DOMString':
value = node.GetProperty('NAME')
if '"' in value or '\\' in value:
raise ValueError('Unsupported string value: %r' % value)
return IdlLiteral(idl_type, value)
if idl_type == 'integer':
return IdlLiteral(idl_type, int(node.GetProperty('NAME'), base=0))
if idl_type == 'float':
return IdlLiteral(idl_type, float(node.GetProperty('VALUE')))
if idl_type == 'boolean':
return IdlLiteral(idl_type, node.GetProperty('VALUE'))
if idl_type == 'NULL':
return IdlLiteralNull()
raise ValueError('Unrecognized default value type: %s' % idl_type)
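# Illustrative example, not from the original source: literals stringify to
# C++ source text, e.g. str(IdlLiteral('DOMString', 'abc')) == 'String("abc")'
# and str(IdlLiteralNull()) == 'nullptr'.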
################################################################################
# Operations
################################################################################
class IdlOperation(TypedObject):
def __init__(self, idl_name, node=None):
self.arguments = []
self.extended_attributes = {}
self.specials = []
self.is_constructor = False
if not node:
self.is_static = False
return
self.idl_name = idl_name
self.name = node.GetName() # FIXME: should just be: or ''
# FIXME: AST should use None internally
if self.name == '_unnamed_':
self.name = ''
self.is_static = node.GetProperty('STATIC') or False
property_dictionary = node.GetProperties()
for special_keyword in SPECIAL_KEYWORD_LIST:
if special_keyword in property_dictionary:
self.specials.append(special_keyword.lower())
self.idl_type = None
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Arguments':
self.arguments = arguments_node_to_arguments(idl_name, child)
elif child_class == 'Type':
self.idl_type = type_node_to_type(child)
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
@classmethod
def from_exception_operation_node(cls, idl_name, node):
# Needed to handle one case in DOMException.idl:
# // Override in a Mozilla compatible format
# [NotEnumerable] DOMString toString();
# FIXME: can we remove this? replace with a stringifier?
operation = cls(idl_name)
operation.name = node.GetName()
children = node.GetChildren()
if len(children) < 1 or len(children) > 2:
raise ValueError('ExceptionOperation node with %s children, expected 1 or 2' % len(children))
type_node = children[0]
operation.idl_type = type_node_to_type(type_node)
if len(children) > 1:
ext_attributes_node = children[1]
operation.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, ext_attributes_node)
return operation
@classmethod
def constructor_from_arguments_node(cls, name, idl_name, arguments_node):
constructor = cls(idl_name)
constructor.name = name
constructor.arguments = arguments_node_to_arguments(idl_name, arguments_node)
constructor.is_constructor = True
return constructor
def resolve_typedefs(self, typedefs):
TypedObject.resolve_typedefs(self, typedefs)
for argument in self.arguments:
argument.resolve_typedefs(typedefs)
################################################################################
# Arguments
################################################################################
class IdlArgument(TypedObject):
def __init__(self, idl_name, node):
self.extended_attributes = {}
self.idl_type = None
self.is_optional = node.GetProperty('OPTIONAL') # syntax: (optional T)
self.is_variadic = False # syntax: (T...)
self.idl_name = idl_name
self.name = node.GetName()
self.default_value = None
children = node.GetChildren()
for child in children:
child_class = child.GetClass()
if child_class == 'Type':
self.idl_type = type_node_to_type(child)
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
elif child_class == 'Argument':
child_name = child.GetName()
if child_name != '...':
raise ValueError('Unrecognized Argument node; expected "...", got "%s"' % child_name)
self.is_variadic = child.GetProperty('ELLIPSIS') or False
elif child_class == 'Default':
self.default_value = default_node_to_idl_literal(child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
def arguments_node_to_arguments(idl_name, node):
# [Constructor] and [CustomConstructor] without arguments (the bare form)
# have None instead of an arguments node, but have the same meaning as using
# an empty argument list, [Constructor()], so special-case this.
# http://www.w3.org/TR/WebIDL/#Constructor
if node is None:
return []
return [IdlArgument(idl_name, argument_node)
for argument_node in node.GetChildren()]
################################################################################
# Stringifiers
################################################################################
class IdlStringifier(object):
def __init__(self, idl_name, node):
self.attribute = None
self.operation = None
self.extended_attributes = {}
self.idl_name = idl_name
for child in node.GetChildren():
child_class = child.GetClass()
if child_class == 'Attribute':
self.attribute = IdlAttribute(idl_name, child)
elif child_class == 'Operation':
operation = IdlOperation(idl_name, child)
if operation.name:
self.operation = operation
elif child_class == 'ExtAttributes':
self.extended_attributes = ext_attributes_node_to_extended_attributes(idl_name, child)
else:
raise ValueError('Unrecognized node class: %s' % child_class)
        # Copy the stringifier's extended attributes (such as [Unforgeable]) onto
# the underlying attribute or operation, if there is one.
if self.attribute or self.operation:
(self.attribute or self.operation).extended_attributes.update(
self.extended_attributes)
################################################################################
# Extended attributes
################################################################################
def ext_attributes_node_to_extended_attributes(idl_name, node):
"""
Returns:
Dictionary of {ExtAttributeName: ExtAttributeValue}.
Value is usually a string, with these exceptions:
Constructors: value is a list of Arguments nodes, corresponding to
possible signatures of the constructor.
CustomConstructors: value is a list of Arguments nodes, corresponding to
possible signatures of the custom constructor.
NamedConstructor: value is a Call node, corresponding to the single
signature of the named constructor.
SetWrapperReferenceTo: value is an Arguments node.
"""
# Primarily just make a dictionary from the children.
# The only complexity is handling various types of constructors:
# Constructors and Custom Constructors can have duplicate entries due to
# overloading, and thus are stored in temporary lists.
# However, Named Constructors cannot be overloaded, and thus do not have
# a list.
# FIXME: move Constructor logic into separate function, instead of modifying
# extended attributes in-place.
constructors = []
custom_constructors = []
extended_attributes = {}
def child_node(extended_attribute_node):
children = extended_attribute_node.GetChildren()
if not children:
return None
if len(children) > 1:
raise ValueError('ExtAttributes node with %s children, expected at most 1' % len(children))
return children[0]
extended_attribute_node_list = node.GetChildren()
for extended_attribute_node in extended_attribute_node_list:
name = extended_attribute_node.GetName()
child = child_node(extended_attribute_node)
child_class = child and child.GetClass()
if name == 'Constructor':
if child_class and child_class != 'Arguments':
raise ValueError('Constructor only supports Arguments as child, but has child of class: %s' % child_class)
constructors.append(child)
elif name == 'CustomConstructor':
if child_class and child_class != 'Arguments':
raise ValueError('[CustomConstructor] only supports Arguments as child, but has child of class: %s' % child_class)
custom_constructors.append(child)
elif name == 'NamedConstructor':
if child_class and child_class != 'Call':
raise ValueError('[NamedConstructor] only supports Call as child, but has child of class: %s' % child_class)
extended_attributes[name] = child
elif name == 'SetWrapperReferenceTo':
if not child:
raise ValueError('[SetWrapperReferenceTo] requires a child, but has none.')
if child_class != 'Arguments':
raise ValueError('[SetWrapperReferenceTo] only supports Arguments as child, but has child of class: %s' % child_class)
extended_attributes[name] = arguments_node_to_arguments(idl_name, child)
elif child:
raise ValueError('ExtAttributes node with unexpected children: %s' % name)
else:
value = extended_attribute_node.GetProperty('VALUE')
extended_attributes[name] = value
# Store constructors and custom constructors in special list attributes,
# which are deleted later. Note plural in key.
if constructors:
extended_attributes['Constructors'] = constructors
if custom_constructors:
extended_attributes['CustomConstructors'] = custom_constructors
return extended_attributes
def extended_attributes_to_constructors(idl_name, extended_attributes):
"""Returns constructors and custom_constructors (lists of IdlOperations).
Auxiliary function for IdlInterface.__init__.
"""
constructor_list = extended_attributes.get('Constructors', [])
constructors = [
IdlOperation.constructor_from_arguments_node('Constructor', idl_name, arguments_node)
for arguments_node in constructor_list]
custom_constructor_list = extended_attributes.get('CustomConstructors', [])
custom_constructors = [
IdlOperation.constructor_from_arguments_node('CustomConstructor', idl_name, arguments_node)
for arguments_node in custom_constructor_list]
if 'NamedConstructor' in extended_attributes:
# FIXME: support overloaded named constructors, and make homogeneous
name = 'NamedConstructor'
call_node = extended_attributes['NamedConstructor']
extended_attributes['NamedConstructor'] = call_node.GetName()
children = call_node.GetChildren()
if len(children) != 1:
raise ValueError('NamedConstructor node expects 1 child, got %s.' % len(children))
arguments_node = children[0]
named_constructor = IdlOperation.constructor_from_arguments_node('NamedConstructor', idl_name, arguments_node)
# FIXME: should return named_constructor separately; appended for Perl
constructors.append(named_constructor)
return constructors, custom_constructors
def clear_constructor_attributes(extended_attributes):
# Deletes Constructor*s* (plural), sets Constructor (singular)
if 'Constructors' in extended_attributes:
del extended_attributes['Constructors']
extended_attributes['Constructor'] = None
if 'CustomConstructors' in extended_attributes:
del extended_attributes['CustomConstructors']
extended_attributes['CustomConstructor'] = None
################################################################################
# Types
################################################################################
def type_node_to_type(node):
children = node.GetChildren()
if len(children) < 1 or len(children) > 2:
raise ValueError('Type node expects 1 or 2 children (type + optional array []), got %s (multi-dimensional arrays are not supported).' % len(children))
base_type = type_node_inner_to_type(children[0])
if node.GetProperty('NULLABLE'):
base_type = IdlNullableType(base_type)
if len(children) == 2:
array_node = children[1]
array_node_class = array_node.GetClass()
if array_node_class != 'Array':
raise ValueError('Expected Array node as TypeSuffix, got %s node.' % array_node_class)
array_type = IdlArrayType(base_type)
if array_node.GetProperty('NULLABLE'):
return IdlNullableType(array_type)
return array_type
return base_type
def type_node_inner_to_type(node):
node_class = node.GetClass()
# Note Type*r*ef, not Typedef, meaning the type is an identifier, thus
# either a typedef shorthand (but not a Typedef declaration itself) or an
# interface type. We do not distinguish these, and just use the type name.
if node_class in ['PrimitiveType', 'Typeref']:
# unrestricted syntax: unrestricted double | unrestricted float
is_unrestricted = node.GetProperty('UNRESTRICTED') or False
return IdlType(node.GetName(), is_unrestricted=is_unrestricted)
elif node_class == 'Any':
return IdlType('any')
elif node_class == 'Sequence':
return sequence_node_to_type(node)
elif node_class == 'UnionType':
return union_type_node_to_idl_union_type(node)
raise ValueError('Unrecognized node class: %s' % node_class)
def sequence_node_to_type(node):
children = node.GetChildren()
if len(children) != 1:
raise ValueError('Sequence node expects exactly 1 child, got %s' % len(children))
sequence_child = children[0]
sequence_child_class = sequence_child.GetClass()
if sequence_child_class != 'Type':
raise ValueError('Unrecognized node class: %s' % sequence_child_class)
element_type = type_node_to_type(sequence_child)
sequence_type = IdlSequenceType(element_type)
if node.GetProperty('NULLABLE'):
return IdlNullableType(sequence_type)
return sequence_type
def typedef_node_to_type(node):
children = node.GetChildren()
if len(children) != 1:
raise ValueError('Typedef node with %s children, expected 1' % len(children))
child = children[0]
child_class = child.GetClass()
if child_class != 'Type':
raise ValueError('Unrecognized node class: %s' % child_class)
return type_node_to_type(child)
def union_type_node_to_idl_union_type(node):
member_types = [type_node_to_type(member_type_node)
for member_type_node in node.GetChildren()]
return IdlUnionType(member_types)
|
{
"content_hash": "e67984db7cfd42c81f73bdb3d6ab17f4",
"timestamp": "",
"source": "github",
"line_count": 796,
"max_line_length": 158,
"avg_line_length": 41.89698492462311,
"alnum_prop": 0.5886056971514243,
"repo_name": "Fusion-Rom/android_external_chromium_org_third_party_WebKit",
"id": "ee7158ab663cde7578524829e76c38e02088f376",
"size": "34880",
"binary": false,
"copies": "11",
"ref": "refs/heads/lp5.1",
"path": "Source/bindings/scripts/idl_definitions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "14584"
},
{
"name": "Batchfile",
"bytes": "35"
},
{
"name": "C",
"bytes": "106925"
},
{
"name": "C++",
"bytes": "41915988"
},
{
"name": "CSS",
"bytes": "386919"
},
{
"name": "Groff",
"bytes": "26536"
},
{
"name": "HTML",
"bytes": "11501040"
},
{
"name": "Java",
"bytes": "66510"
},
{
"name": "JavaScript",
"bytes": "9328662"
},
{
"name": "Makefile",
"bytes": "99861997"
},
{
"name": "Objective-C",
"bytes": "48021"
},
{
"name": "Objective-C++",
"bytes": "377388"
},
{
"name": "PHP",
"bytes": "3941"
},
{
"name": "Perl",
"bytes": "490099"
},
{
"name": "Python",
"bytes": "3712782"
},
{
"name": "Ruby",
"bytes": "141818"
},
{
"name": "Shell",
"bytes": "8806"
},
{
"name": "Yacc",
"bytes": "64394"
}
],
"symlink_target": ""
}
|