| text (string, 4–1.02M chars) | meta (dict) |
|---|---|
from MafiaBot.MafiaItem import MafiaItem
from MafiaBot.MafiaAction import MafiaAction
class FakeBackgroundCheck(MafiaItem):
def __init__(self, name, receiveday=0):
super(FakeBackgroundCheck, self).__init__(name, receiveday)
self.type = MafiaItem.CHECK
self.fake = True
def ReceiveItemPM(self):
return 'You have received a background check! It is called '+self.name+'. You may use it during future nights to investigate another player\'s faction with the command !use '+self.name+' <target>.'
@staticmethod
def GetBaseName():
return 'check'
@staticmethod
def ItemDescription():
return 'Fake background checks pretend to provide a faction investigation to their owner. In reality, they always give a \'Town\' result.'
def HandleCommand(self, param, player, mb):
if self.requiredaction:
target = mb.GetPlayer(param)
if target is not None:
if not target.IsDead():
if target is player:
return False, 'You cannot investigate yourself!'
else:
mb.actionlist.append(MafiaAction(MafiaAction.CHECKFACTION, player, target, True, {'sanity': 'naive'}))
self.requiredaction = False
player.UpdateActions()
return True, 'You will investigate '+str(target)+' tonight.'
return False, 'Cannot find player '+param
return False, None
def BeginNightPhase(self, mb, player):
self.requiredaction = True
return 'Background Check: You may use your check '+self.name+' received on night '+str(self.receiveday)+' to investigate another player. To do so, use !use '+self.name+' <target>.'
|
{
"content_hash": "6ecea4dfd96a67c8bd3c977402837f80",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 205,
"avg_line_length": 44.6,
"alnum_prop": 0.6266816143497758,
"repo_name": "LLCoolDave/MafiaBot",
"id": "8c4a793601b5af35ee9bdc170a7cdae4c730c0c4",
"size": "1784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MafiaBot/Items/FakeBackgroundCheck.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "158720"
}
],
"symlink_target": ""
}
|
"""Code for creating a dataset out of a NumPy array."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.util import nest
def init_var_from_numpy(input_var, numpy_input, session):
"""Initialize `input_var` to `numpy_input` using `session` in graph mode."""
with ops.init_scope():
if context.executing_eagerly():
input_var.assign(numpy_input)
return
assert session is not None
session.run(input_var.initializer)
start_placeholder = array_ops.placeholder(dtypes.int64, ())
end_placeholder = array_ops.placeholder(dtypes.int64, ())
slice_placeholder = array_ops.placeholder(input_var.dtype)
assign_slice_op = input_var[start_placeholder:end_placeholder].assign(
slice_placeholder)
# If each batch element is > 64 MB, then we copy each batch element
# individually. Otherwise, the slices will be < 128 MB. There might be
# padding which might mean that the slices are 128 MB even if the size of
# the tensor allocated is less than 128 MB. This formula gives slices with
# size: ceil(64 MB / byte size per batch element) bytes. Using ceil()
# guarantees we get a number >= 1.
# Calculate the size of each batch element.
byte_size_per_batch_element = (
np.prod(numpy_input.shape[1:]) * input_var.dtype.size)
# Calculate number of elements we want to copy per slice.
batch_size_per_slice = int(
np.ceil((64 << 20) / byte_size_per_batch_element))
# Copy slices of the above size starting at 0, except the last slice will be
# smaller.
start = 0
limit = numpy_input.shape[0]
while start < limit:
end = min(start + batch_size_per_slice, limit)
session.run(assign_slice_op, feed_dict={
start_placeholder: start,
end_placeholder: end,
slice_placeholder: numpy_input[start:end]})
start = end
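# Worked example of the 64 MB slicing rule above (the shape and dtype are
# hypothetical, not taken from this module): for numpy_input of shape
# (10000, 512, 512, 3) with float32 elements (4 bytes each),
#   byte_size_per_batch_element = 512 * 512 * 3 * 4 = 3,145,728 bytes (~3 MB)
#   batch_size_per_slice = ceil((64 << 20) / 3145728) = ceil(21.33) = 22
# so the while loop above feeds 22 rows per session.run call, with a smaller
# final slice covering the remainder.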
def one_host_numpy_dataset(numpy_input, colocate_with, session):
"""Create a dataset on `colocate_with` from `numpy_input`."""
def create_colocated_variable(next_creator, *args, **kwargs):
kwargs["colocate_with"] = colocate_with
return next_creator(*args, **kwargs)
numpy_flat = nest.flatten(numpy_input)
with variable_scope.variable_creator_scope(create_colocated_variable):
vars_flat = tuple(variable_scope.variable(array_ops.zeros(i.shape, i.dtype),
trainable=False)
for i in numpy_flat)
for v, i in zip(vars_flat, numpy_flat):
init_var_from_numpy(v, i, session)
vars_nested = nest.pack_sequence_as(numpy_input, vars_flat)
return dataset_ops.Dataset.from_tensor_slices(vars_nested)
class SingleDevice(object):
"""Used with `colocate_with` to create a non-mirrored variable."""
def __init__(self, device):
self.device = device
|
{
"content_hash": "b08ddaca5a41f713bb310ccf8259119b",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 80,
"avg_line_length": 38.433734939759034,
"alnum_prop": 0.6905956112852665,
"repo_name": "ageron/tensorflow",
"id": "5881e4cd59e75ac5184e400bd0ac90443084635e",
"size": "3879",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "tensorflow/python/distribute/numpy_dataset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "644380"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59281238"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1501606"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908340"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94466"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15024"
},
{
"name": "Pascal",
"bytes": "617"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46230508"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "481859"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
}
|
from . import core
from . import data
from .data import get
# from .core import year_frac, PerformanceStats, GroupStats, merge
from .core import *
core.extend_pandas()
__version__ = (0, 3, 6)
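# Usage sketch (the tickers, dates, and network download are illustrative
# assumptions, not part of this module):
#   import ffn
#   prices = ffn.get('spy,agg', start='2010-01-01')  # fetch adjusted prices
#   stats = prices.calc_stats()  # available because core.extend_pandas() patches pandas
#   stats.display()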
|
{
"content_hash": "dfe1cb0f7fe202ed9bc69a16abbacaad",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 66,
"avg_line_length": 17.818181818181817,
"alnum_prop": 0.7040816326530612,
"repo_name": "pmorissette/ffn",
"id": "bf6760b46fb2c3f03e19eeb8969ba6fb54a308d6",
"size": "196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ffn/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "998"
},
{
"name": "Python",
"bytes": "120614"
}
],
"symlink_target": ""
}
|
"""
gargoyle.nexus_modules
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import logging
import nexus
import os.path
from functools import wraps
from django.conf import settings
from django.http import HttpResponse, HttpResponseNotFound
from gargoyle import gargoyle, autodiscover
from gargoyle.helpers import dumps
from gargoyle.models import Switch, DISABLED
from gargoyle.conditions import ValidationError
from gargoyle import signals
GARGOYLE_ROOT = os.path.dirname(__file__)
autodiscover()
logger = logging.getLogger('gargoyle.switches')
class GargoyleException(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
def json(func):
"Decorator to make JSON views simpler"
def wrapper(self, request, *args, **kwargs):
try:
response = {
"success": True,
"data": func(self, request, *args, **kwargs)
}
except GargoyleException, exc:
response = {
"success": False,
"data": exc.message
}
except Switch.DoesNotExist:
response = {
"success": False,
"data": "Switch cannot be found"
}
except ValidationError, e:
response = {
"success": False,
"data": u','.join(map(unicode, e.messages)),
}
except Exception:
if settings.DEBUG:
import traceback
traceback.print_exc()
raise
return HttpResponse(dumps(response), mimetype="application/json")
wrapper = wraps(func)(wrapper)
return wrapper
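# A sketch of the JSON envelope the decorator above produces (illustrative
# values; the switch payload comes from Switch.to_dict):
#   on success: {"success": true, "data": {"key": "my_switch", ...}}
#   on failure: {"success": false, "data": "Switch cannot be found"}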
class GargoyleModule(nexus.NexusModule):
home_url = 'index'
name = 'gargoyle'
def get_title(self):
return 'Gargoyle'
def get_urls(self):
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('',
url(r'^add/$', self.as_view(self.add), name='add'),
url(r'^update/$', self.as_view(self.update), name='update'),
url(r'^delete/$', self.as_view(self.delete), name='delete'),
url(r'^status/$', self.as_view(self.status), name='status'),
url(r'^conditions/add/$', self.as_view(self.add_condition), name='add-condition'),
url(r'^conditions/remove/$', self.as_view(self.remove_condition), name='remove-condition'),
url(r'^$', self.as_view(self.index), name='index'),
)
return urlpatterns
def render_on_dashboard(self, request):
active_switches_count = Switch.objects.exclude(status=DISABLED).count()
switches = list(Switch.objects.exclude(status=DISABLED).order_by("date_created")[:5])
return self.render_to_string('gargoyle/nexus/dashboard.html', {
'switches': switches,
'active_switches_count': active_switches_count,
})
def index(self, request):
sort_by = request.GET.get('by', '-date_modified')
if sort_by not in self.valid_sort_orders:
return HttpResponseNotFound('Invalid sort order.')
switches = list(Switch.objects.all().order_by(sort_by))
return self.render_to_response("gargoyle/index.html", {
"switches": [s.to_dict(gargoyle) for s in switches],
"all_conditions": list(gargoyle.get_all_conditions()),
"sorted_by": sort_by
}, request)
def add(self, request):
key = request.POST.get("key")
if not key:
raise GargoyleException("Key cannot be empty")
if len(key) > 64:
raise GargoyleException("Key must be less than or equal to 64 characters in length")
label = request.POST.get("name", "").strip()
if len(label) > 64:
raise GargoyleException("Name must be less than or equal to 64 characters in length")
switch, created = Switch.objects.get_or_create(
key=key,
defaults=dict(
label=label or None,
description=request.POST.get("desc")
)
)
if not created:
raise GargoyleException("Switch with key %s already exists" % key)
logger.info('Switch %r added (%%s)' % switch.key,
', '.join('%s=%r' % (k, getattr(switch, k)) for k in sorted(('key', 'label', 'description', ))))
signals.switch_added.send(
sender=self,
request=request,
switch=switch,
)
return switch.to_dict(gargoyle)
add = json(add)
def update(self, request):
switch = Switch.objects.get(key=request.POST.get("curkey"))
key = request.POST.get("key")
if len(key) > 64:
raise GargoyleException("Key must be less than or equal to 64 characters in length")
label = request.POST.get("name", "")
if len(label) > 64:
raise GargoyleException("Name must be less than or equal to 64 characters in length")
values = dict(
label=label,
key=key,
description=request.POST.get("desc"),
)
changes = {}
for attribute, value in values.iteritems():
new_value = getattr(switch, attribute)
if new_value != value:
changes[attribute] = (value, new_value)
if changes:
if switch.key != key:
switch.delete()
switch.key = key
switch.label = label
switch.description = request.POST.get("desc")
switch.save()
logger.info('Switch %r updated %%s' % switch.key,
', '.join('%s=%r->%r' % (k, v[0], v[1]) for k, v in sorted(changes.iteritems())))
signals.switch_updated.send(
sender=self,
request=request,
switch=switch,
changes=changes,
)
return switch.to_dict(gargoyle)
update = json(update)
def status(self, request):
switch = Switch.objects.get(key=request.POST.get("key"))
try:
status = int(request.POST.get("status"))
except ValueError:
raise GargoyleException("Status must be integer")
old_status = switch.status
old_status_label = switch.get_status_display()
if switch.status != status:
switch.status = status
switch.save()
logger.info('Switch %r updated (status=%%s->%%s)' % switch.key,
old_status_label, switch.get_status_display())
signals.switch_status_updated.send(
sender=self,
request=request,
switch=switch,
old_status=old_status,
status=status,
)
return switch.to_dict(gargoyle)
status = json(status)
def delete(self, request):
switch = Switch.objects.get(key=request.POST.get("key"))
switch.delete()
logger.info('Switch %r removed' % switch.key)
signals.switch_deleted.send(
sender=self,
request=request,
switch=switch,
)
return {}
delete = json(delete)
def add_condition(self, request):
key = request.POST.get("key")
condition_set_id = request.POST.get("id")
field_name = request.POST.get("field")
exclude = int(request.POST.get("exclude") or 0)
if not all([key, condition_set_id, field_name]):
raise GargoyleException("Fields cannot be empty")
field = gargoyle.get_condition_set_by_id(condition_set_id).fields[field_name]
value = field.validate(request.POST)
switch = gargoyle[key]
switch.add_condition(condition_set_id, field_name, value, exclude=exclude)
logger.info('Condition added to %r (%r, %s=%r, exclude=%r)' % (switch.key,
condition_set_id, field_name, value, bool(exclude)))
signals.switch_condition_added.send(
sender=self,
request=request,
switch=switch,
condition={
'condition_set_id': condition_set_id,
'field_name': field_name,
'value': value,
},
)
return switch.to_dict(gargoyle)
add_condition = json(add_condition)
def remove_condition(self, request):
key = request.POST.get("key")
condition_set_id = request.POST.get("id")
field_name = request.POST.get("field")
value = request.POST.get("value")
if not all([key, condition_set_id, field_name, value]):
raise GargoyleException("Fields cannot be empty")
switch = gargoyle[key]
switch.remove_condition(condition_set_id, field_name, value)
logger.info('Condition removed from %r (%r, %s=%r)' % (switch.key,
condition_set_id, field_name, value))
signals.switch_condition_removed.send(
sender=self,
request=request,
switch=switch,
condition={
'condition_set_id': condition_set_id,
'field_name': field_name,
'value': value,
},
)
return switch.to_dict(gargoyle)
remove_condition = json(remove_condition)
@property
def valid_sort_orders(self):
fields = ['label', 'date_created', 'date_modified']
return fields + ['-' + f for f in fields]
nexus.site.register(GargoyleModule, 'gargoyle')
|
{
"content_hash": "52cf21639f12253766f250c21d861281",
"timestamp": "",
"source": "github",
"line_count": 314,
"max_line_length": 122,
"avg_line_length": 31.29936305732484,
"alnum_prop": 0.5534188034188035,
"repo_name": "miing/mci_migo_packages_gargoyle",
"id": "bf0c0792b23b19cbec72c7ddc4516649460ba01c",
"size": "9828",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gargoyle/nexus_modules.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "8569"
},
{
"name": "Python",
"bytes": "111217"
},
{
"name": "Shell",
"bytes": "4515"
}
],
"symlink_target": ""
}
|
import os
import logging
from project_generator_definitions.definitions import ProGenTargets
from ..tools_supported import ToolsSupported
from ..generate import Generator
from ..settings import ProjectSettings
from . import argparse_filestring_type
help = 'List general progen data as projects, tools or targets'
def run(args):
if args.file and os.path.exists(args.file):
generator = Generator(args.file)
for project in generator.generate():
if args.section == 'targets':
print("%s supports: %s"%(project.project['name'], project.project['target']))
elif args.section == 'projects':
print (project.name)
elif args.section == 'tools':
tools = [tool for tool, value in project.tool_specific.items() if value.linker_file is not None]
tools = ", ".join(tools)
print("%s supports: %s\n"%(project.project['name'], tools))
else:
if args.section == 'targets':
print("\nProgen supports the following targets:\n")
print("\n".join(ProGenTargets().get_targets()))
elif args.section == 'tools':
print("\nProgen supports the following tools:\n")
print("\n".join(ToolsSupported().get_supported()))
elif args.section == 'projects':
print("\nFile needs to be defined for projects.")
return 0
def setup(subparser):
subparser.add_argument("section", choices = ['targets','tools','projects'],
help="What section you would like listed", default='projects')
subparser.add_argument("-f", "--file", help="YAML projects file", type=argparse_filestring_type)
|
{
"content_hash": "f494bd6809a2ed8e1dfce0bd242b20ef",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 112,
"avg_line_length": 42.55,
"alnum_prop": 0.6257344300822562,
"repo_name": "ohagendorf/project_generator",
"id": "aa1ab6caef8965f101fb81598b04dbbebc59fff7",
"size": "2278",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "project_generator/commands/list_projects.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "256765"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division
"""
This module implements error handlers for Nwchem runs. Currently tested only
for B3LYP DFT jobs.
"""
__author__ = "Shyue Ping Ong"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "ongsp@ucsd.edu"
__status__ = "Beta"
__date__ = "5/20/13"
from custodian.custodian import ErrorHandler
from custodian.utils import backup
from pymatgen.io.nwchem import NwOutput, NwInput
from custodian.ansible.interpreter import Modder
class NwchemErrorHandler(ErrorHandler):
"""
Error handler for Nwchem Jobs. Currently tested only for B3LYP DFT jobs
generated by pymatgen.
"""
def __init__(self, output_filename="mol.nwout"):
"""
Initializes with an output file name.
Args:
output_filename (str): This is the file where the stdout for nwchem
is being redirected. The error messages that are checked are
present in the stdout. Defaults to "mol.nwout", which is the
default redirect used by :class:`custodian.nwchem.jobs
.NwchemJob`.
"""
self.output_filename = output_filename
def check(self):
# Checks output file for errors.
out = NwOutput(self.output_filename)
self.errors = []
self.input_file = out.job_info['input']
if out.data[-1]["has_error"]:
self.errors.extend(out.data[-1]["errors"])
self.errors = list(set(self.errors))
self.ntasks = len(out.data)
return len(self.errors) > 0
def _mod_input(self, search_string_func, mod_string_func):
with open(self.input_file) as f:
lines = []
for l in f:
if search_string_func(l):
lines.append(mod_string_func(l))
else:
lines.append(l)
with open(self.input_file, "w") as fout:
fout.write("".join(lines))
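# Illustrative use of _mod_input (a sketch; the directive rewrite shown is
# hypothetical, not one of the corrections implemented below):
#   self._mod_input(lambda line: line.strip().startswith("geometry"),
#                   lambda line: line.replace("autoz", "noautoz"))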
def correct(self):
backup("*.nw*")
actions = []
nwi = NwInput.from_file(self.input_file)
for e in self.errors:
if e == "autoz error":
action = {"_set": {"geometry_options": ["units",
"angstroms",
"noautoz"]}}
actions.append(action)
elif e == "Bad convergence":
t = nwi.tasks[self.ntasks - 1]
if "cgmin" in t.theory_directives:
nwi.tasks.pop(self.ntasks - 1)
else:
t.theory_directives["cgmin"] = ""
for t in nwi.tasks:
if t.operation.startswith("freq"):
#You cannot calculate hessian with cgmin.
t.theory_directives["nocgmin"] = ""
action = {"_set": {"tasks": [t.as_dict() for t in nwi.tasks]}}
actions.append(action)
else:
# For unimplemented errors, this should just cause the job to
# die.
return {"errors": self.errors, "actions": None}
m = Modder()
for action in actions:
nwi = m.modify_object(action, nwi)
nwi.write_file(self.input_file)
return {"errors": self.errors, "actions": actions}
def __str__(self):
return "NwchemErrorHandler"
|
{
"content_hash": "81336492dba4ef2ff2ad5a5e3e7a02d7",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 79,
"avg_line_length": 34.60606060606061,
"alnum_prop": 0.5344424985405721,
"repo_name": "specter119/custodian",
"id": "920ea19d8d07211bb975abfa43bfa335dd9b0cc6",
"size": "3443",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "custodian/nwchem/handlers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "5100"
},
{
"name": "CSS",
"bytes": "1133"
},
{
"name": "HTML",
"bytes": "2710"
},
{
"name": "Makefile",
"bytes": "5577"
},
{
"name": "Python",
"bytes": "316498"
},
{
"name": "Roff",
"bytes": "1494237"
},
{
"name": "Shell",
"bytes": "3021"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
from socialbeer.posts.models import Post
from socialregistration.models import TwitterProfile
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
for post in Post.objects.all():
if post.tweeter_id:
twitter_id = post.tweeter_id
tp = TwitterProfile.objects.get(twitter_id=twitter_id)
post.author = tp.user
post.save()
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'beers.beer': {
'Meta': {'object_name': 'Beer'},
'brewery': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beers.Brewery']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beers.BeerType']", 'null': 'True', 'blank': 'True'})
},
'beers.beertype': {
'Meta': {'object_name': 'BeerType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'beers.brewery': {
'Meta': {'object_name': 'Brewery'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'posts.post': {
'Meta': {'ordering': "['-pub_date']", 'object_name': 'Post'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'beer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['beers.Beer']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'live': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'pub_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'tweet_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'tweeter_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'tweeter_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'tweeter_profile_image': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['posts']
|
{
"content_hash": "236fa478650985e69a5043072d6a98ab",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 182,
"avg_line_length": 64.33333333333333,
"alnum_prop": 0.5435704192180876,
"repo_name": "fxdgear/beersocial",
"id": "2d139b9cfe8b78b02bfabac5ccb0d237dbc0f5f7",
"size": "6387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "socialbeer/posts/migrations/0007_add_author_to_posts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "216423"
},
{
"name": "Python",
"bytes": "107389"
}
],
"symlink_target": ""
}
|
from bs4 import BeautifulSoup
from bs4.element import NavigableString
from bs4.element import Tag
SOURCE = "/Users/lindsayrgwatt/apps/kindle2text/recent_highlights.html"
entries = open(SOURCE, 'r')
soup = BeautifulSoup(entries)
highlights = {} # {'book title':[highlight1, highlight2]}
current_book = {}
# Each book starts with a div with class="bookMain"
first_title = soup.find("div", class_="bookMain")
current_book = {
'title':first_title.find("a").string,
'highlights':[]
}
counter = 0
# Alas, Amazon made every highlight and book siblings of each other; highlights are not children of books
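# A sketch of the flat sibling structure this loop walks (hypothetical markup,
# including the non-bookMain class name, not copied from a real Kindle export):
#   <div class="bookMain"><a>Book One</a></div>
#   <div class="highlightRow"><span class="highlight">First passage...</span></div>
#   <div class="highlightRow"><span class="highlight">Second passage...</span></div>
#   <div class="bookMain"><a>Book Two</a></div>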
for sibling in first_title.next_siblings:
# In a document, the siblings will be of two types:
# NavigableString - typically a newline character
# Tag - the actual BeautifulSoup tag you can iterate over
if isinstance(sibling, Tag):
if 'bookMain' in sibling['class']:
# Promote current book to highlights
highlights[current_book['title']] = current_book['highlights']
# Reset current book
current_book = {
'title':sibling.find("a").string,
'highlights':[]
}
else:
highlight = sibling.find("span", class_="highlight")
if highlight and highlight.string:
current_book['highlights'].append(highlight.string+"\n")
else:
print "Something else here"
# Promote last book
highlights[current_book['title']] = current_book['highlights']
books = highlights.keys()
for book in books:
print "\n"
print book
print "\n"
highlighted = highlights[book]
for highlight in highlighted:
print highlight
print "\n"
print "\n"
print "============================================================================"
|
{
"content_hash": "35ecdbec42dae9d94114cc5216d34194",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 105,
"avg_line_length": 30.766666666666666,
"alnum_prop": 0.609967497291441,
"repo_name": "lindsayrgwatt/kindle2text",
"id": "f4d7a8dea994e077c86a5c9c98a8393859653719",
"size": "1987",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kindle2text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1987"
}
],
"symlink_target": ""
}
|
from pip.backwardcompat import any
import textwrap
from tests.test_pip import reset_env, run_pip, write_file
from tests.path import Path
def test_download_if_requested():
"""
It should download (in the scratch path) and not install if requested.
"""
env = reset_env()
result = run_pip('install', 'INITools==0.1', '-d', '.', expect_error=True)
assert Path('scratch')/ 'INITools-0.1.tar.gz' in result.files_created
assert env.site_packages/ 'initools' not in result.files_created
def test_single_download_from_requirements_file():
"""
It should support download (in the scratch path) from PyPi from a requirements file
"""
env = reset_env()
write_file('test-req.txt', textwrap.dedent("""
INITools==0.1
"""))
result = run_pip('install', '-r', env.scratch_path/ 'test-req.txt', '-d', '.', expect_error=True)
assert Path('scratch')/ 'INITools-0.1.tar.gz' in result.files_created
assert env.site_packages/ 'initools' not in result.files_created
def test_download_should_download_dependencies():
"""
It should download dependencies (in the scratch path)
"""
env = reset_env()
result = run_pip('install', 'Paste[openid]==1.7.5.1', '-d', '.', expect_error=True)
assert Path('scratch')/ 'Paste-1.7.5.1.tar.gz' in result.files_created
openid_tarball_prefix = str(Path('scratch')/ 'python-openid-')
assert any(path.startswith(openid_tarball_prefix) for path in result.files_created)
assert env.site_packages/ 'openid' not in result.files_created
|
{
"content_hash": "7cdfff21df3f23cdcfa437405f24b11c",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 101,
"avg_line_length": 36.18604651162791,
"alnum_prop": 0.6696658097686375,
"repo_name": "integricho/heroku-buildpack-python-ffmpeg2-lame",
"id": "5d4923ccdedbd8da96e7043e9624b32773b14956",
"size": "1556",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "vendor/pip-1.2.1/tests/test_download.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20214"
},
{
"name": "Ruby",
"bytes": "0"
},
{
"name": "Shell",
"bytes": "9942"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
lookup: hashi_vault
author: Jonathan Davila <jdavila(at)ansible.com>
version_added: "2.0"
short_description: retrieve secrets from HashiCorp's Vault
requirements:
- hvac (python library)
description:
- retrieve secrets from HashiCorp's Vault
notes:
- Due to a current limitation in the HVAC library there won't necessarily be an error if a bad endpoint is specified.
options:
secret:
description: query you are making
required: True
token:
description: vault token
env:
- name: VAULT_TOKEN
url:
description: url to vault service
env:
- name: VAULT_ADDR
default: 'http://127.0.0.1:8200'
username:
description: authentication user name
password:
description: authentication password
auth_method:
description: authentication method used
mount_point:
description: vault mount point, only required if you have a custom mount point
default: ldap
cacert:
description: path to certificate to use for authentication
validate_certs:
description: controls verification and validation of SSL certificates; you usually only want to turn this off with self-signed certificates.
type: boolean
default: True
"""
EXAMPLES = """
- debug: msg="{{ lookup('hashi_vault', 'secret=secret/hello:value token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200')}}"
- name: Vault that requires authentication via ldap
debug: msg="{{ lookup('hashi_vault', 'secret=secret/hello:value auth_method=ldap mount_point=ldap username=myuser password=mypas url=http://myvault:8200')}}"
- name: Using an ssl vault
debug: msg="{{ lookup('hashi_vault', 'secret=secret/hola:value token=c975b780-d1be-8016-866b-01d0f9b688a5 url=https://myvault:8200 validate_certs=False')}}"
- name: using certificate auth
debug: msg="{{ lookup('hashi_vault', 'secret=secret/hi:value token=xxxx-xxx-xxx url=https://myvault:8200 validate_certs=True cacert=/cacert/path/ca.pem')}}"
"""
RETURN = """
_raw:
description:
- secret(s) requested
"""
import os
from ansible.errors import AnsibleError
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.lookup import LookupBase
HAS_HVAC = False
try:
import hvac
HAS_HVAC = True
except ImportError:
HAS_HVAC = False
ANSIBLE_HASHI_VAULT_ADDR = 'http://127.0.0.1:8200'
if os.getenv('VAULT_ADDR') is not None:
ANSIBLE_HASHI_VAULT_ADDR = os.environ['VAULT_ADDR']
class HashiVault:
def __init__(self, **kwargs):
self.url = kwargs.get('url', ANSIBLE_HASHI_VAULT_ADDR)
# split secret arg, which has format 'secret/hello:value' into secret='secret/hello' and secret_field='value'
s = kwargs.get('secret')
if s is None:
raise AnsibleError("No secret specified for hashi_vault lookup")
s_f = s.split(':')
self.secret = s_f[0]
if len(s_f) >= 2:
self.secret_field = s_f[1]
else:
self.secret_field = 'value'
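# For example (a sketch of the parsing above): 'secret=secret/hello:value'
# yields secret='secret/hello' and secret_field='value';
# 'secret=secret/hello' falls back to the default secret_field='value';
# 'secret=secret/hello:' yields secret_field='' (the whole dict is returned, see get()).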
# if a particular backend is asked for (and its method exists) we call it, otherwise drop through to using
# token auth. this means if a particular auth backend is requested and a token is also given, then we
# ignore the token and attempt authentication against the specified backend.
#
# to enable a new auth backend, simply add a new 'def auth_<type>' method below.
#
self.auth_method = kwargs.get('auth_method')
if self.auth_method:
try:
self.client = hvac.Client(url=self.url)
# prefixing with auth_ to limit which methods can be accessed
getattr(self, 'auth_' + self.auth_method)(**kwargs)
except AttributeError:
raise AnsibleError("Authentication method '%s' not supported" % self.auth_method)
else:
self.token = kwargs.get('token', os.environ.get('VAULT_TOKEN', None))
if self.token is None and os.environ.get('HOME'):
token_filename = os.path.join(
os.environ.get('HOME'),
'.vault-token'
)
if os.path.exists(token_filename):
with open(token_filename) as token_file:
self.token = token_file.read().strip()
if self.token is None:
raise AnsibleError("No Vault Token specified")
self.verify = self.boolean_or_cacert(kwargs.get('validate_certs', True), kwargs.get('cacert', ''))
self.client = hvac.Client(url=self.url, token=self.token, verify=self.verify)
if not self.client.is_authenticated():
raise AnsibleError("Invalid Hashicorp Vault Token Specified for hashi_vault lookup")
def get(self):
data = self.client.read(self.secret)
if data is None:
raise AnsibleError("The secret %s doesn't seem to exist for hashi_vault lookup" % self.secret)
if self.secret_field == '': # secret was specified with trailing ':'
return data['data']
if self.secret_field not in data['data']:
raise AnsibleError("The secret %s does not contain the field '%s'. for hashi_vault lookup" % (self.secret, self.secret_field))
return data['data'][self.secret_field]
def auth_ldap(self, **kwargs):
username = kwargs.get('username')
if username is None:
raise AnsibleError("Authentication method ldap requires a username")
password = kwargs.get('password')
if password is None:
raise AnsibleError("Authentication method ldap requires a password")
mount_point = kwargs.get('mount_point')
if mount_point is None:
mount_point = 'ldap'
self.client.auth_ldap(username, password, mount_point)
def boolean_or_cacert(self, validate_certs, cacert):
validate_certs = boolean(validate_certs, strict=False)
# return a bool or cacert
if validate_certs is True:
if cacert != '':
return cacert
else:
return True
else:
return False
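# Sketch of how an additional auth backend could be wired in, following the
# 'auth_<type>' dispatch described in HashiVault.__init__ (hypothetical method;
# it assumes the hvac client of this era exposes auth_userpass, as it does auth_ldap):
#
#     def auth_userpass(self, **kwargs):
#         username = kwargs.get('username')
#         password = kwargs.get('password')
#         if username is None or password is None:
#             raise AnsibleError("Authentication method userpass requires a username and password")
#         self.client.auth_userpass(username, password)
#
# With that method on HashiVault, passing auth_method=userpass in the lookup
# term would route to it automatically via getattr(self, 'auth_' + auth_method).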
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
if not HAS_HVAC:
raise AnsibleError("Please pip install hvac to use the hashi_vault lookup module.")
vault_args = terms[0].split(' ')
vault_dict = {}
ret = []
for param in vault_args:
try:
key, value = param.split('=')
except ValueError:
raise AnsibleError("hashi_vault lookup plugin needs key=value pairs, but received %s" % terms)
vault_dict[key] = value
vault_conn = HashiVault(**vault_dict)
for term in terms:
key = term.split()[0]
value = vault_conn.get()
ret.append(value)
return ret
|
{
"content_hash": "695b691a6f75796cf8ddbaa6a4d5b4c8",
"timestamp": "",
"source": "github",
"line_count": 200,
"max_line_length": 159,
"avg_line_length": 35.4,
"alnum_prop": 0.6244350282485875,
"repo_name": "e-gob/plataforma-kioscos-autoatencion",
"id": "6b11f5f1bd17c6f3b86e0129819ab99ad00e9bbf",
"size": "7254",
"binary": false,
"copies": "16",
"ref": "refs/heads/master",
"path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/plugins/lookup/hashi_vault.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "34823"
},
{
"name": "CoffeeScript",
"bytes": "8521"
},
{
"name": "HTML",
"bytes": "61168"
},
{
"name": "JavaScript",
"bytes": "7206"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "PowerShell",
"bytes": "584344"
},
{
"name": "Python",
"bytes": "25506593"
},
{
"name": "Ruby",
"bytes": "245726"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
}
|
import mock
from oslo_messaging.rpc import dispatcher
from heat.common import exception
from heat.engine import service
from heat.engine import service_stack_watch
from heat.engine import stack
from heat.engine import watchrule
from heat.objects import stack as stack_object
from heat.objects import watch_data as watch_data_object
from heat.objects import watch_rule as watch_rule_object
from heat.rpc import api as rpc_api
from heat.tests import common
from heat.tests.engine import tools
from heat.tests import utils
class StackWatchTest(common.HeatTestCase):
def setUp(self):
super(StackWatchTest, self).setUp()
self.ctx = utils.dummy_context(tenant_id='stack_watch_test_tenant')
self.eng = service.EngineService('a-host', 'a-topic')
self.eng.create_periodic_tasks()
# self.eng.engine_id = 'engine-fake-uuid'
@mock.patch.object(service_stack_watch.StackWatch, 'start_watch_task')
@mock.patch.object(stack_object.Stack, 'get_all')
@mock.patch.object(service.service.Service, 'start')
def test_start_watches_all_stacks(self, mock_super_start, mock_get_all,
start_watch_task):
s1 = mock.Mock(id=1)
s2 = mock.Mock(id=2)
mock_get_all.return_value = [s1, s2]
start_watch_task.return_value = None
self.eng.thread_group_mgr = None
self.eng.create_periodic_tasks()
mock_get_all.assert_called_once_with(mock.ANY, tenant_safe=False,
show_hidden=True)
calls = start_watch_task.call_args_list
self.assertEqual(2, start_watch_task.call_count)
self.assertIn(mock.call(1, mock.ANY), calls)
self.assertIn(mock.call(2, mock.ANY), calls)
@tools.stack_context('service_show_watch_test_stack', False)
def test_show_watch(self):
# Insert two dummy watch rules into the DB
rule = {u'EvaluationPeriods': u'1',
u'AlarmActions': [u'WebServerRestartPolicy'],
u'AlarmDescription': u'Restart the WikiDatabase',
u'Namespace': u'system/linux',
u'Period': u'300',
u'ComparisonOperator': u'GreaterThanThreshold',
u'Statistic': u'SampleCount',
u'Threshold': u'2',
u'MetricName': u'ServiceFailure'}
self.wr = []
self.wr.append(watchrule.WatchRule(context=self.ctx,
watch_name='show_watch_1',
rule=rule,
watch_data=[],
stack_id=self.stack.id,
state='NORMAL'))
self.wr[0].store()
self.wr.append(watchrule.WatchRule(context=self.ctx,
watch_name='show_watch_2',
rule=rule,
watch_data=[],
stack_id=self.stack.id,
state='NORMAL'))
self.wr[1].store()
# watch_name=None should return all watches
result = self.eng.show_watch(self.ctx, watch_name=None)
result_names = [r.get('name') for r in result]
self.assertIn('show_watch_1', result_names)
self.assertIn('show_watch_2', result_names)
result = self.eng.show_watch(self.ctx, watch_name="show_watch_1")
self.assertEqual(1, len(result))
self.assertIn('name', result[0])
self.assertEqual('show_watch_1', result[0]['name'])
result = self.eng.show_watch(self.ctx, watch_name="show_watch_2")
self.assertEqual(1, len(result))
self.assertIn('name', result[0])
self.assertEqual('show_watch_2', result[0]['name'])
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.show_watch,
self.ctx, watch_name="nonexistent")
self.assertEqual(exception.EntityNotFound, ex.exc_info[0])
# Check the response has all keys defined in the engine API
for key in rpc_api.WATCH_KEYS:
self.assertIn(key, result[0])
@tools.stack_context('service_show_watch_metric_test_stack', False)
def test_show_watch_metric(self):
# Insert dummy watch rule into the DB
rule = {u'EvaluationPeriods': u'1',
u'AlarmActions': [u'WebServerRestartPolicy'],
u'AlarmDescription': u'Restart the WikiDatabase',
u'Namespace': u'system/linux',
u'Period': u'300',
u'ComparisonOperator': u'GreaterThanThreshold',
u'Statistic': u'SampleCount',
u'Threshold': u'2',
u'MetricName': u'ServiceFailure'}
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name='show_watch_metric_1',
rule=rule,
watch_data=[],
stack_id=self.stack.id,
state='NORMAL')
self.wr.store()
# And add a metric datapoint
watch = watch_rule_object.WatchRule.get_by_name(self.ctx,
'show_watch_metric_1')
self.assertIsNotNone(watch)
values = {'watch_rule_id': watch.id,
'data': {u'Namespace': u'system/linux',
u'ServiceFailure': {
u'Units': u'Counter', u'Value': 1}}}
watch_data_object.WatchData.create(self.ctx, values)
# Check there is one result returned
result = self.eng.show_watch_metric(self.ctx,
metric_namespace=None,
metric_name=None)
self.assertEqual(1, len(result))
# Create another metric datapoint and check we get two
watch_data_object.WatchData.create(self.ctx, values)
result = self.eng.show_watch_metric(self.ctx,
metric_namespace=None,
metric_name=None)
self.assertEqual(2, len(result))
# Check the response has all keys defined in the engine API
for key in rpc_api.WATCH_DATA_KEYS:
self.assertIn(key, result[0])
@tools.stack_context('service_show_watch_state_test_stack')
@mock.patch.object(stack.Stack, 'resource_by_refid')
def test_set_watch_state(self, mock_ref):
# Insert dummy watch rule into the DB
rule = {u'EvaluationPeriods': u'1',
u'AlarmActions': [u'WebServerRestartPolicy'],
u'AlarmDescription': u'Restart the WikiDatabase',
u'Namespace': u'system/linux',
u'Period': u'300',
u'ComparisonOperator': u'GreaterThanThreshold',
u'Statistic': u'SampleCount',
u'Threshold': u'2',
u'MetricName': u'ServiceFailure'}
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name='OverrideAlarm',
rule=rule,
watch_data=[],
stack_id=self.stack.id,
state='NORMAL')
self.wr.store()
class DummyAction(object):
def signal(self):
return "dummyfoo"
dummy_action = DummyAction()
mock_ref.return_value = dummy_action
# Replace the real stack threadgroup with a dummy one, so we can
# check the function returned on ALARM is correctly scheduled
dtg = tools.DummyThreadGroup()
self.eng.thread_group_mgr.groups[self.stack.id] = dtg
state = watchrule.WatchRule.NODATA
result = self.eng.set_watch_state(self.ctx,
watch_name="OverrideAlarm",
state=state)
self.assertEqual(state, result[rpc_api.WATCH_STATE_VALUE])
self.assertEqual(
[], self.eng.thread_group_mgr.groups[self.stack.id].threads)
state = watchrule.WatchRule.NORMAL
result = self.eng.set_watch_state(self.ctx,
watch_name="OverrideAlarm",
state=state)
self.assertEqual(state, result[rpc_api.WATCH_STATE_VALUE])
self.assertEqual(
[], self.eng.thread_group_mgr.groups[self.stack.id].threads)
state = watchrule.WatchRule.ALARM
result = self.eng.set_watch_state(self.ctx,
watch_name="OverrideAlarm",
state=state)
self.assertEqual(state, result[rpc_api.WATCH_STATE_VALUE])
self.assertEqual(
[dummy_action.signal],
self.eng.thread_group_mgr.groups[self.stack.id].threads)
mock_ref.assert_called_once_with('WebServerRestartPolicy')
@tools.stack_context('service_show_watch_state_badstate_test_stack')
@mock.patch.object(watchrule.WatchRule, 'set_watch_state')
def test_set_watch_state_badstate(self, mock_set):
mock_set.side_effect = ValueError
# Insert dummy watch rule into the DB
rule = {u'EvaluationPeriods': u'1',
u'AlarmActions': [u'WebServerRestartPolicy'],
u'AlarmDescription': u'Restart the WikiDatabase',
u'Namespace': u'system/linux',
u'Period': u'300',
u'ComparisonOperator': u'GreaterThanThreshold',
u'Statistic': u'SampleCount',
u'Threshold': u'2',
u'MetricName': u'ServiceFailure'}
self.wr = watchrule.WatchRule(context=self.ctx,
watch_name='OverrideAlarm2',
rule=rule,
watch_data=[],
stack_id=self.stack.id,
state='NORMAL')
self.wr.store()
for state in ["HGJHGJHG", "1234", "!\\*(&%"]:
self.assertRaises(ValueError,
self.eng.set_watch_state,
self.ctx, watch_name="OverrideAlarm2",
state=state)
calls = [mock.call("HGJHGJHG"),
mock.call("1234"),
mock.call("!\\*(&%")]
mock_set.assert_has_calls(calls)
@mock.patch.object(watchrule.WatchRule, 'load')
def test_set_watch_state_noexist(self, mock_load):
state = watchrule.WatchRule.ALARM # State valid
mock_load.side_effect = exception.EntityNotFound(entity='Watch Rule',
name='test')
ex = self.assertRaises(dispatcher.ExpectedException,
self.eng.set_watch_state,
self.ctx, watch_name="nonexistent",
state=state)
self.assertEqual(exception.EntityNotFound, ex.exc_info[0])
mock_load.assert_called_once_with(self.ctx, "nonexistent")
|
{
"content_hash": "c0643b46e7b17fe636f984b26edd8b69",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 78,
"avg_line_length": 45.22529644268775,
"alnum_prop": 0.5297150847753889,
"repo_name": "dims/heat",
"id": "c47963ca342ee886dd872a31906655bb1758a0e4",
"size": "12017",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "heat/tests/engine/service/test_stack_watch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7618889"
},
{
"name": "Shell",
"bytes": "32548"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cart', '0007_cartitem_begin_date'),
]
operations = [
migrations.AddField(
model_name='cartitem',
name='auto_renew',
field=models.BooleanField(default=True),
),
]
|
{
"content_hash": "24ebabe0c9708c838885b1a69d5e62b3",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 52,
"avg_line_length": 21.22222222222222,
"alnum_prop": 0.5942408376963351,
"repo_name": "davogler/POSTv3",
"id": "83d35b621f9ce224fba5e46b59a5a137caec7d3e",
"size": "406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cart/migrations/0008_cartitem_auto_renew.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "144324"
},
{
"name": "HTML",
"bytes": "282463"
},
{
"name": "JavaScript",
"bytes": "244051"
},
{
"name": "Python",
"bytes": "358932"
}
],
"symlink_target": ""
}
|
"""
Tests for flask-geckoboard.
"""
from test_decorators import *
|
{
"content_hash": "68072aa96364095a1de383c95c769eac",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 29,
"avg_line_length": 13.4,
"alnum_prop": 0.7014925373134329,
"repo_name": "rossdeane/flask-geckoboard",
"id": "ad579f135437fa93c6d533eeb60a40c98b6289b2",
"size": "67",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53659"
}
],
"symlink_target": ""
}
|
"""Provides a sensor for Home Connect."""
from datetime import timedelta
import logging
from homeassistant.components.sensor import SensorDeviceClass, SensorEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_ENTITIES
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
import homeassistant.util.dt as dt_util
from .const import ATTR_VALUE, BSH_OPERATION_STATE, DOMAIN
from .entity import HomeConnectEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up the Home Connect sensor."""
def get_entities():
"""Get a list of entities."""
entities = []
hc_api = hass.data[DOMAIN][config_entry.entry_id]
for device_dict in hc_api.devices:
entity_dicts = device_dict.get(CONF_ENTITIES, {}).get("sensor", [])
entities += [HomeConnectSensor(**d) for d in entity_dicts]
return entities
async_add_entities(await hass.async_add_executor_job(get_entities), True)
class HomeConnectSensor(HomeConnectEntity, SensorEntity):
"""Sensor class for Home Connect."""
def __init__(self, device, desc, key, unit, icon, device_class, sign=1):
"""Initialize the entity."""
super().__init__(device, desc)
self._state = None
self._key = key
self._unit = unit
self._icon = icon
self._device_class = device_class
self._sign = sign
@property
def native_value(self):
"""Return sensor value."""
return self._state
@property
def available(self) -> bool:
"""Return true if the sensor is available."""
return self._state is not None
async def async_update(self) -> None:
"""Update the sensor's status."""
status = self.device.appliance.status
if self._key not in status:
self._state = None
else:
if self.device_class == SensorDeviceClass.TIMESTAMP:
if ATTR_VALUE not in status[self._key]:
self._state = None
elif (
self._state is not None
and self._sign == 1
and self._state < dt_util.utcnow()
):
# if the date is supposed to be in the future but we're
# already past it, set state to None.
self._state = None
else:
seconds = self._sign * float(status[self._key][ATTR_VALUE])
self._state = dt_util.utcnow() + timedelta(seconds=seconds)
else:
self._state = status[self._key].get(ATTR_VALUE)
if self._key == BSH_OPERATION_STATE:
# Value comes back as an enum, we only really care about the
# last part, so split it off
# https://developer.home-connect.com/docs/status/operation_state
self._state = self._state.split(".")[-1]
_LOGGER.debug("Updated, new state: %s", self._state)
@property
def native_unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
@property
def icon(self):
"""Return the icon."""
return self._icon
@property
def device_class(self):
"""Return the device class."""
return self._device_class
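# Worked example of the timestamp handling in async_update above (the sensor
# and its 600-second status value are hypothetical): with sign=1 and
# status[key] == {ATTR_VALUE: 600}, native_value becomes
# dt_util.utcnow() + timedelta(seconds=600), i.e. ten minutes in the future;
# a sensor created with sign=-1 would instead place the timestamp that many
# seconds in the past.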
|
{
"content_hash": "0e5a421e881fd8b6ace0424cf936602f",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 84,
"avg_line_length": 35,
"alnum_prop": 0.5910364145658263,
"repo_name": "mezz64/home-assistant",
"id": "38a45ccf7095e15e93ef93dc76f7bc1adf624d61",
"size": "3570",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/home_connect/sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
'''
Copyleft May 04, 2017 Arya Iranmehr, PhD Student, Bafna Lab, UC San Diego, Email: airanmehr@gmail.com
'''
import numpy as np;
np.set_printoptions(linewidth=200, precision=5, suppress=True)
import pandas as pd;
pd.options.display.max_rows = 20;
pd.options.display.expand_frame_repr = False
import seaborn as sns
import pylab as plt;
import matplotlib as mpl
import os;
home = os.path.expanduser('~') + '/'
import popgen.Util as utl
import popgen.Estimate as est
import popgen.Simulation as Simulation
|
{
"content_hash": "636dffccd749ff7a8c1118136ef43f0d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 102,
"avg_line_length": 26.68421052631579,
"alnum_prop": 0.7593688362919132,
"repo_name": "airanmehr/bio",
"id": "1183ed4b89785a16bae2ec38800fee372192f647",
"size": "507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Scripts/Bash/VCF/createAnnotation/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "423063"
},
{
"name": "C++",
"bytes": "29379"
},
{
"name": "CSS",
"bytes": "5230"
},
{
"name": "HTML",
"bytes": "320610"
},
{
"name": "Jupyter Notebook",
"bytes": "49142550"
},
{
"name": "Makefile",
"bytes": "798"
},
{
"name": "Python",
"bytes": "921165"
},
{
"name": "R",
"bytes": "4193"
},
{
"name": "Shell",
"bytes": "86117"
},
{
"name": "TeX",
"bytes": "50192"
}
],
"symlink_target": ""
}
|
import sys
import os
import os.path as op
import glob
import warnings
import shutil
from nose.tools import assert_true, assert_equal, assert_raises
from nose.plugins.skip import SkipTest
from mne import Epochs, read_events, pick_types, read_evokeds
from mne.io import Raw
from mne.datasets import testing
from mne.report import Report
from mne.utils import (_TempDir, requires_mayavi, requires_nibabel,
requires_PIL, run_tests_if_main, slow_test)
from mne.viz import plot_trans
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
data_dir = testing.data_path(download=False)
subjects_dir = op.join(data_dir, 'subjects')
report_dir = op.join(data_dir, 'MEG', 'sample')
raw_fname = op.join(report_dir, 'sample_audvis_trunc_raw.fif')
event_fname = op.join(report_dir, 'sample_audvis_trunc_raw-eve.fif')
cov_fname = op.join(report_dir, 'sample_audvis_trunc-cov.fif')
fwd_fname = op.join(report_dir, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
trans_fname = op.join(report_dir, 'sample_audvis_trunc-trans.fif')
inv_fname = op.join(report_dir,
'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
mri_fname = op.join(subjects_dir, 'sample', 'mri', 'T1.mgz')
base_dir = op.realpath(op.join(op.dirname(__file__), '..', 'io', 'tests',
'data'))
evoked_fname = op.join(base_dir, 'test-ave.fif')
# Set our plotters to test mode
warnings.simplefilter('always') # enable b/c these tests throw warnings
@slow_test
@testing.requires_testing_data
@requires_PIL
def test_render_report():
"""Test rendering -*.fif files for mne report.
"""
tempdir = _TempDir()
raw_fname_new = op.join(tempdir, 'temp_raw.fif')
event_fname_new = op.join(tempdir, 'temp_raw-eve.fif')
cov_fname_new = op.join(tempdir, 'temp_raw-cov.fif')
fwd_fname_new = op.join(tempdir, 'temp_raw-fwd.fif')
inv_fname_new = op.join(tempdir, 'temp_raw-inv.fif')
for a, b in [[raw_fname, raw_fname_new],
[event_fname, event_fname_new],
[cov_fname, cov_fname_new],
[fwd_fname, fwd_fname_new],
[inv_fname, inv_fname_new]]:
shutil.copyfile(a, b)
# create and add -epo.fif and -ave.fif files
epochs_fname = op.join(tempdir, 'temp-epo.fif')
evoked_fname = op.join(tempdir, 'temp-ave.fif')
raw = Raw(raw_fname_new)
picks = pick_types(raw.info, meg='mag', eeg=False) # faster with one type
epochs = Epochs(raw, read_events(event_fname), 1, -0.2, 0.2, picks=picks)
epochs.save(epochs_fname)
epochs.average().save(evoked_fname)
report = Report(info_fname=raw_fname_new, subjects_dir=subjects_dir)
if sys.version.startswith('3.5'): # XXX Some strange MPL/3.5 error...
raise SkipTest('Python 3.5 and mpl have unresolved issues')
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
report.parse_folder(data_path=tempdir, on_error='raise')
assert_true(len(w) >= 1)
# Check correct paths and filenames
fnames = glob.glob(op.join(tempdir, '*.fif'))
for fname in fnames:
assert_true(op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert_true(''.join(report.html).find(op.basename(fname)) != -1)
assert_equal(len(report.fnames), len(fnames))
assert_equal(len(report.html), len(report.fnames))
# Check saving functionality
report.data_path = tempdir
report.save(fname=op.join(tempdir, 'report.html'), open_browser=False)
assert_true(op.isfile(op.join(tempdir, 'report.html')))
assert_equal(len(report.html), len(fnames))
assert_equal(len(report.html), len(report.fnames))
# Check saving same report to new filename
report.save(fname=op.join(tempdir, 'report2.html'), open_browser=False)
assert_true(op.isfile(op.join(tempdir, 'report2.html')))
# Check overwriting file
report.save(fname=op.join(tempdir, 'report.html'), open_browser=False,
overwrite=True)
assert_true(op.isfile(op.join(tempdir, 'report.html')))
# Check pattern matching with multiple patterns
pattern = ['*raw.fif', '*eve.fif']
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
report.parse_folder(data_path=tempdir, pattern=pattern)
assert_true(len(w) >= 1)
fnames = glob.glob(op.join(tempdir, '*raw.fif')) + \
glob.glob(op.join(tempdir, '*eve.fif'))
for fname in fnames:
assert_true(op.basename(fname) in
[op.basename(x) for x in report.fnames])
assert_true(''.join(report.html).find(op.basename(fname)) != -1)
@testing.requires_testing_data
@requires_mayavi
@requires_PIL
def test_render_add_sections():
"""Test adding figures/images to section.
"""
from PIL import Image
tempdir = _TempDir()
import matplotlib.pyplot as plt
report = Report(subjects_dir=subjects_dir)
# Check add_figs_to_section functionality
fig = plt.plot([1, 2], [1, 2])[0].figure
report.add_figs_to_section(figs=fig, # test non-list input
captions=['evoked response'], scale=1.2,
image_format='svg')
assert_raises(ValueError, report.add_figs_to_section, figs=[fig, fig],
captions='H')
assert_raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=0, image_format='svg')
assert_raises(ValueError, report.add_figs_to_section, figs=fig,
captions=['foo'], scale=1e-10, image_format='svg')
# need to recreate because calls above change size
fig = plt.plot([1, 2], [1, 2])[0].figure
# Check add_images_to_section with png and then gif
img_fname = op.join(tempdir, 'testimage.png')
fig.savefig(img_fname)
report.add_images_to_section(fnames=[img_fname],
captions=['evoked response'])
im = Image.open(img_fname)
    img_fname = op.join(tempdir, 'testimage.gif')
im.save(img_fname) # matplotlib does not support gif
report.add_images_to_section(fnames=[img_fname],
captions=['evoked response'])
assert_raises(ValueError, report.add_images_to_section,
fnames=[img_fname, img_fname], captions='H')
assert_raises(ValueError, report.add_images_to_section,
fnames=['foobar.xxx'], captions='H')
evoked = read_evokeds(evoked_fname, condition='Left Auditory',
baseline=(-0.2, 0.0))
fig = plot_trans(evoked.info, trans_fname, subject='sample',
subjects_dir=subjects_dir)
report.add_figs_to_section(figs=fig, # test non-list input
captions='random image', scale=1.2)
@slow_test
@testing.requires_testing_data
@requires_mayavi
@requires_nibabel()
def test_render_mri():
"""Test rendering MRI for mne report.
"""
tempdir = _TempDir()
trans_fname_new = op.join(tempdir, 'temp-trans.fif')
for a, b in [[trans_fname, trans_fname_new]]:
shutil.copyfile(a, b)
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
with warnings.catch_warnings(record=True):
warnings.simplefilter('always')
report.parse_folder(data_path=tempdir, mri_decim=30, pattern='*',
n_jobs=2)
report.save(op.join(tempdir, 'report.html'), open_browser=False)
@testing.requires_testing_data
@requires_nibabel()
def test_render_mri_without_bem():
"""Test rendering MRI without BEM for mne report.
"""
tempdir = _TempDir()
os.mkdir(op.join(tempdir, 'sample'))
os.mkdir(op.join(tempdir, 'sample', 'mri'))
shutil.copyfile(mri_fname, op.join(tempdir, 'sample', 'mri', 'T1.mgz'))
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=tempdir)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
report.parse_folder(tempdir)
assert_true(len(w) >= 1)
report.save(op.join(tempdir, 'report.html'), open_browser=False)
@testing.requires_testing_data
@requires_nibabel()
def test_add_htmls_to_section():
"""Test adding html str to mne report.
"""
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
html = '<b>MNE-Python is AWESOME</b>'
caption, section = 'html', 'html_section'
report.add_htmls_to_section(html, caption, section)
idx = report._sectionlabels.index('report_' + section)
html_compare = report.html[idx]
assert_true(html in html_compare)
def test_add_slider_to_section():
"""Test adding a slider with a series of images to mne report.
"""
tempdir = _TempDir()
from matplotlib import pyplot as plt
report = Report(info_fname=raw_fname,
subject='sample', subjects_dir=subjects_dir)
section = 'slider_section'
figs = list()
figs.append(plt.figure())
plt.plot([1, 2, 3])
plt.close('all')
figs.append(plt.figure())
plt.plot([3, 2, 1])
plt.close('all')
report.add_slider_to_section(figs, section=section)
report.save(op.join(tempdir, 'report.html'), open_browser=False)
assert_raises(NotImplementedError, report.add_slider_to_section,
[figs, figs])
assert_raises(ValueError, report.add_slider_to_section, figs, ['wug'])
assert_raises(TypeError, report.add_slider_to_section, figs, 'wug')
def test_validate_input():
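    """Test Report._validate_input checks on item/caption/comment lengths."""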
report = Report()
items = ['a', 'b', 'c']
captions = ['Letter A', 'Letter B', 'Letter C']
section = 'ABCs'
comments = ['First letter of the alphabet.',
'Second letter of the alphabet',
'Third letter of the alphabet']
assert_raises(ValueError, report._validate_input, items, captions[:-1],
section, comments=None)
assert_raises(ValueError, report._validate_input, items, captions, section,
comments=comments[:-1])
values = report._validate_input(items, captions, section, comments=None)
items_new, captions_new, comments_new = values
assert_equal(len(comments_new), len(items))
run_tests_if_main()
|
{
"content_hash": "a08a4b7bd1029285eb7a6f815212ad94",
"timestamp": "",
"source": "github",
"line_count": 266,
"max_line_length": 79,
"avg_line_length": 38.6578947368421,
"alnum_prop": 0.6388213556355149,
"repo_name": "cmoutard/mne-python",
"id": "f10167bdd6bf1d854ba06dcaf6e29c7605fc261e",
"size": "10402",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "mne/tests/test_report.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "3171"
},
{
"name": "PowerShell",
"bytes": "2988"
},
{
"name": "Python",
"bytes": "4669153"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
'''
Copyright (C) 2016 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
'''
from __future__ import print_function
from ..sys_util import get_config_file
from ..sys_util import setup_environment_from_config_file
from ..sys_util import write_config_file_value
from ..sys_util import get_library_name
from ..sys_util import make_unity_server_env
import unittest
import tempfile
import subprocess
import sys
from os.path import join
import os
import shutil
class EnvironmentConfigTester(unittest.TestCase):
def test_config_basic_write(self):
test_dir = tempfile.mkdtemp()
config_file = join(test_dir, "test_config")
os.environ["GRAPHLAB_CONFIG_FILE"] = config_file
try:
self.assertEqual(get_config_file(), config_file)
write_config_file_value("GRAPHLAB_FILE_TEST_VALUE", "this-is-a-test")
setup_environment_from_config_file()
self.assertEqual(os.environ["GRAPHLAB_FILE_TEST_VALUE"], "this-is-a-test")
finally:
shutil.rmtree(test_dir)
del os.environ["GRAPHLAB_CONFIG_FILE"]
def test_environment_import(self):
test_dir = tempfile.mkdtemp()
config_file = join(test_dir, "test_config")
os.environ["GRAPHLAB_CONFIG_FILE"] = config_file
write_config_file_value("GRAPHLAB_FILEIO_MAXIMUM_CACHE_CAPACITY", "123456")
run_script = r"""
import sys
import os
system_path = os.environ.get("__GL_SYS_PATH__", "")
del sys.path[:]
sys.path.extend(p.strip() for p in system_path.split(os.pathsep) if p.strip())
import %(library)s
var = %(library)s.get_runtime_config()["GRAPHLAB_FILEIO_MAXIMUM_CACHE_CAPACITY"]
if var == 123456:
sys.exit(0)
else:
print("boo: GRAPHLAB_FILEIO_MAXIMUM_CACHE_CAPACITY = ", var, "and this is wrong. Seriously.")
sys.exit(1)
""" % {"library" : get_library_name()}
run_file = join(test_dir, "run_test.py")
with open(run_file, 'w') as f:
f.write(run_script)
env = make_unity_server_env()
env["GRAPHLAB_CONFIG_FILE"] = config_file
ret_code = subprocess.call([sys.executable, run_file], env = env)
self.assertEqual(ret_code, 0)
|
{
"content_hash": "91fb23cb3b2a94df26d02a33737ab08a",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 98,
"avg_line_length": 28.024390243902438,
"alnum_prop": 0.6575282854656223,
"repo_name": "haijieg/SFrame",
"id": "1d109ab0edc7bc8c64166deb0b38e7d0afa48d54",
"size": "2322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oss_src/unity/python/sframe/test/test_environment_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "169684"
},
{
"name": "C++",
"bytes": "12042105"
},
{
"name": "CMake",
"bytes": "104454"
},
{
"name": "CSS",
"bytes": "127000"
},
{
"name": "HTML",
"bytes": "24575"
},
{
"name": "Hack",
"bytes": "277"
},
{
"name": "JavaScript",
"bytes": "20909"
},
{
"name": "Makefile",
"bytes": "9614"
},
{
"name": "Perl",
"bytes": "9663"
},
{
"name": "Python",
"bytes": "2160225"
},
{
"name": "R",
"bytes": "537"
},
{
"name": "Scala",
"bytes": "5232"
},
{
"name": "Shell",
"bytes": "51745"
},
{
"name": "Smarty",
"bytes": "966"
},
{
"name": "XSLT",
"bytes": "74068"
}
],
"symlink_target": ""
}
|
import re
import collections
from JumpScale import j
STATUS_LINE = re.compile('^Status:\s*(.+)')
RULE_LINE = re.compile('^\[\s*(\d+)\] (.+?)\s{2,}(.+?)\s{2,}(.+)$')
ParsedDestination = collections.namedtuple('ParsedDestination',
'ip proto port dev')
class UFWError(Exception):
pass
class UFWRule:
def __init__(self, action=None, source=None, destination=None, number=None):
self._number = number
self._source = source
self._action = action
self._destination = destination
@property
def number(self):
return self._number
@property
def source(self):
return self._source
@property
def destination(self):
return self._destination
@property
def action(self):
return self._action
def __str__(self):
return ('[%2s] %s to %s from %s' %
(self.number if self.number is not None else '',
self.action, self.destination, self.source))
def __repr__(self):
return str(self)
class UFWOperation:
def cmd(self):
        raise NotImplementedError()
class StatusOp(UFWOperation):
def __init__(self, status=None):
self._status = status
def cmd(self):
return '--force enable' if self._status else 'disable'
class ResetOp(UFWOperation):
def cmd(self):
return '--force reset'
class RuleOp(UFWOperation):
def __init__(self, rule=None, add=True):
self._add = add
self._rule = rule
def _parser(self, src):
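        """Parse a source/destination spec (e.g. '22/tcp' or 'any on eth0')
        into a ParsedDestination with ip, proto, port and dev fields."""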
src = src.replace('(v6)', '').replace('(out)', '')
        source = re.search('\d+\.\d+\.\d+\.\d+[^\s]*', src)
ip = None
pos = 0
if source:
ip = source.group()
pos = source.end()
else:
ip = 'any'
port_proto_m = re.search('\\b(\d+)(/([^\s]+))?', src[pos:])
proto = None
port = None
if port_proto_m:
proto = port_proto_m.group(3)
port = port_proto_m.group(1)
pos = port_proto_m.end()
on_m = re.search('on \w+', src)
dev = None
if on_m:
dev = on_m.group()
return ParsedDestination(ip=ip, proto=proto, port=port, dev=dev)
def cmd(self):
rule = self._rule
cmd = []
if not self._add:
cmd.append('delete')
cmd.append(rule.action.lower())
def push(src):
cmd.append(src.ip)
if src.proto:
cmd.append('proto %s' % src.proto)
if src.port:
cmd.append('port %s' % src.port)
src = self._parser(rule.source)
dst = self._parser(rule.destination)
if src.dev and dst.dev:
            raise UFWError('Both source and destination have devices')
if src.dev:
if 'out' not in rule.action.lower():
raise UFWError('Invalid source for %s' % rule.action)
cmd.append(src.dev)
elif dst.dev:
if 'in' not in rule.action.lower():
raise UFWError('Invalid destination for %s' % rule.action)
cmd.append(dst.dev)
cmd.append('from')
push(src)
cmd.append('to')
push(dst)
return ' '.join(cmd)
class UFWManager:
ACTION_ALLOW_IN = 'allow in'
ACTION_ALLOW_OUT = 'allow out'
ACTION_DENY_IN = 'deny in'
ACTION_DENY_OUT = 'deny out'
ACTION_REJECT_IN = 'reject in'
ACTION_REJECT_OUT = 'reject out'
def __init__(self):
self.__jslocation__ = "j.sal.ufw"
self._local = j.tools.executor.getLocal()
self._rules = None
self._enabled = None
self._transactions = []
def _bool(self, status):
return status == 'active'
def _load(self):
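        """Parse 'ufw status numbered' output into the rule list and enabled flag."""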
rc, status = self._local.execute('ufw status numbered')
self._rules = []
for line in status.splitlines():
line = line.strip()
if not line or '(v6)' in line:
continue
status = STATUS_LINE.match(line)
if status is not None:
self._enabled = self._bool(status.group(1))
continue
rule = RULE_LINE.match(line)
if rule is None:
continue
number, destination, action, source = rule.groups()
self._rules.append(UFWRule(action, source, destination, number))
@property
def rules(self):
"""
List of current rules.
"""
if self._rules is None:
self._load()
return self._rules
@property
def enabled(self):
"""
        Get the current, actual status of ufw. Setting enabled, on the
        other hand, will not take effect until you call commit().
"""
if self._enabled is None:
self._load()
return self._enabled
@enabled.setter
def enabled(self, value):
"""
        Set the enabled status. Note that this doesn't take effect
        until you apply the change by calling commit().
"""
self._transactions.append(
StatusOp(value)
)
def addRule(self, action, source='any', destination='any'):
"""
Add a new UFW rule
:action: One of the actions defined
ACTION_ALLOW_IN
ACTION_ALLOW_OUT
ACTION_DENY_IN
ACTION_DENY_OUT
ACTION_REJECT_IN
ACTION_REJECT_OUT
:source: Source to match, default to 'any'. Examples of valid sources
'192.168.1.0/24 proto tcp'
'22/tcp'
'any'
'any on eth0'
:destination: Destination to match, default to 'any'.
"""
self._transactions.append(
RuleOp(UFWRule(action, source, destination))
)
def removeRule(self, rule):
"""
Remove the specified rule
:rule: rule to remove
"""
self._transactions.append(
RuleOp(rule, add=False)
)
def reset(self):
"""
Remove all rules.
"""
self._transactions.append(
ResetOp()
)
def portOpen(self, port):
"""
        Shortcut to open a port.
"""
self.addRule(UFWManager.ACTION_ALLOW_IN, 'any', str(port))
def portClose(self, port):
"""
        Shortcut to close a port (previously opened by portOpen).
"""
port = str(port)
for rule in self.rules:
if rule.destination == port:
self.removeRule(rule)
def commit(self):
"""
        Apply all pending actions.
:example:
ufw.enabled = False
ufw.reset()
ufw.addRule(ufw.ACTION_ALLOW_IN, 'any', '22/tcp')
ufw.enabled = True
ufw.commit()
"""
try:
while self._transactions:
op = self._transactions.pop(0)
self._local.execute('ufw %s' % op.cmd())
except Exception as e:
raise UFWError(e)
# force reload on next access.
self._rules = None
        self._enabled = None
|
{
"content_hash": "091a47a6a52546f9361c13ce411afcbc",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 80,
"avg_line_length": 24.715753424657535,
"alnum_prop": 0.5169738118331717,
"repo_name": "Jumpscale/jumpscale_core8",
"id": "6b4c38540b9d4d836975c2c6270480b800a802bb",
"size": "7217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/JumpScale/sal/ufw/UFWManager.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1113"
},
{
"name": "Cap'n Proto",
"bytes": "9033"
},
{
"name": "Lua",
"bytes": "12538"
},
{
"name": "Python",
"bytes": "4343122"
},
{
"name": "Shell",
"bytes": "7091"
}
],
"symlink_target": ""
}
|
from json import dumps
from qiita_db.sql_connection import TRN
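# Re-serialize the command parameters of every artifact produced by a
# processing job, casting all values to strings, and store the result on
# both the artifact and the job that generated it.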
with TRN:
sql = """SELECT *
FROM qiita.artifact
JOIN qiita.artifact_output_processing_job
USING (artifact_id)
WHERE command_id IS NOT NULL"""
TRN.add(sql)
sql_update_artifact = """UPDATE qiita.artifact
SET command_parameters = %s
WHERE artifact_id = %s"""
sql_update_job = """UPDATE qiita.processing_job
SET command_parameters = %s
WHERE processing_job_id = %s"""
for ainfo in TRN.execute_fetchindex():
ainfo = dict(ainfo)
params = dumps(
{k: str(v) for k, v in ainfo['command_parameters'].items()})
TRN.add(sql_update_artifact, [params, ainfo['artifact_id']])
TRN.add(sql_update_job, [params, ainfo['processing_job_id']])
|
{
"content_hash": "5dae2ad6d4e4b2fe99ef11967a36e5e4",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 72,
"avg_line_length": 38.958333333333336,
"alnum_prop": 0.5358288770053476,
"repo_name": "biocore/qiita",
"id": "8ebbab746d235c7c7255b3ff7a63f9857a1659cf",
"size": "1107",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "qiita_db/support_files/patches/python_patches/61.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2334"
},
{
"name": "HTML",
"bytes": "552473"
},
{
"name": "JavaScript",
"bytes": "93567"
},
{
"name": "Makefile",
"bytes": "6838"
},
{
"name": "PLpgSQL",
"bytes": "84875"
},
{
"name": "Python",
"bytes": "2469738"
},
{
"name": "SQLPL",
"bytes": "2805"
},
{
"name": "Shell",
"bytes": "3232"
},
{
"name": "TSQL",
"bytes": "202297"
}
],
"symlink_target": ""
}
|
from subprocess import check_call, CalledProcessError
# This file contains the class Task, meant to encapsulate a given task as configured.
__author__ = 'Sander Krause <sanderkrause@gmail.com>'
__author__ = 'Roel van Nuland <roel@kompjoefriek.nl>'
class Task:
# Encapsulate a task as configured in the configuration file.
name = None
command = None
parameters = None
output = None
no_name = 0
    def __init__(self, name, command, parameters=None, output=None):
if name is None:
Task.no_name += 1
self.name = "No Name {}".format(Task.no_name)
else:
self.name = name
self.command = command
        self.parameters = parameters if parameters is not None else []
self.output = output
def get_name(self):
return self.name
def get_command(self):
return self.command
def get_parameters(self):
return self.parameters
def get_output(self):
return self.output
def run(self):
# TODO: handle input / output redirects properly
command_and_parameters = [self.command]
command_and_parameters.extend(self.parameters)
try:
            # join command and parameters into one string so the shell
            # actually receives the parameters as well
            check_call(' '.join(command_and_parameters), shell=True)
except (OSError, IOError) as e:
print("Error in task({}): {}".format(self.name, e.strerror))
except CalledProcessError as e:
print("Error in task({}): exit code {}".format(self.name, e.returncode))
def __repr__(self):
import json
return "Task {{\n name:\"{}\",\n command: \"{}\",\n parameters: {}\n output: {},\n}}" \
.format(self.name, self.command, json.dumps(self.parameters), self.output)
|
{
"content_hash": "85d3570ef63ee86ee18d13c06e54cc62",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 99,
"avg_line_length": 30.69090909090909,
"alnum_prop": 0.6048578199052133,
"repo_name": "kompjoefriek/Simr",
"id": "874d1386d971da3fecd475787708ccca4b2a1e67",
"size": "1688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simr/Configuration/Task.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17815"
}
],
"symlink_target": ""
}
|
"""API routes for tokens."""
from flask import jsonify
from ceraon.auth.basic_auth import BasicAuth
from ceraon.user.models import User
from ceraon.utils import RESTBlueprint
from .schema import TokenSchema
blueprint = RESTBlueprint('token', __name__, version='v1')
TOKEN_SCHEMA = TokenSchema()
@blueprint.list()
@BasicAuth.login_required
def get_token():
"""Get the token for a user."""
current_user = User.query.filter_by(email=BasicAuth.email()).first()
if current_user:
token = current_user.get_auth_token()
return jsonify(data=TOKEN_SCHEMA.dump({'token': token}).data)
else:
return jsonify(data=TOKEN_SCHEMA.dump('').data)
|
{
"content_hash": "e3b6ae5a0686e05c37f4ee7849573a82",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 72,
"avg_line_length": 26,
"alnum_prop": 0.7026627218934911,
"repo_name": "Rdbaker/Mealbound",
"id": "e6375dbcf345712b2ace79e1575caa96ad2a46d4",
"size": "676",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceraon/api/v1/token/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "758133"
},
{
"name": "HTML",
"bytes": "21264"
},
{
"name": "JavaScript",
"bytes": "747474"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "200911"
},
{
"name": "Shell",
"bytes": "210"
},
{
"name": "TypeScript",
"bytes": "166005"
}
],
"symlink_target": ""
}
|
from rdkit.Chem.FeatMaps.FeatMapPoint import FeatMapPoint
import math
class FeatMapScoreMode(object):
All = 0
""" score each feature in the probe against every matching
feature in the FeatMap.
"""
Closest = 1
""" score each feature in the probe against the closest
matching feature in the FeatMap.
"""
Best = 2
""" score each feature in the probe against the matching
feature in the FeatMap that leads to the highest score
"""
class FeatDirScoreMode(object):
Ignore = 0
""" ignore feature directions
"""
DotFullRange = 1
""" Use the dot product and allow negative contributions when
directions are anti-parallel.
e.g. score = dot(f1Dir,f2Dir)
"""
DotPosRange = 2
""" Use the dot product and scale contributions to lie between
zero and one.
e.g. score = ( dot(f1Dir,f2Dir) + 1 ) / 2
"""
class FeatMapParams(object):
""" one of these should be instantiated for each
feature type in the feature map
"""
radius = 2.5
" cutoff radius "
width = 1.0
" width parameter (e.g. the gaussian sigma) "
class FeatProfile(object):
" scoring profile of the feature "
Gaussian = 0
Triangle = 1
Box = 2
featProfile = FeatProfile.Gaussian
class FeatMap(object):
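  """A set of weighted feature points that can be scored against probe features.

  Scoring behaviour is controlled by scoreMode, dirScoreMode and the
  per-family FeatMapParams stored in params.
  """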
dirScoreMode = FeatDirScoreMode.Ignore
scoreMode = FeatMapScoreMode.All
params = {}
def __init__(self, params=None, feats=None, weights=None):
if params:
self.params = params
self._initializeFeats(feats, weights)
def _initializeFeats(self, feats, weights):
self._feats = []
if feats:
if len(feats) != len(weights):
raise ValueError('feats and weights lists must be the same length')
for feat, weight in zip(feats, weights):
self.AddFeature(feat, weight)
def AddFeature(self, feat, weight=None):
if self.params and not feat.GetFamily() in self.params:
raise ValueError('feature family %s not found in params' % feat.GetFamily())
newFeat = FeatMapPoint()
newFeat.initFromFeat(feat)
newFeat.weight = weight
self.AddFeatPoint(newFeat)
def AddFeatPoint(self, featPt):
if not isinstance(featPt, FeatMapPoint):
raise ValueError('addFeatPoint() must be called with a FeatMapPoint instance')
if self.params and not featPt.GetFamily() in self.params:
raise ValueError('feature family %s not found in params' % featPt.GetFamily())
self._feats.append(featPt)
def GetFeatures(self):
return self._feats
def GetNumFeatures(self):
return len(self._feats)
def GetFeature(self, i):
return self._feats[i]
def DropFeature(self, i):
del self._feats[i]
def _loopOverMatchingFeats(self, oFeat):
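    """Generate (index, feature) pairs for map features with oFeat's family."""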
for sIdx, sFeat in enumerate(self._feats):
if sFeat.GetFamily() == oFeat.GetFamily():
yield sIdx, sFeat
def GetFeatFeatScore(self, feat1, feat2, typeMatch=True):
""" feat1 is one of our feats
feat2 is any Feature
"""
if typeMatch and feat1.GetFamily() != feat2.GetFamily():
return 0.0
d2 = feat1.GetDist2(feat2)
params = self.params[feat1.GetFamily()]
if d2 > params.radius * params.radius:
return 0.0
if params.featProfile == FeatMapParams.FeatProfile.Gaussian:
score = math.exp(-d2 / params.width)
elif params.featProfile == FeatMapParams.FeatProfile.Triangle:
d = math.sqrt(d2)
if d < params.width:
score = 1. - d / params.width
else:
score = 0.0
elif params.featProfile == FeatMapParams.FeatProfile.Box:
score = 1.0
score *= feat1.weight
if self.dirScoreMode != FeatDirScoreMode.Ignore:
dirScore = feat1.GetDirMatch(feat2)
if self.dirScoreMode == FeatDirScoreMode.DotPosRange:
dirScore = (dirScore + 1.0) / 2.0
elif self.dirScoreMode != FeatDirScoreMode.DotFullRange:
raise NotImplementedError('bad feature dir score mode')
score *= dirScore
return score
def ScoreFeats(self, featsToScore, mapScoreVect=None, featsScoreVect=None, featsToFeatMapIdx=None):
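    """Score the features in featsToScore against the features in this map.

    Returns the total score. If provided, mapScoreVect, featsScoreVect and
    featsToFeatMapIdx are filled in place with the per-map-feature scores,
    the per-probe-feature scores, and the indices of the map features that
    each probe feature was scored against.
    """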
nFeats = len(self._feats)
if mapScoreVect is not None:
if len(mapScoreVect) != nFeats:
raise ValueError('if provided, len(mapScoreVect) should equal numFeats')
for i in range(nFeats):
mapScoreVect[i] = 0.0
else:
mapScoreVect = [0.0] * nFeats
nToScore = len(featsToScore)
if self.scoreMode == FeatMapScoreMode.Closest:
defScore = 1000.0
else:
defScore = 0.0
if featsScoreVect is not None:
if len(featsScoreVect) != nToScore:
raise ValueError('if provided, len(featsScoreVect) should equal len(featsToScore)')
for i in range(nToScore):
featsScoreVect[i] = defScore
else:
featsScoreVect = [defScore] * nToScore
if featsToFeatMapIdx is not None: # Initialize a 2D-empty array
if len(featsToFeatMapIdx) != nToScore:
raise ValueError('if provided, len(featsToFeatMapIdx) should equal len(featsToScore)')
else:
featsToFeatMapIdx = [None] * nToScore
for i in range(nToScore):
if self.scoreMode != FeatMapScoreMode.All:
featsToFeatMapIdx[i] = [-1]
else:
featsToFeatMapIdx[i] = []
for oIdx, oFeat in enumerate(featsToScore):
for sIdx, sFeat in self._loopOverMatchingFeats(oFeat):
if self.scoreMode == FeatMapScoreMode.Closest:
d = sFeat.GetDist2(oFeat)
if d < featsScoreVect[oIdx]:
featsScoreVect[oIdx] = d
featsToFeatMapIdx[oIdx][0] = sIdx
else:
lScore = self.GetFeatFeatScore(sFeat, oFeat, typeMatch=False)
if self.scoreMode == FeatMapScoreMode.Best:
if lScore > featsScoreVect[oIdx]:
featsScoreVect[oIdx] = lScore
featsToFeatMapIdx[oIdx][0] = sIdx
elif self.scoreMode == FeatMapScoreMode.All:
featsScoreVect[oIdx] += lScore
mapScoreVect[sIdx] += lScore
featsToFeatMapIdx[oIdx].append(sIdx)
else:
raise ValueError('bad score mode')
totScore = 0.0
if self.scoreMode == FeatMapScoreMode.Closest:
for oIdx, oFeat in enumerate(featsToScore):
sIdx = featsToFeatMapIdx[oIdx][0]
if sIdx > -1:
          lScore = self.GetFeatFeatScore(self._feats[sIdx], oFeat,
                                         typeMatch=False)
featsScoreVect[oIdx] = lScore
mapScoreVect[sIdx] = lScore
totScore += lScore
else:
featsScoreVect[oIdx] = 0
else:
totScore = sum(featsScoreVect)
if self.scoreMode == FeatMapScoreMode.Best:
for oIdx, lScore in enumerate(featsScoreVect):
sIdx = featsToFeatMapIdx[oIdx][0]
if sIdx > -1:
mapScoreVect[sIdx] = lScore
# replace placeholders:
if self.scoreMode != FeatMapScoreMode.All:
for elem in featsToFeatMapIdx:
if elem == [-1]:
elem.pop()
return totScore
def __str__(self):
res = ''
for i, feat in enumerate(self._feats):
weight = feat.weight
pos = feat.GetPos()
res += '% 3d % 12s % 6.4f % 6.4f % 6.4f % 6.4f\n' % (i + 1, feat.GetFamily(), pos.x, pos.y,
pos.z, weight)
return res
|
{
"content_hash": "c25cf11939d9b42e90b26f42b73d6823",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 101,
"avg_line_length": 30.846153846153847,
"alnum_prop": 0.6388196176226102,
"repo_name": "rdkit/rdkit",
"id": "c2f6f28a0f0c504e8b14b81117e6a79cc6234dae",
"size": "7483",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "rdkit/Chem/FeatMaps/FeatMaps.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1595174"
},
{
"name": "C#",
"bytes": "10167"
},
{
"name": "C++",
"bytes": "13855391"
},
{
"name": "CMake",
"bytes": "761863"
},
{
"name": "Dockerfile",
"bytes": "2590"
},
{
"name": "Fortran",
"bytes": "7590"
},
{
"name": "HTML",
"bytes": "43059702"
},
{
"name": "Java",
"bytes": "369457"
},
{
"name": "JavaScript",
"bytes": "54009"
},
{
"name": "Jupyter Notebook",
"bytes": "498341"
},
{
"name": "LLVM",
"bytes": "40048"
},
{
"name": "Lex",
"bytes": "4508"
},
{
"name": "Makefile",
"bytes": "10862"
},
{
"name": "Python",
"bytes": "4157348"
},
{
"name": "QMake",
"bytes": "389"
},
{
"name": "SMT",
"bytes": "3010"
},
{
"name": "SWIG",
"bytes": "342569"
},
{
"name": "Shell",
"bytes": "3822"
},
{
"name": "Smarty",
"bytes": "5864"
},
{
"name": "Yacc",
"bytes": "61677"
}
],
"symlink_target": ""
}
|
"""
This bot is used for checking external links found at the wiki.
It checks several pages at once, with a limit set by the config variable
max_external_links, which defaults to 50.
The bot won't change any wiki pages, it will only report dead links such that
people can fix or remove the links themselves.
The bot will store all links found dead in a .dat file in the deadlinks
subdirectory. To avoid the removing of links which are only temporarily
unavailable, the bot ONLY reports links which were reported dead at least
two times, with a time lag of at least one week. Such links will be logged to a
.txt file in the deadlinks subdirectory.
After running the bot and waiting for at least one week, you can re-check those
pages where dead links were found, using the -repeat parameter.
In addition to the logging step, it is possible to automatically report dead
links to the talk page of the article where the link was found. To use this
feature, set report_dead_links_on_talk = True in your user-config.py, or
specify "-talk" on the command line. Adding "-notalk" switches this off
irrespective of the configuration variable.
When a link is found alive, it will be removed from the .dat file.
These command line parameters can be used to specify which pages to work on:
&params;
-repeat      Work on all pages where dead links were found before. This is
             useful to confirm that the links are dead after some time (at
             least one week), which is required before the script will report
             the problem.
-namespace   Only process pages in the namespace with the given number or
             name. This parameter may be used multiple times.
-xml Should be used instead of a simple page fetching method from
pagegenerators.py for performance and load issues
-xmlstart Page to start with when using an XML dump
-ignore HTTP return codes to ignore. Can be provided several times :
-ignore:401 -ignore:500
Furthermore, the following command line parameters are supported:
-talk Overrides the report_dead_links_on_talk config variable, enabling
the feature.
-notalk Overrides the report_dead_links_on_talk config variable, disabling
the feature.
-day         If a link was first found dead more than the given number of
             days ago, it should probably be fixed or removed. If not set,
             the default is 7 days.
The following config variables are supported:
max_external_links - The maximum number of web pages that should be
loaded simultaneously. You should change this
according to your Internet connection speed.
Be careful: if it is set too high, the script
might get socket errors because your network
is congested, and will then think that the page
is offline.
report_dead_links_on_talk - If set to true, causes the script to report dead
links on the article's talk page if (and ONLY if)
the linked page has been unavailable at least two
times during a timespan of at least one week.
Syntax examples:
python weblinkchecker.py -start:!
Loads all wiki pages in alphabetical order using the Special:Allpages
feature.
python weblinkchecker.py -start:Example_page
Loads all wiki pages using the Special:Allpages feature, starting at
"Example page"
python weblinkchecker.py -weblink:www.example.org
Loads all wiki pages that link to www.example.org
python weblinkchecker.py Example page
Only checks links found in the wiki page "Example page"
python weblinkchecker.py -repeat
Loads all wiki pages where dead links were found during a prior run
"""
#
# (C) Daniel Herding, 2005
# (C) Pywikibot team, 2005-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id: 175b8a2172b1f42d94c74249b53e2403282af935 $'
import re
import codecs
import pickle
import socket
import threading
import time
import sys
import pywikibot
from pywikibot import i18n, config, pagegenerators, textlib, xmlreader, weblib
# TODO: Convert to httplib2
if sys.version_info[0] > 2:
import urllib.parse as urlparse
import urllib.request as urllib
import http.client as httplib
basestring = (str, )
unicode = str
else:
import urlparse
import urllib
import httplib
docuReplacements = {
    '&params;': pagegenerators.parameterHelp
}
ignorelist = [
# Officially reserved for testing, documentation, etc. in
# https://tools.ietf.org/html/rfc2606#page-2
# top-level domains:
re.compile(r'.*[\./@]test(/.*)?'),
re.compile(r'.*[\./@]example(/.*)?'),
re.compile(r'.*[\./@]invalid(/.*)?'),
re.compile(r'.*[\./@]localhost(/.*)?'),
# second-level domains:
re.compile(r'.*[\./@]example\.com(/.*)?'),
re.compile(r'.*[\./@]example\.net(/.*)?'),
re.compile(r'.*[\./@]example\.org(/.*)?'),
# Other special cases
# bot somehow can't handle their redirects:
re.compile(r'.*[\./@]gso\.gbv\.de(/.*)?'),
re.compile(r'.*[\./@]berlinonline\.de(/.*)?'),
# above entry to be manually fixed per request at [[de:Benutzer:BLueFiSH.as/BZ]]
# bot can't handle their redirects:
re.compile(r'.*[\./@]bodo\.kommune\.no(/.*)?'),
re.compile(r'.*[\./@]jpl\.nasa\.gov(/.*)?'), # bot rejected on the site
re.compile(r'.*[\./@]itis\.gov(/.*)?'), # bot rejected on the site
re.compile(r'.*[\./@]cev\.lu(/.*)?'), # bot rejected on the site
# very slow response resulting in bot error:
re.compile(r'.*[\./@]science\.ksc\.nasa\.gov(/.*)?'),
re.compile(r'.*[\./@]britannica\.com(/.*)?'), # HTTP redirect loop
# bot rejected on the site:
re.compile(r'.*[\./@]quickfacts\.census\.gov(/.*)?'),
]
def weblinksIn(text, withoutBracketed=False, onlyBracketed=False):
"""
Yield web links from text.
TODO: move to textlib
"""
text = textlib.removeDisabledParts(text)
# MediaWiki parses templates before parsing external links. Thus, there
# might be a | or a } directly after a URL which does not belong to
# the URL itself.
# First, remove the curly braces of inner templates:
nestedTemplateR = re.compile(r'{{([^}]*?){{(.*?)}}(.*?)}}')
while nestedTemplateR.search(text):
text = nestedTemplateR.sub(r'{{\1 \2 \3}}', text)
# Then blow up the templates with spaces so that the | and }} will not
# be regarded as part of the link:.
templateWithParamsR = re.compile(r'{{([^}]*?[^ ])\|([^ ][^}]*?)}}',
re.DOTALL)
while templateWithParamsR.search(text):
text = templateWithParamsR.sub(r'{{ \1 | \2 }}', text)
# Add <blank> at the end of a template
# URL as last param of multiline template would not be correct
text = text.replace('}}', ' }}')
# Remove HTML comments in URLs as well as URLs in HTML comments.
# Also remove text inside nowiki links etc.
text = textlib.removeDisabledParts(text)
linkR = textlib.compileLinkR(withoutBracketed, onlyBracketed)
for m in linkR.finditer(text):
if m.group('url'):
yield m.group('url')
else:
yield m.group('urlb')
class XmlDumpPageGenerator:
"""Xml generator that yiels pages containing a web link."""
def __init__(self, xmlFilename, xmlStart, namespaces):
self.xmlStart = xmlStart
self.namespaces = namespaces
self.skipping = bool(xmlStart)
self.site = pywikibot.Site()
dump = xmlreader.XmlDump(xmlFilename)
self.parser = dump.parse()
def __iter__(self):
return self
def next(self):
try:
for entry in self.parser:
if self.skipping:
if entry.title != self.xmlStart:
continue
self.skipping = False
page = pywikibot.Page(self.site, entry.title)
if self.namespaces:
if page.namespace() not in self.namespaces:
continue
found = False
for url in weblinksIn(entry.text):
found = True
if found:
return page
except KeyboardInterrupt:
try:
if not self.skipping:
pywikibot.output(
u'To resume, use "-xmlstart:%s" on the command line.'
% entry.title)
except NameError:
pass
__next__ = next
class NotAnURLError(BaseException):
"""The link is not an URL."""
class LinkChecker(object):
"""
Check links.
Given a HTTP URL, tries to load the page from the Internet and checks if it
is still online.
Returns a (boolean, string) tuple saying if the page is online and including
a status reason.
Warning: Also returns false if your Internet connection isn't working
correctly! (This will give a Socket Error)
"""
def __init__(self, url, redirectChain=[], serverEncoding=None,
HTTPignore=[]):
"""
Constructor.
redirectChain is a list of redirects which were resolved by
resolveRedirect(). This is needed to detect redirect loops.
"""
self.url = url
self.serverEncoding = serverEncoding
self.header = {
# 'User-agent': pywikibot.useragent,
# we fake being Firefox because some webservers block unknown
# clients, e.g. https://images.google.de/images?q=Albit gives a 403
# when using the PyWikipediaBot user agent.
'User-agent': 'Mozilla/5.0 (X11; U; Linux i686; de; rv:1.8) Gecko/20051128 SUSE/1.5-0.1 Firefox/1.5',
'Accept': 'text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5',
'Accept-Language': 'de-de,de;q=0.8,en-us;q=0.5,en;q=0.3',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Keep-Alive': '30',
'Connection': 'keep-alive',
}
self.redirectChain = redirectChain + [url]
self.changeUrl(url)
self.HTTPignore = HTTPignore
def getConnection(self):
if self.scheme == 'http':
return httplib.HTTPConnection(self.host)
elif self.scheme == 'https':
return httplib.HTTPSConnection(self.host)
else:
raise NotAnURLError(self.url)
def getEncodingUsedByServer(self):
if not self.serverEncoding:
try:
pywikibot.output(
u'Contacting server %s to find out its default encoding...'
% self.host)
conn = self.getConnection()
conn.request('HEAD', '/', None, self.header)
self.response = conn.getresponse()
self.readEncodingFromResponse(self.response)
except:
pass
if not self.serverEncoding:
# TODO: We might also load a page, then check for an encoding
# definition in a HTML meta tag.
pywikibot.output(u'Error retrieving server\'s default charset. '
u'Using ISO 8859-1.')
# most browsers use ISO 8859-1 (Latin-1) as the default.
self.serverEncoding = 'iso8859-1'
return self.serverEncoding
def readEncodingFromResponse(self, response):
if not self.serverEncoding:
try:
ct = response.getheader('Content-Type')
charsetR = re.compile('charset=(.+)')
charset = charsetR.search(ct).group(1)
self.serverEncoding = charset
except:
pass
def changeUrl(self, url):
self.url = url
# we ignore the fragment
(self.scheme, self.host, self.path, self.query,
self.fragment) = urlparse.urlsplit(self.url)
if not self.path:
self.path = '/'
if self.query:
self.query = '?' + self.query
self.protocol = url.split(':', 1)[0]
# check if there are non-ASCII characters inside path or query, and if
# so, encode them in an encoding that hopefully is the right one.
try:
self.path.encode('ascii')
self.query.encode('ascii')
except UnicodeEncodeError:
encoding = self.getEncodingUsedByServer()
self.path = unicode(urllib.quote(self.path.encode(encoding)))
self.query = unicode(urllib.quote(self.query.encode(encoding), '=&'))
def resolveRedirect(self, useHEAD=False):
"""
Return the redirect target URL as a string, if it is a HTTP redirect.
If useHEAD is true, uses the HTTP HEAD method, which saves bandwidth
by not downloading the body. Otherwise, the HTTP GET method is used.
@rtype: unicode or None
"""
conn = self.getConnection()
try:
if useHEAD:
conn.request('HEAD', '%s%s' % (self.path, self.query), None,
self.header)
else:
conn.request('GET', '%s%s' % (self.path, self.query), None,
self.header)
self.response = conn.getresponse()
# read the server's encoding, in case we need it later
self.readEncodingFromResponse(self.response)
except httplib.BadStatusLine:
# Some servers don't seem to handle HEAD requests properly,
# e.g. http://www.radiorus.ru/ which is running on a very old
# Apache server. Using GET instead works on these (but it uses
# more bandwidth).
if useHEAD:
return self.resolveRedirect(useHEAD=False)
else:
raise
if self.response.status >= 300 and self.response.status <= 399:
# to debug, print response.getheaders()
redirTarget = self.response.getheader('Location')
if redirTarget:
try:
redirTarget.encode('ascii')
except UnicodeError:
redirTarget = redirTarget.decode(
self.getEncodingUsedByServer())
if redirTarget.startswith('http://') or \
redirTarget.startswith('https://'):
self.changeUrl(redirTarget)
return True
elif redirTarget.startswith('/'):
self.changeUrl(u'%s://%s%s'
% (self.protocol, self.host, redirTarget))
return True
else: # redirect to relative position
# cut off filename
directory = self.path[:self.path.rindex('/') + 1]
# handle redirect to parent directory
while redirTarget.startswith('../'):
redirTarget = redirTarget[3:]
# some servers redirect to .. although we are already
# in the root directory; ignore this.
if directory != '/':
# change /foo/bar/ to /foo/
directory = directory[:-1]
directory = directory[:directory.rindex('/') + 1]
self.changeUrl('%s://%s%s%s'
% (self.protocol, self.host, directory,
redirTarget))
return True
else:
return False # not a redirect
def check(self, useHEAD=False):
"""
Return True and the server status message if the page is alive.
@rtype: tuple of (bool, unicode)
"""
try:
wasRedirected = self.resolveRedirect(useHEAD=useHEAD)
except UnicodeError as error:
return False, u'Encoding Error: %s (%s)' % (
error.__class__.__name__, error)
except httplib.error as error:
return False, u'HTTP Error: %s' % error.__class__.__name__
except socket.error as error:
# https://docs.python.org/2/library/socket.html :
# socket.error :
# The accompanying value is either a string telling what went
# wrong or a pair (errno, string) representing an error
# returned by a system call, similar to the value
# accompanying os.error
if isinstance(error, basestring):
msg = error
else:
try:
msg = error[1]
except IndexError:
print(u'### DEBUG information for #2972249')
raise IndexError(type(error))
# TODO: decode msg. On Linux, it's encoded in UTF-8.
# How is it encoded in Windows? Or can we somehow just
# get the English message?
return False, u'Socket Error: %s' % repr(msg)
if wasRedirected:
if self.url in self.redirectChain:
if useHEAD:
# Some servers don't seem to handle HEAD requests properly,
# which leads to a cyclic list of redirects.
# We simply start from the beginning, but this time,
# we don't use HEAD, but GET requests.
redirChecker = LinkChecker(
self.redirectChain[0],
serverEncoding=self.serverEncoding,
HTTPignore=self.HTTPignore)
return redirChecker.check(useHEAD=False)
else:
urlList = ['[%s]' % url
for url in self.redirectChain + [self.url]]
return (False,
u'HTTP Redirect Loop: %s' % ' -> '.join(urlList))
elif len(self.redirectChain) >= 19:
if useHEAD:
# Some servers don't seem to handle HEAD requests properly,
# which leads to a long (or infinite) list of redirects.
# We simply start from the beginning, but this time,
# we don't use HEAD, but GET requests.
redirChecker = LinkChecker(
self.redirectChain[0],
serverEncoding=self.serverEncoding,
HTTPignore=self.HTTPignore)
return redirChecker.check(useHEAD=False)
else:
urlList = ['[%s]' % url
for url in self.redirectChain + [self.url]]
return (False,
u'Long Chain of Redirects: %s'
% ' -> '.join(urlList))
else:
redirChecker = LinkChecker(self.url, self.redirectChain,
self.serverEncoding,
HTTPignore=self.HTTPignore)
return redirChecker.check(useHEAD=useHEAD)
else:
try:
conn = self.getConnection()
except httplib.error as error:
return False, u'HTTP Error: %s' % error.__class__.__name__
try:
conn.request('GET', '%s%s'
% (self.path, self.query), None, self.header)
except socket.error as error:
return False, u'Socket Error: %s' % repr(error[1])
try:
self.response = conn.getresponse()
except Exception as error:
return False, u'Error: %s' % error
# read the server's encoding, in case we need it later
self.readEncodingFromResponse(self.response)
# site down if the server status is between 400 and 499
alive = self.response.status not in range(400, 500)
if self.response.status in self.HTTPignore:
alive = False
return alive, '%s %s' % (self.response.status, self.response.reason)
class LinkCheckThread(threading.Thread):
"""A thread responsible for checking one URL.
After checking the page, it will die.
"""
def __init__(self, page, url, history, HTTPignore, day):
threading.Thread.__init__(self)
self.page = page
self.url = url
self.history = history
# identification for debugging purposes
self.setName((u'%s - %s' % (page.title(), url)).encode('utf-8',
'replace'))
self.HTTPignore = HTTPignore
self.day = day
def run(self):
linkChecker = LinkChecker(self.url, HTTPignore=self.HTTPignore)
try:
ok, message = linkChecker.check()
except NotAnURLError:
ok = False
message = i18n.twtranslate(self.page.site,
'weblinkchecker-badurl_msg',
{'URL': self.url})
except:
pywikibot.output('Exception while processing URL %s in page %s'
% (self.url, self.page.title()))
raise
if ok:
if self.history.setLinkAlive(self.url):
pywikibot.output('*Link to %s in [[%s]] is back alive.'
% (self.url, self.page.title()))
else:
pywikibot.output('*[[%s]] links to %s - %s.'
% (self.page.title(), self.url, message))
self.history.setLinkDead(self.url, message, self.page, self.day)
class History:
"""
Store previously found dead links.
The URLs are dictionary keys, and
values are lists of tuples where each tuple represents one time the URL was
found dead. Tuples have the form (title, date, error) where title is the
wiki page where the URL was found, date is an instance of time, and error is
a string with error code and message.
We assume that the first element in the list represents the first time we
found this dead link, and the last element represents the last time.
Example:
dict = {
'https://www.example.org/page': [
('WikiPageTitle', DATE, '404: File not found'),
('WikiPageName2', DATE, '404: File not found'),
        ]
    }
    """
def __init__(self, reportThread):
self.reportThread = reportThread
self.site = pywikibot.Site()
self.semaphore = threading.Semaphore()
self.datfilename = pywikibot.config.datafilepath(
'deadlinks', 'deadlinks-%s-%s.dat' % (self.site.family.name, self.site.code))
# Count the number of logged links, so that we can insert captions
# from time to time
self.logCount = 0
try:
with open(self.datfilename, 'rb') as datfile:
self.historyDict = pickle.load(datfile)
except (IOError, EOFError):
# no saved history exists yet, or history dump broken
self.historyDict = {}
def log(self, url, error, containingPage, archiveURL):
"""Log an error report to a text file in the deadlinks subdirectory."""
if archiveURL:
errorReport = u'* %s ([%s archive])\n' % (url, archiveURL)
else:
errorReport = u'* %s\n' % url
for (pageTitle, date, error) in self.historyDict[url]:
# ISO 8601 formulation
isoDate = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(date))
errorReport += "** In [[%s]] on %s, %s\n" % (pageTitle, isoDate,
error)
pywikibot.output(u"** Logging link for deletion.")
txtfilename = pywikibot.config.datafilepath('deadlinks',
'results-%s-%s.txt'
% (self.site.family.name,
self.site.lang))
txtfile = codecs.open(txtfilename, 'a', 'utf-8')
self.logCount += 1
if self.logCount % 30 == 0:
# insert a caption
txtfile.write('=== %s ===\n' % containingPage.title()[:3])
txtfile.write(errorReport)
txtfile.close()
if self.reportThread and not containingPage.isTalkPage():
self.reportThread.report(url, errorReport, containingPage,
archiveURL)
def setLinkDead(self, url, error, page, day):
"""Add the fact that the link was found dead to the .dat file."""
self.semaphore.acquire()
now = time.time()
if url in self.historyDict:
timeSinceFirstFound = now - self.historyDict[url][0][1]
timeSinceLastFound = now - self.historyDict[url][-1][1]
# if the last time we found this dead link is less than an hour
# ago, we won't save it in the history this time.
if timeSinceLastFound > 60 * 60:
self.historyDict[url].append((page.title(), now, error))
# if the first time we found this link longer than x day ago
# (default is a week), it should probably be fixed or removed.
# We'll list it in a file so that it can be removed manually.
if timeSinceFirstFound > 60 * 60 * 24 * day:
# search for archived page
archiveURL = weblib.getInternetArchiveURL(url)
if archiveURL is None:
archiveURL = weblib.getWebCitationURL(url)
self.log(url, error, page, archiveURL)
else:
self.historyDict[url] = [(page.title(), now, error)]
self.semaphore.release()
def setLinkAlive(self, url):
"""
Record that the link is now alive.
If link was previously found dead, remove it from the .dat file.
@return: True if previously found dead, else returns False.
"""
if url in self.historyDict:
self.semaphore.acquire()
try:
del self.historyDict[url]
except KeyError:
# Not sure why this can happen, but I guess we can ignore this.
pass
self.semaphore.release()
return True
else:
return False
def save(self):
"""Save the .dat file to disk."""
with open(self.datfilename, 'wb') as f:
pickle.dump(self.historyDict, f, protocol=config.pickle_protocol)
class DeadLinkReportThread(threading.Thread):
"""
A Thread that is responsible for posting error reports on talk pages.
There is only one DeadLinkReportThread, and it is using a semaphore to make
sure that two LinkCheckerThreads can not access the queue at the same time.
"""
def __init__(self):
threading.Thread.__init__(self)
self.semaphore = threading.Semaphore()
self.queue = []
self.finishing = False
self.killed = False
def report(self, url, errorReport, containingPage, archiveURL):
"""Report error on talk page of the page containing the dead link."""
self.semaphore.acquire()
self.queue.append((url, errorReport, containingPage, archiveURL))
self.semaphore.release()
def shutdown(self):
self.finishing = True
def kill(self):
# TODO: remove if unneeded
self.killed = True
def run(self):
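        """Post queued error reports to the relevant talk pages until shut down."""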
while not self.killed:
if len(self.queue) == 0:
if self.finishing:
break
else:
time.sleep(0.1)
else:
self.semaphore.acquire()
(url, errorReport, containingPage, archiveURL) = self.queue[0]
self.queue = self.queue[1:]
talkPage = containingPage.toggleTalkPage()
pywikibot.output(
u'\03{lightaqua}** Reporting dead link on %s...\03{default}'
% talkPage.title(asLink=True))
try:
content = talkPage.get() + "\n\n"
if url in content:
pywikibot.output(
u'\03{lightaqua}** Dead link seems to have already '
u'been reported on %s\03{default}'
% talkPage.title(asLink=True))
self.semaphore.release()
continue
except (pywikibot.NoPage, pywikibot.IsRedirectPage):
content = u''
if archiveURL:
archiveMsg = u'\n' + \
i18n.twtranslate(containingPage.site,
'weblinkchecker-archive_msg',
{'URL': archiveURL})
else:
archiveMsg = u''
# The caption will default to "Dead link". But if there is
# already such a caption, we'll use "Dead link 2",
# "Dead link 3", etc.
caption = i18n.twtranslate(containingPage.site,
'weblinkchecker-caption')
i = 1
count = u''
# Check if there is already such a caption on the talk page.
while re.search('= *%s%s *=' % (caption, count),
content) is not None:
i += 1
count = u' ' + str(i)
caption += count
content += '\n\n== %s ==\n\n%s\n\n%s%s--~~~~' % \
(caption,
i18n.twtranslate(containingPage.site,
'weblinkchecker-report'),
errorReport,
archiveMsg)
comment = u'[[%s#%s|→]] %s' % \
(talkPage.title(), caption,
i18n.twtranslate(containingPage.site,
'weblinkchecker-summary'))
try:
talkPage.put(content, comment)
except pywikibot.SpamfilterError as error:
pywikibot.output(
u'\03{lightaqua}** SpamfilterError while trying to '
u'change %s: %s\03{default}'
% (talkPage.title(asLink=True), error.url))
self.semaphore.release()
class WeblinkCheckerRobot:
"""
Bot which will search for dead weblinks.
It uses several LinkCheckThreads at once to process pages from generator.
"""
def __init__(self, generator, HTTPignore=None, day=7):
self.generator = generator
if config.report_dead_links_on_talk:
pywikibot.log("Starting talk page thread")
reportThread = DeadLinkReportThread()
# thread dies when program terminates
# reportThread.setDaemon(True)
reportThread.start()
else:
reportThread = None
self.history = History(reportThread)
if HTTPignore is None:
self.HTTPignore = []
else:
self.HTTPignore = HTTPignore
self.day = day
def run(self):
for page in self.generator:
self.checkLinksIn(page)
def checkLinksIn(self, page):
try:
text = page.get()
except pywikibot.NoPage:
pywikibot.output(u'%s does not exist.' % page.title())
return
for url in weblinksIn(text):
ignoreUrl = False
for ignoreR in ignorelist:
if ignoreR.match(url):
ignoreUrl = True
if not ignoreUrl:
# Limit the number of threads started at the same time. Each
# thread will check one page, then die.
while threading.activeCount() >= config.max_external_links:
# wait 100 ms
time.sleep(0.1)
thread = LinkCheckThread(page, url, self.history,
self.HTTPignore, self.day)
# thread dies when program terminates
thread.setDaemon(True)
thread.start()
def RepeatPageGenerator():
"""Generator for pages in History."""
history = History(None)
pageTitles = set()
for value in history.historyDict.values():
for entry in value:
pageTitles.add(entry[0])
for pageTitle in sorted(pageTitles):
page = pywikibot.Page(pywikibot.Site(), pageTitle)
yield page
def countLinkCheckThreads():
"""
Count LinkCheckThread threads.
@return: number of LinkCheckThread threads
@rtype: int
"""
i = 0
for thread in threading.enumerate():
if isinstance(thread, LinkCheckThread):
i += 1
return i
def check(url):
"""Peform a check on URL."""
c = LinkChecker(url)
return c.check()
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
gen = None
xmlFilename = None
HTTPignore = []
day = 7
# Process global args and prepare generator args parser
local_args = pywikibot.handle_args(args)
genFactory = pagegenerators.GeneratorFactory()
for arg in local_args:
if arg == '-talk':
config.report_dead_links_on_talk = True
elif arg == '-notalk':
config.report_dead_links_on_talk = False
elif arg == '-repeat':
gen = RepeatPageGenerator()
elif arg.startswith('-ignore:'):
HTTPignore.append(int(arg[8:]))
elif arg.startswith('-day:'):
day = int(arg[5:])
elif arg.startswith('-xmlstart'):
if len(arg) == 9:
xmlStart = pywikibot.input(
u'Please enter the dumped article to start with:')
else:
xmlStart = arg[10:]
elif arg.startswith('-xml'):
if len(arg) == 4:
xmlFilename = i18n.input('pywikibot-enter-xml-filename')
else:
xmlFilename = arg[5:]
else:
genFactory.handleArg(arg)
if xmlFilename:
try:
xmlStart
except NameError:
xmlStart = None
gen = XmlDumpPageGenerator(xmlFilename, xmlStart, genFactory.namespaces)
if not gen:
gen = genFactory.getCombinedGenerator()
if gen:
# fetch at least 240 pages simultaneously from the wiki, but more if
# a high thread number is set.
pageNumber = max(240, config.max_external_links * 2)
gen = pagegenerators.PreloadingGenerator(gen, step=pageNumber)
gen = pagegenerators.RedirectFilterPageGenerator(gen)
bot = WeblinkCheckerRobot(gen, HTTPignore, day)
try:
bot.run()
finally:
waitTime = 0
# Don't wait longer than 30 seconds for threads to finish.
while countLinkCheckThreads() > 0 and waitTime < 30:
try:
pywikibot.output(u"Waiting for remaining %i threads to "
u"finish, please wait..."
% countLinkCheckThreads())
# wait 1 second
time.sleep(1)
waitTime += 1
except KeyboardInterrupt:
pywikibot.output(u'Interrupted.')
break
if countLinkCheckThreads() > 0:
pywikibot.output(u'Remaining %i threads will be killed.'
% countLinkCheckThreads())
# Threads will die automatically because they are daemonic.
if bot.history.reportThread:
bot.history.reportThread.shutdown()
# wait until the report thread is shut down; the user can
# interrupt it by pressing CTRL-C.
try:
while bot.history.reportThread.isAlive():
time.sleep(0.1)
except KeyboardInterrupt:
pywikibot.output(u'Report thread interrupted.')
bot.history.reportThread.kill()
pywikibot.output(u'Saving history...')
bot.history.save()
else:
pywikibot.showHelp()
if __name__ == "__main__":
main()
|
{
"content_hash": "f01cae2b61ca12816131b8282a1ff7ee",
"timestamp": "",
"source": "github",
"line_count": 944,
"max_line_length": 124,
"avg_line_length": 39.293432203389834,
"alnum_prop": 0.5472191518615372,
"repo_name": "hperala/kontuwikibot",
"id": "1e048a8d59cd50fdf20ab893cfe20e7ce28d0d2c",
"size": "37138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/weblinkchecker.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "C",
"bytes": "137889"
},
{
"name": "C++",
"bytes": "4113"
},
{
"name": "Python",
"bytes": "3758566"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
import tensorflow as tf
from planet.tools import nested
from planet.tools import shape
def overshooting(
cell, target, embedded, prev_action, length, amount, posterior=None,
ignore_input=False):
"""Perform open loop rollouts from the posteriors at every step.
First, we apply the encoder to embed raw inputs and apply the model to obtain
posterior states for every time step. Then, we perform `amount` long open
loop rollouts from these posteriors.
Note that the actions should be those leading to the current time step. So
under common convention, it contains the last actions while observations are
the current ones.
Input:
target, embedded:
[A B C D E F] [A B C D E ]
prev_action:
[0 A B C D E] [0 A B C D ]
length:
[6 5]
amount:
3
Output:
prior, posterior, target:
[A B C D E F] [A B C D E ]
[B C D E F ] [B C D E ]
[C D E F ] [C D E ]
[D E F ] [D E ]
mask:
[1 1 1 1 1 1] [1 1 1 1 1 0]
[1 1 1 1 1 0] [1 1 1 1 0 0]
[1 1 1 1 0 0] [1 1 1 0 0 0]
[1 1 1 0 0 0] [1 1 0 0 0 0]
"""
# Closed loop unroll to get posterior states, which are the starting points
# for open loop unrolls. We don't need the last time step, since we have no
# targets for unrolls from it.
if posterior is None:
use_obs = tf.ones(tf.shape(
nested.flatten(embedded)[0][:, :, :1])[:3], tf.bool)
use_obs = tf.cond(
tf.convert_to_tensor(ignore_input),
lambda: tf.zeros_like(use_obs, tf.bool),
lambda: use_obs)
(_, posterior), _ = tf.nn.dynamic_rnn(
cell, (embedded, prev_action, use_obs), length, dtype=tf.float32,
swap_memory=True)
# Arrange inputs for every iteration in the open loop unroll. Every loop
# iteration below corresponds to one row in the docstring illustration.
max_length = shape.shape(nested.flatten(embedded)[0])[1]
first_output = {
# 'observ': embedded,
'prev_action': prev_action,
'posterior': posterior,
'target': target,
'mask': tf.sequence_mask(length, max_length, tf.int32),
}
progress_fn = lambda tensor: tf.concat([tensor[:, 1:], 0 * tensor[:, :1]], 1)
other_outputs = tf.scan(
lambda past_output, _: nested.map(progress_fn, past_output),
tf.range(amount), first_output)
sequences = nested.map(
lambda lhs, rhs: tf.concat([lhs[None], rhs], 0),
first_output, other_outputs)
# Merge batch and time dimensions of steps to compute unrolls from every
# time step as one batch. The time dimension becomes the number of
# overshooting distances.
sequences = nested.map(
lambda tensor: _merge_dims(tensor, [1, 2]),
sequences)
sequences = nested.map(
lambda tensor: tf.transpose(
tensor, [1, 0] + list(range(2, tensor.shape.ndims))),
sequences)
merged_length = tf.reduce_sum(sequences['mask'], 1)
# Mask out padding frames; unnecessary if the input is already masked.
sequences = nested.map(
lambda tensor: tensor * tf.cast(
_pad_dims(sequences['mask'], tensor.shape.ndims),
tensor.dtype),
sequences)
# Compute open loop rollouts.
use_obs = tf.zeros(tf.shape(sequences['mask']), tf.bool)[..., None]
embed_size = nested.flatten(embedded)[0].shape[2].value
obs = tf.zeros(shape.shape(sequences['mask']) + [embed_size])
prev_state = nested.map(
lambda tensor: tf.concat([0 * tensor[:, :1], tensor[:, :-1]], 1),
posterior)
prev_state = nested.map(
lambda tensor: _merge_dims(tensor, [0, 1]), prev_state)
(priors, _), _ = tf.nn.dynamic_rnn(
cell, (obs, sequences['prev_action'], use_obs),
merged_length,
prev_state)
# Restore batch dimension.
target, prior, posterior, mask = nested.map(
functools.partial(_restore_batch_dim, batch_size=shape.shape(length)[0]),
(sequences['target'], priors, sequences['posterior'], sequences['mask']))
mask = tf.cast(mask, tf.bool)
return target, prior, posterior, mask
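# Shape note (added for clarity, not part of the original module): with batch
# size B, padded sequence length T and overshooting `amount` K, the returned
# `target`, `prior`, `posterior` and `mask` tensors are indexed as
# [B, T, K + 1, ...]; the third axis is the overshooting distance and
# corresponds to the rows of the docstring illustration above.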
def _merge_dims(tensor, dims):
"""Flatten consecutive axes of a tensor trying to preserve static shapes."""
if isinstance(tensor, (list, tuple, dict)):
    return nested.map(lambda x: _merge_dims(x, dims), tensor)
tensor = tf.convert_to_tensor(tensor)
  if (np.array(dims) - min(dims) != np.arange(len(dims))).any():
raise ValueError('Dimensions to merge must all follow each other.')
start, end = dims[0], dims[-1]
output = tf.reshape(tensor, tf.concat([
tf.shape(tensor)[:start],
[tf.reduce_prod(tf.shape(tensor)[start: end + 1])],
tf.shape(tensor)[end + 1:]], axis=0))
merged = tensor.shape[start: end + 1].as_list()
output.set_shape(
tensor.shape[:start].as_list() +
[None if None in merged else np.prod(merged)] +
tensor.shape[end + 1:].as_list())
return output
def _pad_dims(tensor, rank):
"""Append empty dimensions to the tensor until it is of the given rank."""
for _ in range(rank - tensor.shape.ndims):
tensor = tensor[..., None]
return tensor
def _restore_batch_dim(tensor, batch_size):
"""Split batch dimension out of the first dimension of a tensor."""
initial = shape.shape(tensor)
desired = [batch_size, initial[0] // batch_size] + initial[1:]
return tf.reshape(tensor, desired)
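if __name__ == '__main__':
  # Minimal self-check of the shape helpers above (an illustrative sketch, not
  # part of the library). Assumes the `planet` package is importable and a
  # TF1-style graph/session, as used elsewhere in this file.
  demo = tf.reshape(tf.range(24), [2, 3, 4])
  merged = _merge_dims(demo, [0, 1])                    # static shape [6, 4]
  padded = _pad_dims(tf.ones([2, 3]), 4)                # shape [2, 3, 1, 1]
  restored = _restore_batch_dim(merged, batch_size=2)   # back to [2, 3, 4]
  with tf.Session() as sess:
    print(sess.run([tf.shape(merged), tf.shape(padded), tf.shape(restored)]))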
|
{
"content_hash": "fe1fca076f956c5263feff6d0d070325",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 79,
"avg_line_length": 33.75308641975309,
"alnum_prop": 0.6391733723482077,
"repo_name": "google-research/planet",
"id": "527120e40b036d31e16fac97f1a4ba844cee1183",
"size": "6074",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "planet/tools/overshooting.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "251609"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
setup(
name='xes',
version='1.2',
packages=['xes'],
url='http://pypi.python.org/pypi/xes/',
license='Apache License 2.0',
author='Jonathan Sumrall',
author_email='j.m.sumrall@student.tue.nl',
description='A simple tool for generating XES files for Process Mining',
long_description=open('README.txt').read()
)
|
{
"content_hash": "63ed64214e72014fd897a05f725a2124",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 76,
"avg_line_length": 29.153846153846153,
"alnum_prop": 0.6675461741424802,
"repo_name": "msurkovsky/xes",
"id": "b8ce2dd0d9aacd80484fb964fb0c89b927513b63",
"size": "379",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6633"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from six import BytesIO
from sentry.models import File, OrganizationAvatar
from sentry.testutils import TestCase
from sentry.web.frontend.generic import FOREVER_CACHE
class OrganizationAvatarTest(TestCase):
def test_headers(self):
org = self.create_organization()
photo = File.objects.create(name='test.png', type='avatar.file')
photo.putfile(BytesIO(b'test'))
avatar = OrganizationAvatar.objects.create(organization=org, file=photo)
url = reverse('sentry-organization-avatar-url', kwargs={'avatar_id': avatar.ident})
response = self.client.get(url)
assert response.status_code == 200
assert response['Cache-Control'] == FOREVER_CACHE
assert response.get('Vary') is None
assert response.get('Set-Cookie') is None
|
{
"content_hash": "f15f4b9c76cc1c793d0a180bb0bd7627",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 91,
"avg_line_length": 40.22727272727273,
"alnum_prop": 0.7141242937853107,
"repo_name": "ifduyue/sentry",
"id": "b22be2884b9f1567ff04fa41ba82b604e8c630d7",
"size": "885",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/sentry/web/frontend/test_organization_avatar.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "301292"
},
{
"name": "HTML",
"bytes": "241298"
},
{
"name": "JavaScript",
"bytes": "3295572"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "6892"
},
{
"name": "Python",
"bytes": "36910084"
},
{
"name": "Ruby",
"bytes": "217"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
}
|
import os
import subprocess
from process import process
from main.models import Client, Show, Location, Episode, Raw_File
import rax_uploader
import gslevels
class SyncRax(process):
def cdn_exists(self, show, dst):
dst = os.path.join("veyepar",show.client.slug,show.slug,dst)
return dst in self.names
def mk_audio_png(self,src,png_name):
"""
make audio png from source,
src can be http:// or file://
dst is the local fs.
"""
p = gslevels.Make_png()
p.uri = src
p.verbose = self.options.verbose
p.setup()
p.start()
ret = p.mk_png(png_name)
return ret
def mk_final_audio_png(self,ep):
""" whack to catch up
        if the ep doesn't have a png on the local fs,
make it from the public webm.
"""
png_name = os.path.join(
self.show_dir,"webm", ep.slug + "_audio.png")
# if not os.path.exists(png_name):
ret = self.mk_audio_png(ep.public_url,png_name)
return ret
def rf_web(self, show, rf):
"""
make a low bitrate version of the raw file
for previewing over the web
"""
base = os.path.join( "dv", rf.location.slug, rf.basename() )
if self.options.verbose: print base
# look for .webm on local file system
rf = os.path.join(self.show_dir, base + ".dv")
web = os.path.join(self.show_dir, base + ".webm")
if not os.path.exists(web):
cmd = "melt {rf} -consumer avformat:{out} vb=50k progress=1".format( rf=rf, out=web ).split()
p=subprocess.Popen(cmd)
p.wait()
retcode=p.returncode
web = base + ".webm"
if not self.cdn_exists(show,web):
self.file2cdn(show,web)
def rf_audio_png(self, show, rf):
# check for audio image
rf_base = os.path.join( "dv",
rf.location.slug, rf.filename )
png_base = os.path.join( "audio_png", "dv",
rf.location.slug, rf.basename() + "_audio.png")
if not self.cdn_exists(show,png_base):
print rf.filesize
src = os.path.join(self.show_dir,rf_base)
dst = os.path.join(self.show_dir,png_base)
ret = self.mk_audio_png(src,dst)
self.file2cdn(show,png_base)
def raw_files(self, show):
print "getting raw files..."
rfs = Raw_File.objects.filter(show=show,)
if self.options.day:
rfs = rfs.filter(start__day=self.options.day)
if self.options.room:
loc = Location.objects.get(slug=self.options.room)
rfs = rfs.filter(location = loc)
# rfs = rfs.cut_list_set.filter(episode__id=8748)
# rfs = rfs.cut_list_set.filter(episode__state=1)
for rf in rfs:
if self.options.verbose: print rf
self.rf_web(show, rf)
# self.rf_audio_png(show, rf)
def sync_final(self,show,ep):
base = os.path.join("webm", ep.slug + ".webm" )
if not self.cdn_exists(show,base):
self.file2cdn(show,base)
def sync_final_audio_png(self,show,ep):
base = os.path.join("webm", ep.slug + "_audio.png" )
if not self.cdn_exists(show,base):
png_name = os.path.join( self.show_dir, base )
ret = self.mk_audio_png(ep.public_url,png_name)
self.file2cdn(show,base)
def sync_title_png(self,show,ep):
base = os.path.join("titles", ep.slug + ".png" )
p = u"base:{}".format(base)
print(p)
if not self.cdn_exists(show,base):
png_name = os.path.join( self.show_dir, base )
self.file2cdn(show,base)
def episodes(self, show):
eps = Episode.objects.filter(show=show)
if self.options.day:
eps = eps.filter(start__day=self.options.day)
if self.options.room:
loc = Location.objects.get(slug=self.options.room)
eps = eps.filter(location = loc)
# for ep in eps.filter(state=5):
# self.sync_final(show,ep)
# self.sync_final_audio_png(show,ep)
for ep in eps.filter(state=1):
print(ep)
# self.sync_title_png(show,ep)
# import code; code.interact(local=locals())
for cl in ep.cut_list_set.all():
self.rf_web(show, cl.raw_file)
def init_rax(self, show):
# user = self.show.client.rax_id
# bucket_id = self.show.client.bucket_id
# user = self.options.cloud_user
# bucket_id = self.options.rax_bucket
user = show.client.rax_id
bucket_id = show.client.bucket_id
cf = rax_uploader.auth(user)
print "cf.get_all_containers", cf.get_all_containers()
container = cf.get_container(bucket_id)
objects = container.get_objects()
print "loading names..."
self.names = {o.name for o in objects}
print "loaded."
def one_show(self, show):
self.set_dirs(show)
self.init_rax(show)
# self.raw_files(show)
self.episodes(show)
def work(self):
"""
find and process show
"""
if self.options.client and self.options.show:
client = Client.objects.get(slug=self.options.client)
show = Show.objects.get(client=client, slug=self.options.show)
self.one_show(show)
return
if __name__=='__main__':
p=SyncRax()
p.main()
|
{
"content_hash": "d528929a19a601122182e85c00a2ea0d",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 105,
"avg_line_length": 29.9247311827957,
"alnum_prop": 0.5544376572044556,
"repo_name": "EricSchles/veyepar",
"id": "75041a6f3a945aaf45e9d2b39eece1d0921494de",
"size": "5783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dj/scripts/sync_rax.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1772"
},
{
"name": "HTML",
"bytes": "73422"
},
{
"name": "JavaScript",
"bytes": "38788"
},
{
"name": "Python",
"bytes": "619994"
},
{
"name": "Ruby",
"bytes": "7581"
},
{
"name": "Shell",
"bytes": "80531"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import pkgutil
__path__ = pkgutil.extend_path(__path__, __name__)
|
{
"content_hash": "bc556f027f236592fc0ccb6f75f217d7",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 50,
"avg_line_length": 35,
"alnum_prop": 0.6857142857142857,
"repo_name": "DarkEnergyScienceCollaboration/Monitor",
"id": "4e7800b098e9599d58400ba5f8303fc73b8981a5",
"size": "105",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "python/desc/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "59904"
},
{
"name": "Shell",
"bytes": "2687"
}
],
"symlink_target": ""
}
|
import argparse
import sys
import os
import numpy as np
import urllib, urllib2
import cStringIO
from contextlib import closing
from PIL import Image
import zlib
sys.path += [os.path.abspath('../django')]
import OCP.settings
os.environ['DJANGO_SETTINGS_MODULE'] = 'OCP.settings'
from django.conf import settings
import django
django.setup()
from cube import Cube
import ocpcarest
from ndctypelib import XYZMorton
import ocpcaproj
import ocpcadb
"""Construct an image hierarchy up from a given resolution for 16-bit images"""
def buildStack(token, channel, res):
"""Build the hierarchy of images"""
with closing (ocpcaproj.OCPCAProjectsDB()) as projdb:
proj = projdb.loadToken(token)
with closing (ocpcadb.OCPCADB(proj)) as db:
ch = proj.getChannelObj(channel)
high_res = proj.datasetcfg.scalinglevels
for cur_res in range(res, high_res+1):
# Get the source database sizes
[[ximagesz, yimagesz, zimagesz], timerange] = proj.datasetcfg.imageSize(cur_res)
[xcubedim, ycubedim, zcubedim] = cubedim = proj.datasetcfg.getCubeDims()[cur_res]
[xoffset, yoffset, zoffset] = proj.datasetcfg.getOffset()[cur_res]
biggercubedim = [xcubedim*2,ycubedim*2,zcubedim]
# Set the limits for iteration on the number of cubes in each dimension
xlimit = (ximagesz-1) / xcubedim + 1
ylimit = (yimagesz-1) / ycubedim + 1
zlimit = (zimagesz-1) / zcubedim + 1
for z in range(zlimit):
for y in range(ylimit):
for x in range(xlimit):
# cutout the data at the -1 resolution
olddata = db.cutout(ch, [ x*2*xcubedim, y*2*ycubedim, z*zcubedim], biggercubedim, cur_res-1 ).data
# target array for the new data (z,y,x) order
newdata = np.zeros([zcubedim,ycubedim,xcubedim], dtype=np.uint16)
for sl in range(zcubedim):
# Convert each slice to an image
slimage = Image.frombuffer ( 'I;16', (xcubedim*2,ycubedim*2), olddata[sl,:,:].flatten(), 'raw', 'I;16', 0, 1 )
# Resize the image
newimage = slimage.resize ( [xcubedim,ycubedim] )
# Put to a new cube
newdata[sl,:,:] = np.asarray ( newimage )
zidx = XYZMorton ( [x,y,z] )
cube = Cube.getCube(cubedim, ch.getChannelType(), ch.getDataType())
cube.zeros()
cube.data = newdata
print "Inserting Cube {} at res {}".format(zidx, cur_res)
db.putCube(ch, zidx, cur_res, cube, update=True)
def main():
parser = argparse.ArgumentParser(description='Build an image stack')
parser.add_argument('token', action="store", help='Token for the project.')
parser.add_argument('channel', action="store", help='Channel for the project.')
parser.add_argument('resolution', action="store", type=int, help='Start (highest) resolution to build')
result = parser.parse_args()
buildStack(result.token, result.channel, result.resolution)
if __name__ == "__main__":
main()
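# Example invocation (illustrative only; the token and channel names below are
# placeholders, not real projects):
#
#   python imgstack16.py my_token my_channel 1
#
# This builds every resolution from 1 up to the dataset's scalinglevels by
# downsampling each level from the level below it.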
|
{
"content_hash": "5b261fd149a86f2d5c0f8c2217f199ed",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 124,
"avg_line_length": 32.75268817204301,
"alnum_prop": 0.6457649376231123,
"repo_name": "openconnectome/open-connectome",
"id": "f808493fa87c5953d2ad15cd2efaaf9116e72dff",
"size": "3645",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/imgstack16.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "43100"
},
{
"name": "C++",
"bytes": "23724"
},
{
"name": "CSS",
"bytes": "53255"
},
{
"name": "HTML",
"bytes": "142332"
},
{
"name": "JavaScript",
"bytes": "303249"
},
{
"name": "Makefile",
"bytes": "2273"
},
{
"name": "Python",
"bytes": "1409968"
},
{
"name": "Shell",
"bytes": "5637"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from django.conf import settings
from purl import URL
import requests
from ..models import FMAArtist
from .utils import rate_limited
FIELDS = ['created', 'modified', 'artist_id', 'artist_handle', 'artist_url',
'artist_name', 'artist_bio', 'artist_members', 'artist_website',
'artist_wikipedia_page', 'artist_donation_url', 'artist_contact',
'artist_active_year_begin', 'artist_active_year_end',
'artist_related_projects', 'artist_associated_labels',
'artist_comments', 'artist_favorites', 'artist_date_created',
'artist_flattr_name', 'artist_paypal_name', 'artist_latitude',
'artist_longitude', 'artist_image_file', 'artist_location',
'tags', 'artist_images']
API_URL = URL("http://freemusicarchive.org/api/get/artists.json"
"?sort_by=artist_date_created&sort_dir=desc&limit=200"
"&api_key={}".format(settings.FMA_API_KEY))
def format_date(date_str):
if len(date_str) <= 11:
date_str = "{date} {time}".format(
date=datetime.utcnow().strftime('%m/%d/%Y'),
time=date_str,
)
    return datetime.strptime(
date_str,
'%m/%d/%Y %H:%M:%S %p',
)
@rate_limited(4)
def find_on_fma(page):
"""Return artist URL for FreeMusicArchive.org"""
url = API_URL.query_param('page', page)
r = requests.get(url)
r.raise_for_status()
return r.json()
def fetch_from_fma(force_update=False):
"""Fetch new artists from FreeMusicArchive.org."""
page = 1
count = 0
while True:
results = find_on_fma(page)
for artist_data in results['dataset']:
artist_data['artist_date_created'] = format_date(
artist_data['artist_date_created'])
artist, created = FMAArtist.objects.get_or_create(
artist_id=artist_data['artist_id'],
defaults={f: v for (f, v) in artist_data.items() if f in FIELDS}
)
count += 1
if not created and not force_update:
break # Found duplicate artist
if results['dataset']:
print("Imported up to", results['dataset'][-1]['artist_name'])
if results['total_pages'] == page or (not force_update and not created):
break # Found duplicate artist or reached last page
page += 1
return count
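# Usage sketch (illustrative; assumes FMA_API_KEY is set in Django settings and
# the FMAArtist migrations have been applied):
#
#   from artists.importers.fma import fetch_from_fma
#   fetch_from_fma()                   # stops at the first already-known artist
#   fetch_from_fma(force_update=True)  # walks every page regardless of duplicates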
|
{
"content_hash": "d3f56f1a873ce7cc26d317f45a87ac0f",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 80,
"avg_line_length": 35.666666666666664,
"alnum_prop": 0.5956928078017066,
"repo_name": "FreeMusicNinja/api.freemusic.ninja",
"id": "bc6e57c9b762b00511326adc1443fc4281eddd75",
"size": "2461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "artists/importers/fma.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "135608"
}
],
"symlink_target": ""
}
|
from xml.dom import minidom
import webob
from cinder.api.contrib import qos_specs_manage
from cinder import exception
from cinder import test
from cinder.tests.api import fakes
from cinder.tests import fake_notifier
from cinder.volume import qos_specs
def stub_qos_specs(id):
res = dict(name='qos_specs_' + str(id))
res.update(dict(consumer='back-end'))
res.update(dict(id=str(id)))
specs = {"key1": "value1",
"key2": "value2",
"key3": "value3",
"key4": "value4",
"key5": "value5"}
res.update(dict(specs=specs))
return res
def stub_qos_associates(id):
return [{
'association_type': 'volume_type',
'name': 'FakeVolTypeName',
'id': 'FakeVolTypeID'}]
def return_qos_specs_get_all(context):
return [
stub_qos_specs(1),
stub_qos_specs(2),
stub_qos_specs(3),
]
def return_qos_specs_get_qos_specs(context, id):
if id == "777":
raise exception.QoSSpecsNotFound(specs_id=id)
return stub_qos_specs(int(id))
def return_qos_specs_delete(context, id, force):
if id == "777":
raise exception.QoSSpecsNotFound(specs_id=id)
elif id == "666":
raise exception.QoSSpecsInUse(specs_id=id)
pass
def return_qos_specs_delete_keys(context, id, keys):
if id == "777":
raise exception.QoSSpecsNotFound(specs_id=id)
if 'foo' in keys:
raise exception.QoSSpecsKeyNotFound(specs_id=id,
specs_key='foo')
def return_qos_specs_update(context, id, specs):
if id == "777":
raise exception.QoSSpecsNotFound(specs_id=id)
elif id == "888":
raise exception.InvalidQoSSpecs(reason=id)
elif id == "999":
raise exception.QoSSpecsUpdateFailed(specs_id=id,
qos_specs=specs)
pass
def return_qos_specs_create(context, name, specs):
if name == "666":
raise exception.QoSSpecsExists(specs_id=name)
elif name == "555":
raise exception.QoSSpecsCreateFailed(name=id, qos_specs=specs)
pass
def return_qos_specs_get_by_name(context, name):
if name == "777":
raise exception.QoSSpecsNotFound(specs_id=name)
return stub_qos_specs(int(name.split("_")[2]))
def return_get_qos_associations(context, id):
if id == "111":
raise exception.QoSSpecsNotFound(specs_id=id)
elif id == "222":
raise exception.CinderException()
return stub_qos_associates(id)
def return_associate_qos_specs(context, id, type_id):
if id == "111":
raise exception.QoSSpecsNotFound(specs_id=id)
elif id == "222":
raise exception.QoSSpecsAssociateFailed(specs_id=id,
type_id=type_id)
elif id == "333":
raise exception.QoSSpecsDisassociateFailed(specs_id=id,
type_id=type_id)
if type_id == "1234":
raise exception.VolumeTypeNotFound(
volume_type_id=type_id)
pass
def return_disassociate_all(context, id):
if id == "111":
raise exception.QoSSpecsNotFound(specs_id=id)
elif id == "222":
raise exception.QoSSpecsDisassociateFailed(specs_id=id,
type_id=None)
class QoSSpecManageApiTest(test.TestCase):
def setUp(self):
super(QoSSpecManageApiTest, self).setUp()
self.flags(host='fake')
self.controller = qos_specs_manage.QoSSpecsController()
#reset notifier drivers left over from other api/contrib tests
# NOTE(flaper87) WTF? ^^^^ Cleanups should happen in each test,
# not the purpose of this patch, though.
fake_notifier.reset()
self.addCleanup(fake_notifier.reset)
def test_index(self):
self.stubs.Set(qos_specs, 'get_all_specs',
return_qos_specs_get_all)
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs')
res = self.controller.index(req)
self.assertEqual(3, len(res['qos_specs']))
names = set()
for item in res['qos_specs']:
self.assertEqual('value1', item['specs']['key1'])
names.add(item['name'])
expected_names = ['qos_specs_1', 'qos_specs_2', 'qos_specs_3']
self.assertEqual(names, set(expected_names))
def test_index_xml_response(self):
self.stubs.Set(qos_specs, 'get_all_specs',
return_qos_specs_get_all)
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs')
res = self.controller.index(req)
req.method = 'GET'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
dom = minidom.parseString(res.body)
qos_specs_response = dom.getElementsByTagName('qos_spec')
names = set()
for qos_spec in qos_specs_response:
name = qos_spec.getAttribute('name')
names.add(name)
expected_names = ['qos_specs_1', 'qos_specs_2', 'qos_specs_3']
self.assertEqual(names, set(expected_names))
def test_qos_specs_delete(self):
self.stubs.Set(qos_specs, 'get_qos_specs',
return_qos_specs_get_qos_specs)
self.stubs.Set(qos_specs, 'delete',
return_qos_specs_delete)
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/1')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
self.controller.delete(req, 1)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
def test_qos_specs_delete_not_found(self):
self.stubs.Set(qos_specs, 'get_qos_specs',
return_qos_specs_get_qos_specs)
self.stubs.Set(qos_specs, 'delete',
return_qos_specs_delete)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/777')
self.assertRaises(webob.exc.HTTPNotFound, self.controller.delete,
req, '777')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
def test_qos_specs_delete_inuse(self):
self.stubs.Set(qos_specs, 'get_qos_specs',
return_qos_specs_get_qos_specs)
self.stubs.Set(qos_specs, 'delete',
return_qos_specs_delete)
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
self.assertRaises(webob.exc.HTTPBadRequest, self.controller.delete,
req, '666')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
def test_qos_specs_delete_inuse_force(self):
self.stubs.Set(qos_specs, 'get_qos_specs',
return_qos_specs_get_qos_specs)
self.stubs.Set(qos_specs, 'delete',
return_qos_specs_delete)
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666?force=True')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
self.assertRaises(webob.exc.HTTPInternalServerError,
self.controller.delete,
req, '666')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
def test_qos_specs_delete_keys(self):
self.stubs.Set(qos_specs, 'delete_keys',
return_qos_specs_delete_keys)
body = {"keys": ['bar', 'zoo']}
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666/delete_keys')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
self.controller.delete_keys(req, '666', body)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
def test_qos_specs_delete_keys_qos_notfound(self):
self.stubs.Set(qos_specs, 'delete_keys',
return_qos_specs_delete_keys)
body = {"keys": ['bar', 'zoo']}
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/777/delete_keys')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete_keys,
req, '777', body)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
def test_qos_specs_delete_keys_badkey(self):
self.stubs.Set(qos_specs, 'delete_keys',
return_qos_specs_delete_keys)
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/666/delete_keys')
body = {"keys": ['foo', 'zoo']}
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.delete_keys,
req, '666', body)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
def test_create(self):
self.stubs.Set(qos_specs, 'create',
return_qos_specs_create)
self.stubs.Set(qos_specs, 'get_qos_specs_by_name',
return_qos_specs_get_by_name)
body = {"qos_specs": {"name": "qos_specs_1",
"key1": "value1"}}
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
res_dict = self.controller.create(req, body)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
self.assertEqual('qos_specs_1', res_dict['qos_specs']['name'])
def test_create_conflict(self):
self.stubs.Set(qos_specs, 'create',
return_qos_specs_create)
self.stubs.Set(qos_specs, 'get_qos_specs_by_name',
return_qos_specs_get_by_name)
body = {"qos_specs": {"name": "666",
"key1": "value1"}}
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
self.assertRaises(webob.exc.HTTPConflict,
self.controller.create, req, body)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
def test_create_failed(self):
self.stubs.Set(qos_specs, 'create',
return_qos_specs_create)
self.stubs.Set(qos_specs, 'get_qos_specs_by_name',
return_qos_specs_get_by_name)
body = {"qos_specs": {"name": "555",
"key1": "value1"}}
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs')
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
self.assertRaises(webob.exc.HTTPInternalServerError,
self.controller.create, req, body)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
def _create_qos_specs_bad_body(self, body):
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs')
req.method = 'POST'
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
def test_create_no_body(self):
self._create_qos_specs_bad_body(body=None)
def test_create_missing_specs_name(self):
body = {'foo': {'a': 'b'}}
self._create_qos_specs_bad_body(body=body)
def test_create_malformed_entity(self):
body = {'qos_specs': 'string'}
self._create_qos_specs_bad_body(body=body)
def test_update(self):
self.stubs.Set(qos_specs, 'update',
return_qos_specs_update)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/555')
body = {'qos_specs': {'key1': 'value1',
'key2': 'value2'}}
res = self.controller.update(req, '555', body)
self.assertDictMatch(res, body)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
def test_update_not_found(self):
self.stubs.Set(qos_specs, 'update',
return_qos_specs_update)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/777')
body = {'qos_specs': {'key1': 'value1',
'key2': 'value2'}}
self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
req, '777', body)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
def test_update_invalid_input(self):
self.stubs.Set(qos_specs, 'update',
return_qos_specs_update)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/888')
body = {'qos_specs': {'key1': 'value1',
'key2': 'value2'}}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.update,
req, '888', body)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
def test_update_failed(self):
self.stubs.Set(qos_specs, 'update',
return_qos_specs_update)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 0)
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/999')
body = {'qos_specs': {'key1': 'value1',
'key2': 'value2'}}
self.assertRaises(webob.exc.HTTPInternalServerError,
self.controller.update,
req, '999', body)
self.assertEqual(len(fake_notifier.NOTIFICATIONS), 1)
def test_show(self):
self.stubs.Set(qos_specs, 'get_qos_specs',
return_qos_specs_get_qos_specs)
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/1')
res_dict = self.controller.show(req, '1')
self.assertEqual('1', res_dict['qos_specs']['id'])
self.assertEqual('qos_specs_1', res_dict['qos_specs']['name'])
def test_show_xml_response(self):
self.stubs.Set(qos_specs, 'get_qos_specs',
return_qos_specs_get_qos_specs)
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/1')
res = self.controller.show(req, '1')
req.method = 'GET'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
dom = minidom.parseString(res.body)
qos_spec_response = dom.getElementsByTagName('qos_spec')
qos_spec = qos_spec_response.item(0)
id = qos_spec.getAttribute('id')
name = qos_spec.getAttribute('name')
consumer = qos_spec.getAttribute('consumer')
self.assertEqual(id, u'1')
self.assertEqual(name, 'qos_specs_1')
self.assertEqual(consumer, 'back-end')
def test_get_associations(self):
self.stubs.Set(qos_specs, 'get_associations',
return_get_qos_associations)
req = fakes.HTTPRequest.blank(
'/v2/fake/qos-specs/1/associations')
res = self.controller.associations(req, '1')
self.assertEqual('FakeVolTypeName',
res['qos_associations'][0]['name'])
self.assertEqual('FakeVolTypeID',
res['qos_associations'][0]['id'])
def test_get_associations_xml_response(self):
self.stubs.Set(qos_specs, 'get_associations',
return_get_qos_associations)
req = fakes.HTTPRequest.blank('/v2/fake/qos-specs/1/associations')
res = self.controller.associations(req, '1')
req.method = 'GET'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
dom = minidom.parseString(res.body)
associations_response = dom.getElementsByTagName('associations')
association = associations_response.item(0)
id = association.getAttribute('id')
name = association.getAttribute('name')
association_type = association.getAttribute('association_type')
self.assertEqual(id, 'FakeVolTypeID')
self.assertEqual(name, 'FakeVolTypeName')
self.assertEqual(association_type, 'volume_type')
def test_get_associations_not_found(self):
self.stubs.Set(qos_specs, 'get_associations',
return_get_qos_associations)
req = fakes.HTTPRequest.blank(
'/v2/fake/qos-specs/111/associations')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.associations,
req, '111')
def test_get_associations_failed(self):
self.stubs.Set(qos_specs, 'get_associations',
return_get_qos_associations)
req = fakes.HTTPRequest.blank(
'/v2/fake/qos-specs/222/associations')
self.assertRaises(webob.exc.HTTPInternalServerError,
self.controller.associations,
req, '222')
def test_associate(self):
self.stubs.Set(qos_specs, 'get_qos_specs',
return_qos_specs_get_qos_specs)
self.stubs.Set(qos_specs, 'associate_qos_with_type',
return_associate_qos_specs)
req = fakes.HTTPRequest.blank(
'/v2/fake/qos-specs/1/associate?vol_type_id=111')
res = self.controller.associate(req, '1')
self.assertEqual(res.status_int, 202)
def test_associate_no_type(self):
self.stubs.Set(qos_specs, 'get_qos_specs',
return_qos_specs_get_qos_specs)
self.stubs.Set(qos_specs, 'associate_qos_with_type',
return_associate_qos_specs)
req = fakes.HTTPRequest.blank(
'/v2/fake/qos-specs/1/associate')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.associate, req, '1')
def test_associate_not_found(self):
self.stubs.Set(qos_specs, 'get_qos_specs',
return_qos_specs_get_qos_specs)
self.stubs.Set(qos_specs, 'associate_qos_with_type',
return_associate_qos_specs)
req = fakes.HTTPRequest.blank(
'/v2/fake/qos-specs/111/associate?vol_type_id=12')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.associate, req, '111')
req = fakes.HTTPRequest.blank(
'/v2/fake/qos-specs/1/associate?vol_type_id=1234')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.associate, req, '1')
def test_associate_fail(self):
self.stubs.Set(qos_specs, 'get_qos_specs',
return_qos_specs_get_qos_specs)
self.stubs.Set(qos_specs, 'associate_qos_with_type',
return_associate_qos_specs)
req = fakes.HTTPRequest.blank(
'/v2/fake/qos-specs/222/associate?vol_type_id=1000')
self.assertRaises(webob.exc.HTTPInternalServerError,
self.controller.associate, req, '222')
def test_disassociate(self):
self.stubs.Set(qos_specs, 'get_qos_specs',
return_qos_specs_get_qos_specs)
self.stubs.Set(qos_specs, 'disassociate_qos_specs',
return_associate_qos_specs)
req = fakes.HTTPRequest.blank(
'/v2/fake/qos-specs/1/disassociate?vol_type_id=111')
res = self.controller.disassociate(req, '1')
self.assertEqual(res.status_int, 202)
def test_disassociate_no_type(self):
self.stubs.Set(qos_specs, 'get_qos_specs',
return_qos_specs_get_qos_specs)
self.stubs.Set(qos_specs, 'disassociate_qos_specs',
return_associate_qos_specs)
req = fakes.HTTPRequest.blank(
'/v2/fake/qos-specs/1/disassociate')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.disassociate, req, '1')
def test_disassociate_not_found(self):
self.stubs.Set(qos_specs, 'get_qos_specs',
return_qos_specs_get_qos_specs)
self.stubs.Set(qos_specs, 'disassociate_qos_specs',
return_associate_qos_specs)
req = fakes.HTTPRequest.blank(
'/v2/fake/qos-specs/111/disassociate?vol_type_id=12')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.disassociate, req, '111')
req = fakes.HTTPRequest.blank(
'/v2/fake/qos-specs/1/disassociate?vol_type_id=1234')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.disassociate, req, '1')
def test_disassociate_failed(self):
self.stubs.Set(qos_specs, 'get_qos_specs',
return_qos_specs_get_qos_specs)
self.stubs.Set(qos_specs, 'disassociate_qos_specs',
return_associate_qos_specs)
req = fakes.HTTPRequest.blank(
'/v2/fake/qos-specs/333/disassociate?vol_type_id=1000')
self.assertRaises(webob.exc.HTTPInternalServerError,
self.controller.disassociate, req, '333')
def test_disassociate_all(self):
self.stubs.Set(qos_specs, 'get_qos_specs',
return_qos_specs_get_qos_specs)
self.stubs.Set(qos_specs, 'disassociate_all',
return_disassociate_all)
req = fakes.HTTPRequest.blank(
'/v2/fake/qos-specs/1/disassociate_all')
res = self.controller.disassociate_all(req, '1')
self.assertEqual(res.status_int, 202)
def test_disassociate_all_not_found(self):
self.stubs.Set(qos_specs, 'get_qos_specs',
return_qos_specs_get_qos_specs)
self.stubs.Set(qos_specs, 'disassociate_all',
return_disassociate_all)
req = fakes.HTTPRequest.blank(
'/v2/fake/qos-specs/111/disassociate_all')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.disassociate_all, req, '111')
def test_disassociate_all_failed(self):
self.stubs.Set(qos_specs, 'get_qos_specs',
return_qos_specs_get_qos_specs)
self.stubs.Set(qos_specs, 'disassociate_all',
return_disassociate_all)
req = fakes.HTTPRequest.blank(
'/v2/fake/qos-specs/222/disassociate_all')
self.assertRaises(webob.exc.HTTPInternalServerError,
self.controller.disassociate_all, req, '222')
|
{
"content_hash": "15a53ce83e5585b10df9502d2932862e",
"timestamp": "",
"source": "github",
"line_count": 584,
"max_line_length": 75,
"avg_line_length": 39.00684931506849,
"alnum_prop": 0.5835381913959614,
"repo_name": "Thingee/cinder",
"id": "8abf1d070d2166b2339dd552ffc8f270ddad7930",
"size": "23443",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "cinder/tests/api/contrib/test_qos_specs_manage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6121923"
},
{
"name": "SQL",
"bytes": "9824"
},
{
"name": "Shell",
"bytes": "8998"
}
],
"symlink_target": ""
}
|
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
def __repr__(self):
if self:
return "{} -> {}".format(self.val, repr(self.next))
# http://bookshadow.com/weblog/2015/05/05/leetcode-reverse-linked-list/
class Solution(object):
def reverseList(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
# dummy-->None
dummy = ListNode(0)
# loop over all nodes, and insert each right after dummy node
while head:
# 1. use a temp variable to store next node after head
next = head.next
# 2. insert head between dummy and node after dummy
# 2.1 point head to dummy.next
head.next = dummy.next
# 2.2 point dummy to head
dummy.next = head
# 3. advance head
head = next
return dummy.next
def reverseList(self, head):
return self.doReverse(head, None)
def doReverse(self, head, newHead):
if head is None:
return newHead
next = head.next
head.next = newHead
return self.doReverse(next, head)
def reverseList(self, head):
#dummy = ListNode(float("-inf"))
dummy = None
while head:
#dummy.next, head.next, head = head, dummy.next, head.next
dummy, head.next, head = head, dummy, head.next
#dummy.next = head
#head.next = dummy.next
#head = head.next
#return dummy.next
return dummy
class Solution2:
def reverseList(self, head):
prev = None
while head:
temp = head.next
head.next = prev
prev = head
head = temp
return prev
if __name__ == "__main__":
head = ListNode(1)
head.next = ListNode(2)
head.next.next = ListNode(3)
head.next.next.next = ListNode(4)
head.next.next.next.next = ListNode(5)
print head
print Solution().reverseList(head)
|
{
"content_hash": "172d935e412e02e86108fae32d219171",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 71,
"avg_line_length": 27.333333333333332,
"alnum_prop": 0.5424390243902439,
"repo_name": "gengwg/leetcode",
"id": "40588f9f41c6a4a52c8ea39182ecf1a2c24bffe3",
"size": "2389",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "206_reverse_linked_list.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "779"
},
{
"name": "Python",
"bytes": "627348"
},
{
"name": "SQLPL",
"bytes": "779"
},
{
"name": "Shell",
"bytes": "4149"
}
],
"symlink_target": ""
}
|
""" Model Plot Data Handler"""
from .simple import SimpleDataSource
def get_field_values(row, fields):
data = []
for field in fields:
data.append(getattr(row, field))
return data
class ModelDataSource(SimpleDataSource):
def __init__(self, queryset, fields=None):
self.queryset = queryset
if fields:
self.fields = fields
else:
self.fields = [el.name for el in self.queryset.model._meta.fields]
self.data = self.create_data()
def create_data(self):
data = [self.fields]
for row in self.queryset:
data.append(get_field_values(row, self.fields))
return data
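# Usage sketch (illustrative; `Account` is a hypothetical Django model with
# `year` and `sales` fields, and the gchart renderer is assumed available):
#
#   from graphos.sources.model import ModelDataSource
#   from graphos.renderers.gchart import LineChart
#
#   data_source = ModelDataSource(Account.objects.all(),
#                                 fields=['year', 'sales'])
#   chart = LineChart(data_source)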
|
{
"content_hash": "9b141cd9f1de6b7eb746a5141d80281a",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 78,
"avg_line_length": 27.16,
"alnum_prop": 0.6126656848306333,
"repo_name": "aorzh/django-graphos",
"id": "e535ef8e6121e76d8fe8b65182807ca7d2bb4e8f",
"size": "679",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "graphos/sources/model.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "39125"
},
{
"name": "HTML",
"bytes": "36814"
},
{
"name": "JavaScript",
"bytes": "376438"
},
{
"name": "Python",
"bytes": "64711"
}
],
"symlink_target": ""
}
|
import os
import tempfile
import tensorflow as tf
import zipfile
import cloudpickle
import numpy as np
import baselines.common.tf_util as U
from baselines.common.tf_util import load_variables, save_variables
from baselines import logger
from baselines.common.schedules import LinearSchedule
from baselines.common import set_global_seeds
from baselines import deepq
from baselines.deepq.replay_buffer import ReplayBuffer, PrioritizedReplayBuffer
from baselines.deepq.utils import ObservationInput
from baselines.common.tf_util import get_session
from baselines.deepq.models import build_q_func
class ActWrapper(object):
def __init__(self, act, act_params):
self._act = act
self._act_params = act_params
self.initial_state = None
@staticmethod
def load_act(path):
with open(path, "rb") as f:
model_data, act_params = cloudpickle.load(f)
act = deepq.build_act(**act_params)
sess = tf.Session()
sess.__enter__()
with tempfile.TemporaryDirectory() as td:
arc_path = os.path.join(td, "packed.zip")
with open(arc_path, "wb") as f:
f.write(model_data)
zipfile.ZipFile(arc_path, 'r', zipfile.ZIP_DEFLATED).extractall(td)
load_variables(os.path.join(td, "model"))
return ActWrapper(act, act_params)
def __call__(self, *args, **kwargs):
return self._act(*args, **kwargs)
def step(self, observation, **kwargs):
# DQN doesn't use RNNs so we ignore states and masks
kwargs.pop('S', None)
kwargs.pop('M', None)
return self._act([observation], **kwargs), None, None, None
def save_act(self, path=None):
"""Save model to a pickle located at `path`"""
if path is None:
path = os.path.join(logger.get_dir(), "model.pkl")
with tempfile.TemporaryDirectory() as td:
save_variables(os.path.join(td, "model"))
arc_name = os.path.join(td, "packed.zip")
with zipfile.ZipFile(arc_name, 'w') as zipf:
for root, dirs, files in os.walk(td):
for fname in files:
file_path = os.path.join(root, fname)
if file_path != arc_name:
zipf.write(file_path, os.path.relpath(file_path, td))
with open(arc_name, "rb") as f:
model_data = f.read()
with open(path, "wb") as f:
cloudpickle.dump((model_data, self._act_params), f)
def save(self, path):
save_variables(path)
def load_act(path):
"""Load act function that was returned by learn function.
Parameters
----------
path: str
path to the act function pickle
Returns
-------
act: ActWrapper
function that takes a batch of observations
and returns actions.
"""
return ActWrapper.load_act(path)
def learn(env,
network,
seed=None,
lr=5e-4,
total_timesteps=100000,
buffer_size=50000,
exploration_fraction=0.1,
exploration_final_eps=0.02,
train_freq=1,
batch_size=32,
print_freq=100,
checkpoint_freq=10000,
checkpoint_path=None,
learning_starts=1000,
gamma=1.0,
target_network_update_freq=500,
prioritized_replay=False,
prioritized_replay_alpha=0.6,
prioritized_replay_beta0=0.4,
prioritized_replay_beta_iters=None,
prioritized_replay_eps=1e-6,
param_noise=False,
callback=None,
load_path=None,
**network_kwargs
):
"""Train a deepq model.
Parameters
-------
env: gym.Env
environment to train on
network: string or a function
neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
(mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
seed: int or None
prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
lr: float
learning rate for adam optimizer
total_timesteps: int
        number of env steps to optimize for
buffer_size: int
size of the replay buffer
exploration_fraction: float
fraction of entire training period over which the exploration rate is annealed
exploration_final_eps: float
final value of random action probability
train_freq: int
        update the model every `train_freq` steps.
    batch_size: int
        size of a batch sampled from the replay buffer for training
print_freq: int
how often to print out training progress
set to None to disable printing
checkpoint_freq: int
how often to save the model. This is so that the best version is restored
at the end of the training. If you do not wish to restore the best version at
        the end of the training, set this variable to None.
learning_starts: int
how many steps of the model to collect transitions for before learning starts
gamma: float
discount factor
target_network_update_freq: int
update the target network every `target_network_update_freq` steps.
    prioritized_replay: bool
if True prioritized replay buffer will be used.
prioritized_replay_alpha: float
alpha parameter for prioritized replay buffer
prioritized_replay_beta0: float
initial value of beta for prioritized replay buffer
prioritized_replay_beta_iters: int
number of iterations over which beta will be annealed from initial value
to 1.0. If set to None equals to total_timesteps.
prioritized_replay_eps: float
epsilon to add to the TD errors when updating priorities.
param_noise: bool
whether or not to use parameter space noise (https://arxiv.org/abs/1706.01905)
callback: (locals, globals) -> None
        function called at every step with the state of the algorithm.
        If callback returns true, training stops.
load_path: str
path to load the model from. (default: None)
**network_kwargs
additional keyword arguments to pass to the network builder.
Returns
-------
act: ActWrapper
Wrapper over act function. Adds ability to save it and load it.
See header of baselines/deepq/categorical.py for details on the act function.
"""
# Create all the functions necessary to train the model
sess = get_session()
set_global_seeds(seed)
q_func = build_q_func(network, **network_kwargs)
# capture the shape outside the closure so that the env object is not serialized
# by cloudpickle when serializing make_obs_ph
observation_space = env.observation_space
def make_obs_ph(name):
return ObservationInput(observation_space, name=name)
act, train, update_target, debug = deepq.build_train(
make_obs_ph=make_obs_ph,
q_func=q_func,
num_actions=env.action_space.n,
optimizer=tf.train.AdamOptimizer(learning_rate=lr),
gamma=gamma,
grad_norm_clipping=10,
param_noise=param_noise
)
act_params = {
'make_obs_ph': make_obs_ph,
'q_func': q_func,
'num_actions': env.action_space.n,
}
act = ActWrapper(act, act_params)
# Create the replay buffer
if prioritized_replay:
replay_buffer = PrioritizedReplayBuffer(buffer_size, alpha=prioritized_replay_alpha)
if prioritized_replay_beta_iters is None:
prioritized_replay_beta_iters = total_timesteps
beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
initial_p=prioritized_replay_beta0,
final_p=1.0)
else:
replay_buffer = ReplayBuffer(buffer_size)
beta_schedule = None
# Create the schedule for exploration starting from 1.
exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction * total_timesteps),
initial_p=1.0,
final_p=exploration_final_eps)
# Initialize the parameters and copy them to the target network.
U.initialize()
update_target()
episode_rewards = [0.0]
saved_mean_reward = None
obs = env.reset()
reset = True
with tempfile.TemporaryDirectory() as td:
td = checkpoint_path or td
model_file = os.path.join(td, "model")
model_saved = False
if tf.train.latest_checkpoint(td) is not None:
load_variables(model_file)
logger.log('Loaded model from {}'.format(model_file))
model_saved = True
elif load_path is not None:
load_variables(load_path)
logger.log('Loaded model from {}'.format(load_path))
for t in range(total_timesteps):
if callback is not None:
if callback(locals(), globals()):
break
# Take action and update exploration to the newest value
kwargs = {}
if not param_noise:
update_eps = exploration.value(t)
update_param_noise_threshold = 0.
else:
update_eps = 0.
# Compute the threshold such that the KL divergence between perturbed and non-perturbed
# policy is comparable to eps-greedy exploration with eps = exploration.value(t).
# See Appendix C.1 in Parameter Space Noise for Exploration, Plappert et al., 2017
# for detailed explanation.
update_param_noise_threshold = -np.log(1. - exploration.value(t) + exploration.value(t) / float(env.action_space.n))
kwargs['reset'] = reset
kwargs['update_param_noise_threshold'] = update_param_noise_threshold
kwargs['update_param_noise_scale'] = True
action = act(np.array(obs)[None], update_eps=update_eps, **kwargs)[0]
env_action = action
reset = False
new_obs, rew, done, _ = env.step(env_action)
# Store transition in the replay buffer.
replay_buffer.add(obs, action, rew, new_obs, float(done))
obs = new_obs
episode_rewards[-1] += rew
if done:
obs = env.reset()
episode_rewards.append(0.0)
reset = True
if t > learning_starts and t % train_freq == 0:
# Minimize the error in Bellman's equation on a batch sampled from replay buffer.
if prioritized_replay:
experience = replay_buffer.sample(batch_size, beta=beta_schedule.value(t))
(obses_t, actions, rewards, obses_tp1, dones, weights, batch_idxes) = experience
else:
obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(batch_size)
weights, batch_idxes = np.ones_like(rewards), None
td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
if prioritized_replay:
new_priorities = np.abs(td_errors) + prioritized_replay_eps
replay_buffer.update_priorities(batch_idxes, new_priorities)
if t > learning_starts and t % target_network_update_freq == 0:
# Update target network periodically.
update_target()
mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
num_episodes = len(episode_rewards)
if done and print_freq is not None and len(episode_rewards) % print_freq == 0:
logger.record_tabular("steps", t)
logger.record_tabular("episodes", num_episodes)
logger.record_tabular("mean 100 episode reward", mean_100ep_reward)
logger.record_tabular("% time spent exploring", int(100 * exploration.value(t)))
logger.dump_tabular()
if (checkpoint_freq is not None and t > learning_starts and
num_episodes > 100 and t % checkpoint_freq == 0):
if saved_mean_reward is None or mean_100ep_reward > saved_mean_reward:
if print_freq is not None:
logger.log("Saving model due to mean reward increase: {} -> {}".format(
saved_mean_reward, mean_100ep_reward))
save_variables(model_file)
model_saved = True
saved_mean_reward = mean_100ep_reward
if model_saved:
if print_freq is not None:
logger.log("Restored model with mean reward: {}".format(saved_mean_reward))
load_variables(model_file)
return act
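if __name__ == '__main__':
    # Usage sketch (illustrative, not part of baselines): train DQN on CartPole
    # with the registered 'mlp' network. Assumes `gym` is installed; the
    # hyperparameters below are placeholders, not tuned values.
    import gym
    demo_env = gym.make('CartPole-v0')
    demo_act = learn(demo_env, network='mlp', total_timesteps=10000,
                     print_freq=10, exploration_fraction=0.3)
    demo_act.save_act(os.path.join(tempfile.gettempdir(), 'cartpole_dqn.pkl'))
    demo_env.close()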
|
{
"content_hash": "e4499db539d90513b7bd66881ad169ef",
"timestamp": "",
"source": "github",
"line_count": 333,
"max_line_length": 145,
"avg_line_length": 39.54054054054054,
"alnum_prop": 0.6062884483937115,
"repo_name": "dsbrown1331/CoRL2019-DREX",
"id": "b7b9d1a04672e41787ac5172a1338619b5b1734b",
"size": "13167",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "drex-atari/baselines/baselines/deepq/deepq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "918"
},
{
"name": "HTML",
"bytes": "591968"
},
{
"name": "Jupyter Notebook",
"bytes": "1160596"
},
{
"name": "Python",
"bytes": "1438389"
}
],
"symlink_target": ""
}
|
from ....testing import assert_equal
from ..developer import JistBrainMp2rageSkullStripping
def test_JistBrainMp2rageSkullStripping_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inFilter=dict(argstr='--inFilter %s',
),
inSecond=dict(argstr='--inSecond %s',
),
inSkip=dict(argstr='--inSkip %s',
),
inT1=dict(argstr='--inT1 %s',
),
inT1weighted=dict(argstr='--inT1weighted %s',
),
null=dict(argstr='--null %s',
),
outBrain=dict(argstr='--outBrain %s',
hash_files=False,
),
outMasked=dict(argstr='--outMasked %s',
hash_files=False,
),
outMasked2=dict(argstr='--outMasked2 %s',
hash_files=False,
),
outMasked3=dict(argstr='--outMasked3 %s',
hash_files=False,
),
terminal_output=dict(nohash=True,
),
xDefaultMem=dict(argstr='-xDefaultMem %d',
),
xMaxProcess=dict(argstr='-xMaxProcess %d',
usedefault=True,
),
xPrefExt=dict(argstr='--xPrefExt %s',
),
)
inputs = JistBrainMp2rageSkullStripping.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_JistBrainMp2rageSkullStripping_outputs():
output_map = dict(outBrain=dict(),
outMasked=dict(),
outMasked2=dict(),
outMasked3=dict(),
)
outputs = JistBrainMp2rageSkullStripping.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
{
"content_hash": "1382244744044806fecae5085c64c0ae",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 78,
"avg_line_length": 27.307692307692307,
"alnum_prop": 0.632112676056338,
"repo_name": "iglpdc/nipype",
"id": "12b3232fa7d8e85337ec9b114de1d55b812f6576",
"size": "1829",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "nipype/interfaces/mipav/tests/test_auto_JistBrainMp2rageSkullStripping.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2106"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "4458175"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
from kokki import *
def tester():
print "FEWFEWFEW"
|
{
"content_hash": "4558d50973824a22c3f1f7b903925288",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 21,
"avg_line_length": 14.25,
"alnum_prop": 0.6666666666666666,
"repo_name": "samuel/kokki",
"id": "e06955f3c5d1c7969e9851f649358df6eb3e4f44",
"size": "58",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/cookbooks/test/libraries/blah.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "211476"
}
],
"symlink_target": ""
}
|
from distutils.core import setup
setup(
name = 'quickconfig',
py_modules=['quickconfig'],
version = '2.2',
description = 'Python configuration tool.',
author = 'KJ',
author_email = '<see github for support>',
url = 'https://github.com/jdotpy/quickconfig',
download_url = 'https://github.com/jdotpy/quickconfig/tarball/2.1',
keywords = ['configuration', 'config', 'settings', 'tools'],
classifiers = [],
)
|
{
"content_hash": "e345d4741c5ce3e518d13c189e1adbb8",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 71,
"avg_line_length": 31.857142857142858,
"alnum_prop": 0.6390134529147982,
"repo_name": "jdotpy/quickconfig",
"id": "6355be185b8c3e46f4cbed7987f2f5bb885995a7",
"size": "446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17045"
},
{
"name": "Shell",
"bytes": "25"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0007_alter_validators_add_error_messages'),
]
operations = [
migrations.CreateModel(
name='UserProfile',
fields=[
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, related_name='profile', serialize=False, to=settings.AUTH_USER_MODEL)),
('camera_type', models.CharField(blank=True, max_length=30)),
('address', models.CharField(blank=True, max_length=60)),
('web_link', models.CharField(blank=True, max_length=70)),
('photo_type', models.CharField(blank=True, max_length=30)),
('social_media', models.CharField(blank=True, max_length=30)),
('region', models.CharField(choices=[('North America', 'North America'), ('Asia', 'Asia'), ('Africa', 'Africa'), ('South America', 'South America'), ('Europe', 'Europe')], default='North America', max_length=30)),
('friends', models.ManyToManyField(related_name='friend_of', to='imager_profile.UserProfile')),
],
),
]
|
{
"content_hash": "e1530fb22e9ddd0e6f4714806ab49c7f",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 229,
"avg_line_length": 44.733333333333334,
"alnum_prop": 0.6162444113263785,
"repo_name": "nadiabahrami/django-imager",
"id": "230ba4c7e8e40918c411c9581d92fc9ada6243bd",
"size": "1414",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imagersite/imager_profile/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "68384"
},
{
"name": "HTML",
"bytes": "27306"
},
{
"name": "JavaScript",
"bytes": "14517"
},
{
"name": "Python",
"bytes": "37251"
}
],
"symlink_target": ""
}
|
DATA_FILENAME = 'MC2-training.tar.gz'
DOC_FILENAME = 'MC2-training-documents.tar.gz'
from .mobileclick_download_data import download_and_deploy, SUBSET_FILENAME
def main(istest=False):
docfilename = SUBSET_FILENAME if istest else DOC_FILENAME
download_and_deploy([DATA_FILENAME, docfilename])
if __name__ == '__main__':
main()
|
{
"content_hash": "0657ae06f0bed59178bfdc58511c0289",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 75,
"avg_line_length": 34.1,
"alnum_prop": 0.7272727272727273,
"repo_name": "mpkato/mobileclick",
"id": "4829f58f430769f7803aa075d7457eec4dac8cd5",
"size": "364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mobileclick/scripts/mobileclick_download_training_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "83970"
},
{
"name": "Shell",
"bytes": "619"
}
],
"symlink_target": ""
}
|
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TimeFrame.cleanup_time'
db.add_column(u'thermometer_timeframe', 'cleanup_time',
self.gf('django.db.models.fields.FloatField')(default=None, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'TimeFrame.cleanup_time'
db.delete_column(u'thermometer_timeframe', 'cleanup_time')
models = {
u'thermometer.exampletweet': {
'Meta': {'object_name': 'ExampleTweet', 'index_together': "[['feeling', 'created_at']]"},
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'feeling': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['thermometer.FeelingWord']"}),
'frame': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['thermometer.TimeFrame']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
'tweet_id': ('django.db.models.fields.BigIntegerField', [], {}),
'user_id': ('django.db.models.fields.BigIntegerField', [], {}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'user_screen_name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'thermometer.feelingindicator': {
'Meta': {'object_name': 'FeelingIndicator'},
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phrase': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'thermometer.feelingpercent': {
'Meta': {'object_name': 'FeelingPercent', 'index_together': "[['missing_data', 'feeling'], ['start_time', 'missing_data', 'feeling']]"},
'feeling': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['thermometer.FeelingWord']", 'null': 'True', 'blank': 'True'}),
'feeling_tweets': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'frame': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['thermometer.TimeFrame']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'missing_data': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'percent': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'})
},
u'thermometer.feelingword': {
'Meta': {'object_name': 'FeelingWord'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'untracked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'word': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'thermometer.timeframe': {
'Meta': {'object_name': 'TimeFrame'},
'analysis_time': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'calculated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'cleanup_time': ('django.db.models.fields.FloatField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'feeling_tweets': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'missing_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'total_tweets': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['thermometer']
|
{
"content_hash": "4faa6b4bc5b3eec8a41fecfb092b887d",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 169,
"avg_line_length": 64.50704225352112,
"alnum_prop": 0.5681222707423581,
"repo_name": "michaelbrooks/twitter-feels",
"id": "ecd1c4ecf09181c90a80e3d7d6ef26b2d9fd2289",
"size": "4604",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twitter_feels/apps/thermometer/migrations/0003_auto__add_field_timeframe_cleanup_time.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18147"
},
{
"name": "JavaScript",
"bytes": "156482"
},
{
"name": "Puppet",
"bytes": "14112"
},
{
"name": "Python",
"bytes": "228903"
},
{
"name": "Shell",
"bytes": "4621"
}
],
"symlink_target": ""
}
|
"""This file contains a parser for extracting metadata."""
# TODO: Add a unit test for this parser.
import datetime
import hachoir_core.config
# This is necessary to do PRIOR to loading up other parts of hachoir
# framework, otherwise console does not work and other "weird" behavior
# is observed.
hachoir_core.config.unicode_stdout = False
hachoir_core.config.quiet = True
import hachoir_core
import hachoir_parser
import hachoir_metadata
from plaso.events import time_events
from plaso.lib import errors
from plaso.lib import timelib
from plaso.parsers import interface
from plaso.parsers import manager
__author__ = 'David Nides (david.nides@gmail.com)'
class HachoirEvent(time_events.TimestampEvent):
"""Process timestamps from Hachoir Events."""
DATA_TYPE = u'metadata:hachoir'
def __init__(self, dt_timestamp, usage, attributes):
"""An EventObject created from a Hachoir entry.
Args:
dt_timestamp: A python datetime.datetime object.
usage: The description of the usage of the time value.
attributes: A dict containing metadata for the event.
"""
timestamp = timelib.Timestamp.FromPythonDatetime(dt_timestamp)
super(HachoirEvent, self).__init__(timestamp, usage, self.DATA_TYPE)
self.metadata = attributes
class HachoirParser(interface.SingleFileBaseParser):
"""Class to parse meta data from files using Hachoir."""
NAME = u'hachoir'
DESCRIPTION = u'Parser that wraps Hachoir.'
def ParseFileObject(self, parser_mediator, file_object, **kwargs):
"""Parses a file-like object using Hachoir.
Args:
      parser_mediator: A parser mediator object (instance of ParserMediator).
file_object: A file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
file_name = parser_mediator.GetDisplayName()
try:
fstream = hachoir_core.stream.InputIOStream(file_object, None, tags=[])
except hachoir_core.error.HachoirError as exception:
raise errors.UnableToParseFile(
u'[{0:s}] unable to parse file {1:s}: {2:s}'.format(
self.NAME, file_name, exception))
if not fstream:
raise errors.UnableToParseFile(
u'[{0:s}] unable to parse file {1:s}: {2:s}'.format(
self.NAME, file_name, u'Not fstream'))
try:
doc_parser = hachoir_parser.guessParser(fstream)
except hachoir_core.error.HachoirError as exception:
raise errors.UnableToParseFile(
u'[{0:s}] unable to parse file {1:s}: {2:s}'.format(
self.NAME, file_name, exception))
if not doc_parser:
raise errors.UnableToParseFile(
u'[{0:s}] unable to parse file {1:s}: {2:s}'.format(
self.NAME, file_name, u'Not parser'))
try:
metadata = hachoir_metadata.extractMetadata(doc_parser)
except (AssertionError, AttributeError) as exception:
raise errors.UnableToParseFile(
u'[{0:s}] unable to parse file {1:s}: {2:s}'.format(
self.NAME, file_name, exception))
try:
metatext = metadata.exportPlaintext(human=False)
except AttributeError as exception:
raise errors.UnableToParseFile(
u'[{0:s}] unable to parse file {1:s}: {2:s}'.format(
self.NAME, file_name, exception))
if not metatext:
raise errors.UnableToParseFile(
u'[{0:s}] unable to parse file {1:s}: No metadata'.format(
self.NAME, file_name))
attributes = {}
extracted_events = []
for meta in metatext:
if not meta.startswith(u'-'):
continue
if len(meta) < 3:
continue
key, _, value = meta[2:].partition(': ')
key2, _, value2 = value.partition(': ')
if key2 == u'LastPrinted' and value2 != u'False':
date_object = timelib.Timestamp.FromTimeString(
value2, timezone=parser_mediator.timezone)
if isinstance(date_object, datetime.datetime):
extracted_events.append((date_object, key2))
try:
date = metadata.get(key)
if isinstance(date, datetime.datetime):
extracted_events.append((date, key))
except ValueError:
pass
if key in attributes:
if isinstance(attributes.get(key), list):
attributes[key].append(value)
else:
old_value = attributes.get(key)
attributes[key] = [old_value, value]
else:
attributes[key] = value
if not extracted_events:
raise errors.UnableToParseFile(
u'[{0:s}] unable to parse file {1:s}: {2:s}'.format(
self.NAME, file_name, u'No events discovered'))
for date, key in extracted_events:
event_object = HachoirEvent(date, key, attributes)
parser_mediator.ProduceEvent(event_object)
manager.ParsersManager.RegisterParser(HachoirParser)
|
{
"content_hash": "4f06d44d6dbaac11c29896bb780bf371",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 77,
"avg_line_length": 31.90728476821192,
"alnum_prop": 0.6552511415525114,
"repo_name": "jorik041/plaso",
"id": "683d68278c148d94f4658a2caf1a469a03b5cbfe",
"size": "4842",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "plaso/parsers/hachoir.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1276"
},
{
"name": "Makefile",
"bytes": "1151"
},
{
"name": "Protocol Buffer",
"bytes": "13905"
},
{
"name": "Python",
"bytes": "3032632"
},
{
"name": "Shell",
"bytes": "45900"
}
],
"symlink_target": ""
}
|
"""
This module is for the miscellaneous routines which do not fit somewhere else.
"""
import glob
import os
from PyInstaller import log as logging
from PyInstaller.compat import is_win
logger = logging.getLogger(__name__)
def dlls_in_subdirs(directory):
"""Returns *.dll, *.so, *.dylib in given directories and subdirectories."""
    filelist = []
    for root, dirs, files in os.walk(directory):
        filelist.extend(dlls_in_dir(root))
    return filelist
def dlls_in_dir(directory):
"""Returns *.dll, *.so, *.dylib in given directory."""
files = []
files.extend(glob.glob(os.path.join(directory, '*.so')))
files.extend(glob.glob(os.path.join(directory, '*.dll')))
files.extend(glob.glob(os.path.join(directory, '*.dylib')))
return files
def find_executable(executable, path=None):
"""
Try to find 'executable' in the directories listed in 'path' (a
string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']).
Returns the complete filename or None if not found.
Code from http://snippets.dzone.com/posts/show/6313
"""
if path is None:
path = os.environ['PATH']
paths = path.split(os.pathsep)
extlist = ['']
if is_win:
(base, ext) = os.path.splitext(executable)
# Executable files on windows have an arbitrary extension, but
# .exe is automatically appended if not present in the name.
if not ext:
executable = executable + ".exe"
pathext = os.environ['PATHEXT'].lower().split(os.pathsep)
(base, ext) = os.path.splitext(executable)
if ext.lower() not in pathext:
extlist = pathext
for ext in extlist:
execname = executable + ext
if os.path.isfile(execname):
return execname
else:
for p in paths:
f = os.path.join(p, execname)
if os.path.isfile(f):
return f
else:
return None
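# Usage sketch (the returned path depends on the host system; 'python' is just
# an illustrative executable name):
#   find_executable('python')  # e.g. '/usr/bin/python', or None if not on PATH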
def get_unicode_modules():
"""
Try importing codecs and encodings to include unicode support
in created binary.
"""
modules = []
try:
import codecs
modules = ['codecs']
import encodings
        # `encodings` imports `codecs`, so listing `encodings` alone is enough.
modules = ['encodings']
except ImportError:
pass
return modules
def get_code_object(filename):
"""
Convert source code from Python source file to code object.
"""
try:
source_code_string = open(filename, 'rU').read() + '\n'
code_object = compile(source_code_string, filename, 'exec')
return code_object
except SyntaxError, e:
logger.exception(e)
raise SystemExit(10)
|
{
"content_hash": "a98cf1bfc5a44ab3cd6db142ce4e2f02",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 79,
"avg_line_length": 28.427083333333332,
"alnum_prop": 0.6137779406375962,
"repo_name": "kholia/exetractor-clone",
"id": "23a5ff5860286c43c36ae29cb78f9ec705179106",
"size": "3557",
"binary": false,
"copies": "3",
"ref": "refs/heads/unstable",
"path": "PyInstaller/utils/misc.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "559156"
}
],
"symlink_target": ""
}
|
"""
This is an example of working with very large data. There are about
700,000 unduplicated donors in this database of Illinois political
campaign contributions.
With such a large set of input data, we cannot store all the comparisons
we need to make in memory. Instead, we will read the pairs on demand
from the PostgresSQL database.
__Note:__ You will need to run `python pgsql_big_dedupe_example_init_db.py`
before running this script.
For smaller datasets (<10,000), see our
[csv_example](http://datamade.github.io/dedupe-examples/docs/csv_example.html)
"""
import os
import time
import logging
import optparse
import locale
import itertools
import io
import csv
import dj_database_url
import psycopg2
import psycopg2.extras
import dedupe
import numpy
from psycopg2.extensions import register_adapter, AsIs
register_adapter(numpy.int32, AsIs)
register_adapter(numpy.int64, AsIs)
register_adapter(numpy.float32, AsIs)
register_adapter(numpy.float64, AsIs)
class Readable(object):
def __init__(self, iterator):
self.output = io.StringIO()
self.writer = csv.writer(self.output)
self.iterator = iterator
def read(self, size):
self.writer.writerows(itertools.islice(self.iterator, size))
chunk = self.output.getvalue()
self.output.seek(0)
self.output.truncate(0)
return chunk
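# `Readable` above wraps a row iterator in a file-like object so psycopg2's
# `copy_expert` can stream CSV rows into a table without materializing them in
# memory first. A minimal sketch of the idea (the cursor, table name, and rows
# below are illustrative, not part of this script):
#
#   rows = iter([(1, 'block.key.a'), (2, 'block.key.b')])
#   cur.copy_expert('COPY some_table FROM STDIN WITH CSV', Readable(rows))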
def record_pairs(result_set):
for i, row in enumerate(result_set):
a_record_id, a_record, b_record_id, b_record = row
record_a = (a_record_id, a_record)
record_b = (b_record_id, b_record)
yield record_a, record_b
if i % 10000 == 0:
print(i)
def cluster_ids(clustered_dupes):
for cluster, scores in clustered_dupes:
cluster_id = cluster[0]
for donor_id, score in zip(cluster, scores):
yield donor_id, cluster_id, score
if __name__ == '__main__':
# ## Logging
# Dedupe uses Python logging to show or suppress verbose output. Added
# for convenience. To enable verbose output, run `python
# pgsql_big_dedupe_example.py -v`
optp = optparse.OptionParser()
optp.add_option('-v', '--verbose', dest='verbose', action='count',
help='Increase verbosity (specify multiple times for more)'
)
(opts, args) = optp.parse_args()
log_level = logging.WARNING
if opts.verbose:
if opts.verbose == 1:
log_level = logging.INFO
elif opts.verbose >= 2:
log_level = logging.DEBUG
logging.getLogger().setLevel(log_level)
# ## Setup
settings_file = 'pgsql_big_dedupe_example_settings'
training_file = 'pgsql_big_dedupe_example_training.json'
start_time = time.time()
# Set the database connection from environment variable using
# [dj_database_url](https://github.com/kennethreitz/dj-database-url)
# For example:
# export DATABASE_URL=postgres://user:password@host/mydatabase
db_conf = dj_database_url.config()
if not db_conf:
raise Exception(
'set DATABASE_URL environment variable with your connection, e.g. '
'export DATABASE_URL=postgres://user:password@host/mydatabase'
)
read_con = psycopg2.connect(database=db_conf['NAME'],
user=db_conf['USER'],
password=db_conf['PASSWORD'],
host=db_conf['HOST'],
cursor_factory=psycopg2.extras.RealDictCursor)
write_con = psycopg2.connect(database=db_conf['NAME'],
user=db_conf['USER'],
password=db_conf['PASSWORD'],
host=db_conf['HOST'])
# We'll be using variations on this following select statement to pull
# in campaign donor info.
#
# We did a fair amount of preprocessing of the fields in
# `pgsql_big_dedupe_example_init_db.py`
DONOR_SELECT = "SELECT donor_id, city, name, zip, state, address " \
"from processed_donors"
# ## Training
if os.path.exists(settings_file):
print('reading from ', settings_file)
with open(settings_file, 'rb') as sf:
deduper = dedupe.StaticDedupe(sf, num_cores=4)
else:
# Define the fields dedupe will pay attention to
#
# The address, city, and zip fields are often missing, so we'll
        # tell dedupe that, and we'll learn a model that takes that into
# account
fields = [{'field': 'name', 'type': 'String'},
{'field': 'address', 'type': 'String',
'has missing': True},
{'field': 'city', 'type': 'ShortString', 'has missing': True},
{'field': 'state', 'type': 'ShortString', 'has missing': True},
{'field': 'zip', 'type': 'ShortString', 'has missing': True},
]
# Create a new deduper object and pass our data model to it.
deduper = dedupe.Dedupe(fields, num_cores=4)
# Named cursor runs server side with psycopg2
with read_con.cursor('donor_select') as cur:
cur.execute(DONOR_SELECT)
temp_d = {i: row for i, row in enumerate(cur)}
# If we have training data saved from a previous run of dedupe,
        # look for it and load it in.
#
# __Note:__ if you want to train from
# scratch, delete the training_file
if os.path.exists(training_file):
print('reading labeled examples from ', training_file)
with open(training_file) as tf:
deduper.prepare_training(temp_d, tf)
else:
deduper.prepare_training(temp_d)
del temp_d
# ## Active learning
print('starting active labeling...')
# Starts the training loop. Dedupe will find the next pair of records
# it is least certain about and ask you to label them as duplicates
# or not.
# use 'y', 'n' and 'u' keys to flag duplicates
# press 'f' when you are finished
dedupe.console_label(deduper)
# When finished, save our labeled, training pairs to disk
with open(training_file, 'w') as tf:
deduper.write_training(tf)
# Notice our argument here
#
        # `recall` is the proportion of true duplicate pairs that the learned
        # rules must cover. You may want to reduce this if you are making
# too many blocks and too many comparisons.
deduper.train(recall=0.90)
with open(settings_file, 'wb') as sf:
deduper.write_settings(sf)
# We can now remove some of the memory hogging objects we used
# for training
deduper.cleanup_training()
# ## Blocking
print('blocking...')
# To run blocking on such a large set of data, we create a separate table
# that contains blocking keys and record ids
print('creating blocking_map database')
with write_con:
with write_con.cursor() as cur:
cur.execute("DROP TABLE IF EXISTS blocking_map")
cur.execute("CREATE TABLE blocking_map "
"(block_key text, donor_id INTEGER)")
    # If dedupe learned an Index Predicate, we have to take a pass
# through the data and create indices.
print('creating inverted index')
for field in deduper.fingerprinter.index_fields:
with read_con.cursor('field_values') as cur:
cur.execute("SELECT DISTINCT %s FROM processed_donors" % field)
field_data = (row[field] for row in cur)
deduper.fingerprinter.index(field_data, field)
# Now we are ready to write our blocking map table by creating a
# generator that yields unique `(block_key, donor_id)` tuples.
print('writing blocking map')
with read_con.cursor('donor_select') as read_cur:
read_cur.execute(DONOR_SELECT)
full_data = ((row['donor_id'], row) for row in read_cur)
b_data = deduper.fingerprinter(full_data)
with write_con:
with write_con.cursor() as write_cur:
write_cur.copy_expert('COPY blocking_map FROM STDIN WITH CSV',
Readable(b_data),
size=10000)
# free up memory by removing indices
deduper.fingerprinter.reset_indices()
logging.info("indexing block_key")
with write_con:
with write_con.cursor() as cur:
cur.execute("CREATE UNIQUE INDEX ON blocking_map "
"(block_key text_pattern_ops, donor_id)")
# ## Clustering
with write_con:
with write_con.cursor() as cur:
cur.execute("DROP TABLE IF EXISTS entity_map")
print('creating entity_map database')
cur.execute("CREATE TABLE entity_map "
"(donor_id INTEGER, canon_id INTEGER, "
" cluster_score FLOAT, PRIMARY KEY(donor_id))")
with read_con.cursor('pairs', cursor_factory=psycopg2.extensions.cursor) as read_cur:
read_cur.execute("""
select a.donor_id,
row_to_json((select d from (select a.city,
a.name,
a.zip,
a.state,
a.address) d)),
b.donor_id,
row_to_json((select d from (select b.city,
b.name,
b.zip,
b.state,
b.address) d))
from (select DISTINCT l.donor_id as east, r.donor_id as west
from blocking_map as l
INNER JOIN blocking_map as r
using (block_key)
where l.donor_id < r.donor_id) ids
INNER JOIN processed_donors a on ids.east=a.donor_id
INNER JOIN processed_donors b on ids.west=b.donor_id""")
print('clustering...')
clustered_dupes = deduper.cluster(deduper.score(record_pairs(read_cur)),
threshold=0.5)
# ## Writing out results
# We now have a sequence of tuples of donor ids that dedupe believes
# all refer to the same entity. We write this out onto an entity map
# table
print('writing results')
with write_con:
with write_con.cursor() as write_cur:
write_cur.copy_expert('COPY entity_map FROM STDIN WITH CSV',
Readable(cluster_ids(clustered_dupes)),
size=10000)
with write_con:
with write_con.cursor() as cur:
cur.execute("CREATE INDEX head_index ON entity_map (canon_id)")
# Print out the number of duplicates found
# ## Payoff
# With all this done, we can now begin to ask interesting questions
# of the data
#
# For example, let's see who the top 10 donors are.
locale.setlocale(locale.LC_ALL, '') # for pretty printing numbers
# Create a temporary table so each group and unmatched record has
# a unique id
with read_con.cursor() as cur:
cur.execute("CREATE TEMPORARY TABLE e_map "
"AS SELECT COALESCE(canon_id, donor_id) AS canon_id, donor_id "
"FROM entity_map "
"RIGHT JOIN donors USING(donor_id)")
cur.execute(
"SELECT CONCAT_WS(' ', donors.first_name, donors.last_name) AS name, "
"donation_totals.totals AS totals "
"FROM donors INNER JOIN "
"(SELECT canon_id, SUM(CAST(amount AS FLOAT)) AS totals "
" FROM contributions INNER JOIN e_map "
" USING (donor_id) "
" GROUP BY (canon_id) "
" ORDER BY totals "
" DESC LIMIT 10) "
"AS donation_totals ON donors.donor_id=donation_totals.canon_id "
"WHERE donors.donor_id = donation_totals.canon_id"
)
print("Top Donors (deduped)")
for row in cur:
row['totals'] = locale.currency(row['totals'], grouping=True)
print('%(totals)20s: %(name)s' % row)
# Compare this to what we would have gotten if we hadn't done any
# deduplication
cur.execute(
"SELECT CONCAT_WS(' ', donors.first_name, donors.last_name) as name, "
"SUM(CAST(contributions.amount AS FLOAT)) AS totals "
"FROM donors INNER JOIN contributions "
"USING (donor_id) "
"GROUP BY (donor_id) "
"ORDER BY totals DESC "
"LIMIT 10"
)
print("Top Donors (raw)")
for row in cur:
row['totals'] = locale.currency(row['totals'], grouping=True)
print('%(totals)20s: %(name)s' % row)
read_con.close()
write_con.close()
print('ran in', time.time() - start_time, 'seconds')
|
{
"content_hash": "766b5c4094fe440c0bc56693e4f29528",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 89,
"avg_line_length": 36.130434782608695,
"alnum_prop": 0.5672382671480144,
"repo_name": "dedupeio/dedupe-examples",
"id": "e2c629573db4ef980dc864033c5ba577f44d585d",
"size": "13343",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pgsql_big_dedupe_example/pgsql_big_dedupe_example.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "82169"
}
],
"symlink_target": ""
}
|
import os
import sys
from roblib import read_fasta
import argparse
__author__ = 'Rob Edwards'
|
{
"content_hash": "9b6c1f920674968b55f06f993c89eb7a",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 29,
"avg_line_length": 12.125,
"alnum_prop": 0.7422680412371134,
"repo_name": "linsalrob/EdwardsLab",
"id": "8fdf0deb0d26eda6f314654fbadc8466f6d61d84",
"size": "97",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/blast2seq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "227276"
},
{
"name": "C++",
"bytes": "21508"
},
{
"name": "Jupyter Notebook",
"bytes": "490830"
},
{
"name": "Makefile",
"bytes": "936"
},
{
"name": "Perl",
"bytes": "280086"
},
{
"name": "Python",
"bytes": "1102051"
},
{
"name": "Shell",
"bytes": "13759"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blocklist', '0015_auto_20200520_1725'),
]
operations = [
migrations.AlterModelOptions(
name='legacyimport',
options={},
),
migrations.AlterField(
model_name='legacyimport',
name='kinto_id',
field=models.CharField(db_column='kinto_id', default='', max_length=255, unique=True),
),
migrations.RenameField(
model_name='legacyimport',
old_name='kinto_id',
new_name='legacy_id',
),
]
|
{
"content_hash": "786335f049bcb52ae47f127b43c2a085",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 98,
"avg_line_length": 26,
"alnum_prop": 0.5461538461538461,
"repo_name": "mozilla/addons-server",
"id": "f2f2d98266ab5aa70442bd77226d0a424e9dae2c",
"size": "700",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/olympia/blocklist/migrations/0016_auto_20200521_1710.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "245459"
},
{
"name": "Dockerfile",
"bytes": "3900"
},
{
"name": "HTML",
"bytes": "290496"
},
{
"name": "JavaScript",
"bytes": "750827"
},
{
"name": "Less",
"bytes": "212819"
},
{
"name": "Makefile",
"bytes": "564"
},
{
"name": "Python",
"bytes": "6811560"
},
{
"name": "Shell",
"bytes": "8638"
},
{
"name": "Smarty",
"bytes": "1261"
}
],
"symlink_target": ""
}
|
from gen_function import *
import string
header = '''// (C) Copyright David Abrahams 2001,2002. Permission to copy, use, modify, sell
// and distribute this software is granted provided this copyright notice appears
// in all copies. This software is provided "as is" without express or implied
// warranty, and with no claim as to its suitability for any purpose.
//
// This work was funded in part by Lawrence Berkeley National Labs
//
// This file generated for %d-argument member functions and %d-argument free
// functions by gen_returning.py
'''
body_sections = (
'''
#ifndef RETURNING_DWA20011201_HPP
# define RETURNING_DWA20011201_HPP
# include <boost/python/detail/wrap_python.hpp>
# include <boost/config.hpp>
# include <boost/python/detail/none.hpp>
# include <boost/python/from_python.hpp>
namespace boost { namespace python { namespace detail {
// Calling C++ from Python
template <class R>
struct returning
{
''',
'''
''',
''' // Free functions
''',
'''};
template <>
struct returning<void>
{
typedef void R;
''',
'''
''',
'''
// Free functions
''',
'''};
}}} // namespace boost::python::detail
#endif // RETURNING_DWA20011201_HPP
''')
#'
member_function = ''' template <class P, class A0%(, class A%+%)>
static PyObject* call(R (A0::*pmf)(%(A%+%:, %))%1, PyObject* args_, PyObject*, P const& policies)
{
// check that each of the arguments is convertible
from_python<A0%1*> c0(PyTuple_GET_ITEM(args_, 0));
if (!c0.convertible()) return 0;
%( from_python<A%+> c%+(PyTuple_GET_ITEM(args_, %+));
if (!c%+.convertible()) return 0;
%)
%[r%: // find the result converter
typedef typename P::result_converter result_converter;
typename eval<result_converter,R>::type cr;
if (!cr.convertible()) return 0;
%] if (!policies.precall(args_)) return 0;
%[r%:PyObject* result = cr( %]((c0(PyTuple_GET_ITEM(args_, 0)))->*pmf)(
%(c%+(PyTuple_GET_ITEM(args_, %+))%:
, %))%[r%: )%];
return policies.postcall(args_, %[r%:result%]%[v%:detail::none()%]);
}
'''
free_function = ''' template <class P%(, class A%n%%)>
static PyObject* call(R (*pf)(%(A%n%:, %)), PyObject* args_, PyObject*, P const& policies)
{%{
// check that each of the arguments is convertible
%}%( from_python<A%n> c%n(PyTuple_GET_ITEM(args_, %n));
if (!c%n.convertible()) return 0;
%)
%[r%: // find the result converter
typedef typename P::result_converter result_converter;
typename eval<result_converter,R>::type cr;
if (!cr.convertible()) return 0;
%]%[not-void-and-0-arg%: if (!policies.precall(args_)) return 0;
%] %[r%:PyObject* result = cr( %](*pf)(
%(c%n(PyTuple_GET_ITEM(args_, %n))%:
, %))%[r%: )%];
return policies.postcall(args_, %[r%:result%]%[v%:detail::none()%]);
}
'''
def _returns_value(key, n, args, value):
if key != 'v':
return value
else:
return ''
def _returns_void(key, n, args, value):
if key == 'v' or key == 'not-void-and-0-arg' and n != 0:
return value
else:
return ''
_cv_qualifiers = ('', ' const', ' volatile', ' const volatile')
_prefix = {
# ' const': '''
# // missing cv-qualified -> cv-unqualified member pointer conversions
# # if defined(__MWERKS__) && __MWERKS__ <=0x2406 || defined(BOOST_MSVC) && BOOST_MSVC <= 1200 || defined(__BORLANDC__)
# ''',
' const volatile': '''
// missing const volatile type traits
# ifndef BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION
'''};
def gen_returning(member_function_args, free_function_args = None):
if free_function_args is None:
free_function_args = member_function_args + 1
return_none = ''';
return detail::none();'''
return (header % (member_function_args, free_function_args)
+ body_sections[0]
#
# functions returning results
#
+ reduce(lambda x,y: x+y
, map(lambda cv:
_prefix.get(cv,'')
+ gen_functions(member_function,
member_function_args, cv,
fill = _returns_value) + '\n'
, _cv_qualifiers))
+ '''# endif // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION
'''
## endif // missing cv-qualified -> cv-unqualified member pointer conversions
#'''
# free functions
+ gen_functions(free_function, free_function_args, fill = _returns_value)
+ body_sections[3]
#
# functions returning void
#
+ reduce(lambda x,y: x+y
, map(lambda cv:
_prefix.get(cv,'')
+ gen_functions(member_function,
member_function_args, cv, fill =
_returns_void) + '\n'
, _cv_qualifiers))
+ '''# endif // BOOST_NO_TEMPLATE_PARTIAL_SPECIALIZATION
'''
## endif // missing cv-qualified -> cv-unqualified member pointer conversions
#'''
# free functions
+ gen_functions(free_function, free_function_args, fill = _returns_void)
+ body_sections[6]
)
if __name__ == '__main__':
import sys
if len(sys.argv) == 1:
member_function_args = 5
free_function_args = 6
else:
member_function_args = int(sys.argv[1])
if len(sys.argv) > 2:
free_function_args = int(sys.argv[2])
else:
free_function_args = member_function_args
print gen_returning(member_function_args, free_function_args)
|
{
"content_hash": "36bccfe462a6bac59fa6f427ac877652",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 119,
"avg_line_length": 30.603092783505154,
"alnum_prop": 0.5420245915445511,
"repo_name": "Ezeer/VegaStrike_win32FR",
"id": "8f7c866c6ebb79f4dcc97b87255fa16f1279afd6",
"size": "6319",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vegastrike/boost/1_28/src/gen_returning.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "4197693"
},
{
"name": "C++",
"bytes": "99169723"
},
{
"name": "Objective-C",
"bytes": "135840"
},
{
"name": "Perl",
"bytes": "21684"
},
{
"name": "Python",
"bytes": "186872"
},
{
"name": "Shell",
"bytes": "114240"
},
{
"name": "Standard ML",
"bytes": "2678"
}
],
"symlink_target": ""
}
|
"""Maintain moving averages of parameters."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.training import slot_creator
from tensorflow.python.util.tf_export import tf_export
# TODO(touts): switch to variables.Variable.
def assign_moving_average(variable, value, decay, zero_debias=True, name=None):
"""Compute the moving average of a variable.
The moving average of 'variable' updated with 'value' is:
variable * decay + value * (1 - decay)
The returned Operation sets 'variable' to the newly computed moving average,
by performing this subtraction:
variable -= (1 - decay) * (variable - value)
Since variables that are initialized to a `0` value will be `0` biased,
`zero_debias` optionally enables scaling by the mathematically correct
debiasing factor of
1 - decay ** num_updates
See Section 3 of (Kingma et al., 2015) for more details.
The names of the debias shadow variables, by default, include both the scope
they were created in and the scope of the variables they debias. They are also
given a uniquifying-suffix.
E.g.:
```
with tf.compat.v1.variable_scope('scope1'):
with tf.compat.v1.variable_scope('scope2'):
var = tf.compat.v1.get_variable('foo')
update_1 = tf.assign_moving_average(var, 0.0, 1.0)
update_2 = tf.assign_moving_average(var, 0.0, 0.9)
# var.name: 'scope1/scope2/foo'
# shadow var names: 'scope1/scope2/scope1/scope2/foo/biased'
# 'scope1/scope2/scope1/scope2/foo/biased_1'
```
Args:
variable: A Variable.
value: A tensor with the same shape as 'variable'.
decay: A float Tensor or float value. The moving average decay.
zero_debias: A python bool. If true, assume the variable is 0-initialized
and unbias it, as in (Kingma et al., 2015). See docstring in
`_zero_debias` for more details.
name: Optional name of the returned operation.
Returns:
A tensor which if evaluated will compute and return the new moving average.
References:
Adam - A Method for Stochastic Optimization:
[Kingma et al., 2015](https://arxiv.org/abs/1412.6980)
([pdf](https://arxiv.org/pdf/1412.6980.pdf))
"""
with ops.name_scope(name, "AssignMovingAvg",
[variable, value, decay]) as scope:
decay = ops.convert_to_tensor(1.0 - decay, name="decay")
if decay.dtype != variable.dtype.base_dtype:
decay = math_ops.cast(decay, variable.dtype.base_dtype)
def update_fn(v, value):
return state_ops.assign_sub(v, (v - value) * decay, name=scope)
def update(strategy, v, value):
if zero_debias:
return _zero_debias(strategy, v, value, decay)
else:
return strategy.extended.update(v, update_fn, args=(value,))
replica_context = distribution_strategy_context.get_replica_context()
if replica_context:
# In a replica context, we update variable using the mean of value across
# replicas.
def merge_fn(strategy, v, value):
value = strategy.extended.reduce_to(ds_reduce_util.ReduceOp.MEAN, value,
v)
return update(strategy, v, value)
return replica_context.merge_call(merge_fn, args=(variable, value))
else:
strategy = distribution_strategy_context.get_cross_replica_context()
return update(strategy, variable, value)
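# A minimal numeric sketch of the update rule above (session/graph plumbing is
# omitted; `v` is an illustrative float32 variable, not part of this module):
#   v = variables.Variable(0.0, trainable=False)
#   update = assign_moving_average(v, 1.0, decay=0.9, zero_debias=False)
#   # One run of `update` performs v -= (1 - 0.9) * (0.0 - 1.0), so v == 0.1,
#   # which matches 0.0 * 0.9 + 1.0 * (1 - 0.9).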
def weighted_moving_average(value,
decay,
weight,
truediv=True,
collections=None,
name=None):
"""Compute the weighted moving average of `value`.
Conceptually, the weighted moving average is:
`moving_average(value * weight) / moving_average(weight)`,
where a moving average updates by the rule
`new_value = decay * old_value + (1 - decay) * update`
Internally, this Op keeps moving average variables of both `value * weight`
and `weight`.
Args:
value: A numeric `Tensor`.
decay: A float `Tensor` or float value. The moving average decay.
weight: `Tensor` that keeps the current value of a weight. Shape should be
able to multiply `value`.
truediv: Boolean, if `True`, dividing by `moving_average(weight)` is
floating point division. If `False`, use division implied by dtypes.
collections: List of graph collections keys to add the internal variables
`value * weight` and `weight` to. Defaults to
`[GraphKeys.GLOBAL_VARIABLES]`.
name: Optional name of the returned operation. Defaults to
"WeightedMovingAvg".
Returns:
An Operation that updates and returns the weighted moving average.
"""
# Unlike assign_moving_average, the weighted moving average doesn't modify
# user-visible variables. It is the ratio of two internal variables, which are
# moving averages of the updates. Thus, the signature of this function is
# quite different than assign_moving_average.
if collections is None:
collections = [ops.GraphKeys.GLOBAL_VARIABLES]
with variable_scope.variable_scope(name, "WeightedMovingAvg",
[value, weight, decay]) as scope:
value_x_weight_var = variable_scope.get_variable(
"value_x_weight",
shape=value.get_shape(),
dtype=value.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=collections)
weight_var = variable_scope.get_variable(
"weight",
shape=weight.get_shape(),
dtype=weight.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False,
collections=collections)
numerator = assign_moving_average(
value_x_weight_var, value * weight, decay, zero_debias=False)
denominator = assign_moving_average(
weight_var, weight, decay, zero_debias=False)
if truediv:
return math_ops.truediv(numerator, denominator, name=scope.name)
else:
return math_ops.div(numerator, denominator, name=scope.name)
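# A small numeric illustration of the ratio above (all values illustrative):
# with decay 0.9, a single update where value == 4.0 and weight == 0.5 leaves
# the internal averages at value_x_weight == 0.2 and weight == 0.05, so the
# returned weighted moving average is 0.2 / 0.05 == 4.0; for a single
# consistent sample the weight cancels out, as expected.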
def _zero_debias(strategy, unbiased_var, value, decay):
"""Compute the delta required for a debiased Variable.
All exponential moving averages initialized with Tensors are initialized to 0,
and therefore are biased to 0. Variables initialized to 0 and used as EMAs are
similarly biased. This function creates the debias updated amount according to
a scale factor, as in (Kingma et al., 2015).
  To demonstrate the bias that results from 0-initialization, take an EMA that
  was initialized to `0` with decay `b`. After `t` timesteps of seeing the
  constant `c`, the variable will have the following value:
```
EMA = 0*b^(t) + c*(1 - b)*b^(t-1) + c*(1 - b)*b^(t-2) + ...
= c*(1 - b^t)
```
To have the true value `c`, we would divide by the scale factor `1 - b^t`.
In order to perform debiasing, we use two shadow variables. One keeps track of
the biased estimate, and the other keeps track of the number of updates that
have occurred.
Args:
strategy: `Strategy` used to create and update variables.
unbiased_var: A Variable representing the current value of the unbiased EMA.
value: A Tensor representing the most recent value.
decay: A Tensor representing `1-decay` for the EMA.
Returns:
The amount that the unbiased variable should be updated. Computing this
tensor will also update the shadow variables appropriately.
References:
Adam - A Method for Stochastic Optimization:
[Kingma et al., 2015](https://arxiv.org/abs/1412.6980)
([pdf](https://arxiv.org/pdf/1412.6980.pdf))
"""
with variable_scope.variable_scope(
unbiased_var.name[:-len(":0")], values=[unbiased_var, value, decay]):
with ops.init_scope():
biased_initializer = init_ops.zeros_initializer()
local_step_initializer = init_ops.zeros_initializer()
def _maybe_get_unique(name):
"""Get name for a unique variable, if not `reuse=True`."""
if variable_scope.get_variable_scope().reuse:
return name
vs_vars = [
x.op.name
for x in variable_scope.get_variable_scope().global_variables()
]
full_name = variable_scope.get_variable_scope().name + "/" + name
if full_name not in vs_vars:
return name
idx = 1
while full_name + ("_%d" % idx) in vs_vars:
idx += 1
return name + ("_%d" % idx)
with strategy.extended.colocate_vars_with(unbiased_var):
biased_var = variable_scope.get_variable(
_maybe_get_unique("biased"),
initializer=biased_initializer,
shape=unbiased_var.get_shape(),
dtype=unbiased_var.dtype,
trainable=False)
local_step = variable_scope.get_variable(
_maybe_get_unique("local_step"),
shape=[],
dtype=unbiased_var.dtype,
initializer=local_step_initializer,
trainable=False)
def update_fn(v, value, biased_var, local_step):
update_biased = state_ops.assign_sub(biased_var,
(biased_var - value) * decay)
update_local_step = local_step.assign_add(1)
# This function gets `1 - decay`, so use `1.0 - decay` in the exponent.
bias_factor = 1 - math_ops.pow(1.0 - decay, update_local_step)
return state_ops.assign(
v, update_biased / bias_factor, name=ops.get_name_scope() + "/")
return strategy.extended.update(
unbiased_var, update_fn, args=(value, biased_var, local_step))
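# A worked instance of the debiasing factor described above (numbers are
# illustrative): with decay b == 0.9 and a constant observation c == 1.0, after
# t == 2 updates the biased average is c * (1 - b**t) == 0.19, and dividing by
# the scale factor 1 - b**t == 0.19 recovers the true value 1.0.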
@tf_export("train.ExponentialMovingAverage")
class ExponentialMovingAverage(object):
"""Maintains moving averages of variables by employing an exponential decay.
When training a model, it is often beneficial to maintain moving averages of
the trained parameters. Evaluations that use averaged parameters sometimes
produce significantly better results than the final trained values.
The `apply()` method adds shadow copies of trained variables and add ops that
maintain a moving average of the trained variables in their shadow copies.
It is used when building the training model. The ops that maintain moving
averages are typically run after each training step.
The `average()` and `average_name()` methods give access to the shadow
variables and their names. They are useful when building an evaluation
model, or when restoring a model from a checkpoint file. They help use the
moving averages in place of the last trained values for evaluations.
The moving averages are computed using exponential decay. You specify the
decay value when creating the `ExponentialMovingAverage` object. The shadow
variables are initialized with the same initial values as the trained
variables. When you run the ops to maintain the moving averages, each
shadow variable is updated with the formula:
`shadow_variable -= (1 - decay) * (shadow_variable - variable)`
This is mathematically equivalent to the classic formula below, but the use
of an `assign_sub` op (the `"-="` in the formula) allows concurrent lockless
updates to the variables:
`shadow_variable = decay * shadow_variable + (1 - decay) * variable`
Reasonable values for `decay` are close to 1.0, typically in the
multiple-nines range: 0.999, 0.9999, etc.
Example usage when creating a training model:
```python
# Create variables.
var0 = tf.Variable(...)
var1 = tf.Variable(...)
# ... use the variables to build a training model...
...
# Create an op that applies the optimizer. This is what we usually
# would use as a training op.
opt_op = opt.minimize(my_loss, [var0, var1])
# Create an ExponentialMovingAverage object
ema = tf.train.ExponentialMovingAverage(decay=0.9999)
with tf.control_dependencies([opt_op]):
# Create the shadow variables, and add ops to maintain moving averages
# of var0 and var1. This also creates an op that will update the moving
# averages after each training step. This is what we will use in place
# of the usual training op.
training_op = ema.apply([var0, var1])
...train the model by running training_op...
```
There are two ways to use the moving averages for evaluations:
* Build a model that uses the shadow variables instead of the variables.
For this, use the `average()` method which returns the shadow variable
for a given variable.
* Build a model normally but load the checkpoint files to evaluate by using
the shadow variable names. For this use the `average_name()` method. See
the `tf.compat.v1.train.Saver` for more
information on restoring saved variables.
Example of restoring the shadow variable values:
```python
# Create a Saver that loads variables from their saved shadow values.
shadow_var0_name = ema.average_name(var0)
shadow_var1_name = ema.average_name(var1)
saver = tf.compat.v1.train.Saver({shadow_var0_name: var0, shadow_var1_name:
var1})
saver.restore(...checkpoint filename...)
# var0 and var1 now hold the moving average values
```
"""
def __init__(self,
decay,
num_updates=None,
zero_debias=False,
name="ExponentialMovingAverage"):
"""Creates a new ExponentialMovingAverage object.
The `apply()` method has to be called to create shadow variables and add
ops to maintain moving averages.
The optional `num_updates` parameter allows one to tweak the decay rate
dynamically. It is typical to pass the count of training steps, usually
kept in a variable that is incremented at each step, in which case the
decay rate is lower at the start of training. This makes moving averages
move faster. If passed, the actual decay rate used is:
`min(decay, (1 + num_updates) / (10 + num_updates))`
Args:
decay: Float. The decay to use.
num_updates: Optional count of number of updates applied to variables.
zero_debias: If `True`, zero debias moving-averages that are initialized
with tensors.
name: String. Optional prefix name to use for the name of ops added in
`apply()`.
"""
self._decay = decay
self._num_updates = num_updates
self._zero_debias = zero_debias
self._name = name
self._averages = {}
@property
def name(self):
"""The name of this ExponentialMovingAverage object."""
return self._name
def apply(self, var_list=None):
"""Maintains moving averages of variables.
`var_list` must be a list of `Variable` or `Tensor` objects. This method
creates shadow variables for all elements of `var_list`. Shadow variables
for `Variable` objects are initialized to the variable's initial value.
They will be added to the `GraphKeys.MOVING_AVERAGE_VARIABLES` collection.
For `Tensor` objects, the shadow variables are initialized to 0 and zero
debiased (see docstring in `assign_moving_average` for more details).
    Shadow variables are created with `trainable=False` and added to the
`GraphKeys.ALL_VARIABLES` collection. They will be returned by calls to
`tf.compat.v1.global_variables()`.
Returns an op that updates all shadow variables from the current value of
their associated variables.
Note that `apply()` can be called multiple times. When eager execution is
enabled each call to apply will update the variables once, so this needs to
be called in a loop.
Args:
var_list: A list of Variable or Tensor objects. The variables and Tensors
must be of types bfloat16, float16, float32, or float64.
Returns:
An Operation that updates the moving averages.
Raises:
TypeError: If the arguments are not an allowed type.
"""
# TODO(touts): op_scope
if var_list is None:
var_list = variables.trainable_variables()
for v in var_list:
if isinstance(v, ops.EagerTensor):
raise TypeError(
"tf.train.ExponentialMovingAverage does not support non-Variable"
" tensors when eager execution is enabled.")
zero_debias_true = set() # set of vars to set `zero_debias=True`
for var in var_list:
if var.dtype.base_dtype not in [
dtypes.bfloat16, dtypes.float16, dtypes.float32, dtypes.float64
]:
raise TypeError("The variables must be half, float, or double: %s" %
var.name)
if var.experimental_ref() not in self._averages:
# For variables: to lower communication bandwidth across devices we keep
# the moving averages on the same device as the variables. For other
# tensors, we rely on the existing device allocation mechanism.
if isinstance(var, variables.Variable):
if ops.executing_eagerly_outside_functions():
init_value = var.read_value()
else:
init_value = var.initialized_value()
avg = slot_creator.create_slot(
var, init_value, self.name, colocate_with_primary=True)
# NOTE(mrry): We only add `tf.Variable` objects to the
# `MOVING_AVERAGE_VARIABLES` collection.
ops.add_to_collection(ops.GraphKeys.MOVING_AVERAGE_VARIABLES, var)
else:
avg = slot_creator.create_zeros_slot(
var,
self.name,
colocate_with_primary=(var.op.type in [
"Variable", "VariableV2", "VarHandleOp"
]))
if self._zero_debias:
zero_debias_true.add(avg.experimental_ref())
self._averages[var.experimental_ref()] = avg
with ops.name_scope(self.name) as scope:
decay = ops.convert_to_tensor(self._decay, name="decay")
if self._num_updates is not None:
num_updates = math_ops.cast(
self._num_updates, dtypes.float32, name="num_updates")
decay = math_ops.minimum(decay,
(1.0 + num_updates) / (10.0 + num_updates))
updates = []
for var in var_list:
avg = self._averages[var.experimental_ref()]
zero_debias = avg.experimental_ref() in zero_debias_true
updates.append(assign_moving_average(avg, var, decay, zero_debias))
return control_flow_ops.group(*updates, name=scope)
def average(self, var):
"""Returns the `Variable` holding the average of `var`.
Args:
var: A `Variable` object.
Returns:
A `Variable` object or `None` if the moving average of `var`
is not maintained.
"""
return self._averages.get(var.experimental_ref(), None)
def average_name(self, var):
"""Returns the name of the `Variable` holding the average for `var`.
The typical scenario for `ExponentialMovingAverage` is to compute moving
averages of variables during training, and restore the variables from the
computed moving averages during evaluations.
To restore variables, you have to know the name of the shadow variables.
That name and the original variable can then be passed to a `Saver()` object
to restore the variable from the moving average value with:
`saver = tf.compat.v1.train.Saver({ema.average_name(var): var})`
`average_name()` can be called whether or not `apply()` has been called.
Args:
var: A `Variable` object.
Returns:
A string: The name of the variable that will be used or was used
by the `ExponentialMovingAverage class` to hold the moving average of
`var`.
"""
if var.experimental_ref() in self._averages:
return self._averages[var.experimental_ref()].op.name
return ops.get_default_graph().unique_name(
var.op.name + "/" + self.name, mark_as_used=False)
def variables_to_restore(self, moving_avg_variables=None):
"""Returns a map of names to `Variables` to restore.
If a variable has a moving average, use the moving average variable name as
the restore name; otherwise, use the variable name.
For example,
```python
variables_to_restore = ema.variables_to_restore()
saver = tf.compat.v1.train.Saver(variables_to_restore)
```
Below is an example of such mapping:
```
conv/batchnorm/gamma/ExponentialMovingAverage: conv/batchnorm/gamma,
conv_4/conv2d_params/ExponentialMovingAverage: conv_4/conv2d_params,
global_step: global_step
```
Args:
moving_avg_variables: a list of variables that require to use of the
moving average variable name to be restored. If None, it will default to
variables.moving_average_variables() + variables.trainable_variables()
Returns:
A map from restore_names to variables. The restore_name is either the
original or the moving average version of the variable name, depending
on whether the variable name is in the `moving_avg_variables`.
"""
name_map = {}
if moving_avg_variables is None:
# Include trainable variables and variables which have been explicitly
# added to the moving_average_variables collection.
moving_avg_variables = variables.trainable_variables()
moving_avg_variables += variables.moving_average_variables()
# Remove duplicates
moving_avg_variables = set(moving_avg_variables)
# Collect all the variables with moving average,
for v in moving_avg_variables:
name_map[self.average_name(v)] = v
# Make sure we restore variables without moving averages as well.
moving_avg_variable_names = set([v.name for v in moving_avg_variables])
for v in list(set(variables.global_variables())):
if v.name not in moving_avg_variable_names and v.op.name not in name_map:
name_map[v.op.name] = v
return name_map
|
{
"content_hash": "b3431d0a72943780ef34064cfcec5f4c",
"timestamp": "",
"source": "github",
"line_count": 547,
"max_line_length": 80,
"avg_line_length": 40.654478976234,
"alnum_prop": 0.6768144617321702,
"repo_name": "arborh/tensorflow",
"id": "afd7a040db9934672e2e90e31eea62f7f3726745",
"size": "22927",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/moving_averages.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "5003"
},
{
"name": "Batchfile",
"bytes": "45988"
},
{
"name": "C",
"bytes": "773694"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "76730781"
},
{
"name": "CMake",
"bytes": "6545"
},
{
"name": "Dockerfile",
"bytes": "81136"
},
{
"name": "Go",
"bytes": "1679107"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "952944"
},
{
"name": "Jupyter Notebook",
"bytes": "567243"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "1299305"
},
{
"name": "Makefile",
"bytes": "61397"
},
{
"name": "Objective-C",
"bytes": "104706"
},
{
"name": "Objective-C++",
"bytes": "297753"
},
{
"name": "PHP",
"bytes": "24055"
},
{
"name": "Pascal",
"bytes": "3752"
},
{
"name": "Pawn",
"bytes": "17546"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "38757009"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "7459"
},
{
"name": "Shell",
"bytes": "643787"
},
{
"name": "Smarty",
"bytes": "34727"
},
{
"name": "Swift",
"bytes": "62814"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import binascii
import datetime
import os
import struct
from uuid import UUID
import lz4.block
import plugins.helpers.UnifiedLog.dsc_file as dsc_file
import plugins.helpers.UnifiedLog.logger as logger
import plugins.helpers.UnifiedLog.resources as resources
# FORMAT
# Timestamp Thread Type Activity PID PROC_NAME: (Library) [Subsystem:Category] MESSAGE
# Timesync in-memory and persist start values not found in Tracev3
def ReadAPFSTime(mac_apfs_time): # Mac APFS timestamp is a nanosecond count since the epoch beginning 1970/1/1
'''Returns datetime object, or empty string upon error'''
if mac_apfs_time not in ( 0, None, ''):
try:
if isinstance(mac_apfs_time, str):
mac_apfs_time = float(mac_apfs_time)
return datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=mac_apfs_time/1000000000.)
except Exception as ex:
logger.error("ReadAPFSTime() Failed to convert timestamp from value " + str(mac_apfs_time) + " Error was: " + str(ex))
return ''
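# Example (value chosen for illustration): ReadAPFSTime(1609459200 * 1000000000)
# returns datetime(2021, 1, 1, 0, 0), since 1609459200 seconds after 1970-01-01
# is 2021-01-01 00:00:00 UTC.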
def DecompressTraceV3(trace_file, out_file):
''' Creates an uncompressed version of the .traceV3 file.
Input parameters:
trace_file = file pointer to .traceV3 file (opened as 'rb')
out_file = file pointer to blank file (opened as 'wb')
Returns True/False
'''
try:
index = 0
tag = trace_file.read(4)
while tag:
begin_pos = trace_file.tell() - 4
trace_file.seek(begin_pos + 8)
struct_len = struct.unpack('<Q', trace_file.read(8))[0]
logger.debug("index={} pos=0x{:X} tag=0x{}".format(index, begin_pos, binascii.hexlify(tag)[::-1]))
trace_file.seek(begin_pos)
chunk_data_incl_header = trace_file.read(16 + struct_len)
if tag == b'\x00\x10\x00\x00': # header
out_file.write(chunk_data_incl_header) # boot_uuid header, write to output directly
elif tag[0] == b'\x0B':
out_file.write(chunk_data_incl_header) # uncompressed, write to output directly
elif tag[0] == b'\x0D':
uncompressed = DecompressChunkData(chunk_data_incl_header[16:], struct_len)
out_file.write(chunk_data_incl_header[0:8]) # Same Header !
out_file.write(struct.pack('<Q', len(uncompressed))) # New size
out_file.write(uncompressed)
else:
logger.error('Unknown chunk tag value encountered : {}'.format(binascii.hexlify(tag)))
out_file.write(chunk_data_incl_header)
if struct_len % 8: # Go to QWORD boundary
struct_len += 8 - (struct_len % 8)
if out_file.tell() % 8: # Go to QWORD boundary on output
out_file.write(b'\x00\x00\x00\x00\x00\x00\x00'[0:(8-out_file.tell() % 8)])
trace_file.seek(begin_pos + 16 + struct_len)
tag = trace_file.read(4)
index += 1
except Exception as ex:
logger.exception('')
return False
return True
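# A minimal usage sketch (file names are illustrative):
#   with open('logdata.tracev3', 'rb') as trace_f, open('out.bin', 'wb') as out_f:
#       DecompressTraceV3(trace_f, out_f)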
def DecompressChunkData(chunk_data, data_len):
'''Decompress an individual compressed chunk (tag=0x600D)'''
uncompressed = b''
if chunk_data[0:4] in [b'bv41', b'bv4-']:
last_uncompressed = b''
comp_start = 0 # bv** offset
comp_header = chunk_data[comp_start:comp_start + 4]
while (data_len > comp_start) and (comp_header != b'bv4$'):
if comp_header == b'bv41':
uncompressed_size, compressed_size = struct.unpack('<II', chunk_data[comp_start + 4:comp_start + 12])
last_uncompressed = lz4.block.decompress(chunk_data[comp_start + 12: comp_start + 12 + compressed_size], uncompressed_size, dict=last_uncompressed)
comp_start += 12 + compressed_size
uncompressed += last_uncompressed
elif comp_header == b'bv4-':
uncompressed_size = struct.unpack('<I', chunk_data[comp_start + 4:comp_start + 8])[0]
uncompressed += chunk_data[comp_start + 8:comp_start + 8 + uncompressed_size]
comp_start += 8 + uncompressed_size
else:
                logger.error('Unknown compression value {} @ chunk offset 0x{:X} - {}'.format(binascii.hexlify(comp_header), comp_start, comp_header))
break
comp_header = chunk_data[comp_start:comp_start + 4]
else:
logger.error('Unknown compression type {}'.format(binascii.hexlify(chunk_data[16:20])))
return uncompressed
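# Layout of an lz4-compressed chunk body as handled above (sizes are
# little-endian uint32; this summarizes the parsing logic, not a formal spec):
#   b'bv41' | uncompressed_size | compressed_size | lz4 block data   (repeats)
#   b'bv4-' | uncompressed_size | raw uncompressed data              (repeats)
#   b'bv4$'  terminator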
class CachedFiles(object):
'''
Optimization measure to parse and hold open file pointers for uuidtext/dsc files,
so they are not parsed again and again
'''
def __init__(self, v_fs):
super(CachedFiles, self).__init__()
self.vfs = v_fs
        self.cached_dsc = {} # Key = UUID string uppercase (no separators), Val = Dsc object
        self.cached_uuidtext = {} # Key = UUID string uppercase (no separators), Val = Uuidtext object
def ParseFolder(self, uuidtext_folder_path):
'''Parse the uuidtext folder specified and parse all uuidtext/dsc files, adding them to the cache'''
try:
# dsc
dsc_path = self.vfs.path_join(uuidtext_folder_path, 'dsc')
entries = self.vfs.listdir(dsc_path)
for dsc_name in entries:
if len(dsc_name) == 32:
dsc_path_obj = self.vfs.get_virtual_file(self.vfs.path_join(dsc_path, dsc_name), 'Dsc')
dsc = dsc_file.Dsc(dsc_path_obj)
dsc.Parse()
self.cached_dsc[dsc_name] = dsc
# uuidtext - can't have this or python will complain of too many open files!
# entries = self.vfs.listdir(uuidtext_folder_path)
# index = 0
# for index in range(0x100):
# folder_name = '{:02X}'.format(index)
# #if vfs.path_exists(folder_path):
# if folder_name in entries:
# folder_path = self.vfs.path_join(uuidtext_folder_path, folder_name)
# uuid_names = self.vfs.listdir(folder_path)
# for uuid_name in uuid_names:
# if len(uuid_name) == 30: # filtering out possibly other files there!
# uuidtext_path = self.vfs.path_join(folder_path, uuid_name)
# file_object = self.vfs.get_virtual_file(uuidtext_path, 'Uuidtext')
# ut = uuidtext_file.Uuidtext(file_object, UUID(folder_name + uuid_name))
# ut.Parse()
# self.cached_uuidtext[folder_name + uuid_name] = ut
# else:
# logger.debug(folder_name + ' does not exist')
except Exception:
logger.exception('')
def ReadTimesyncFile(buffer, ts_list):
try:
pos = 0
size = len(buffer)
while pos < size:
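# Each timesync boot record starts with a 48 (0x30) byte header, matching the
# unpack formats below:
#   sig(2) header_size(2) unk1(4) boot_uuid(16) ts_numer(4) ts_denom(4) t_stamp(8) tz(4) is_dst(4)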
sig, header_size, unk1 = struct.unpack("<HHI", buffer[pos:pos+8])
if sig != 0xBBB0:
logger.error("not the right signature for Timesync header, got 0x{:04X} instead of 0x{:04X}, pos was 0x{:08X}".format(sig, 0x0030BBB0, pos))
break
uuid = UUID(bytes=buffer[pos+8:pos+24])
ts_numer, ts_denom, t_stamp, tz, is_dst = struct.unpack("<IIqiI", buffer[pos+24:pos+48])
ts_header = resources.TimesyncHeader(sig, unk1, uuid, ts_numer, ts_denom, t_stamp, tz, is_dst)
pos += header_size # 0x30 (48) by default
if header_size != 0x30:
logger.info("Timesync header was 0x{:X} bytes instead of 0x30(48) bytes!".format(size))
logger.debug("TIMEHEAD {} 0x{:016X} {} {}".format(uuid, t_stamp, ReadAPFSTime(t_stamp), 'boot'))
#TODO - TEST search ts_list for existing, not seen so far
existing_ts = None
for ts in ts_list:
if ts.header.boot_uuid == uuid:
existing_ts = ts
break
if existing_ts:
ts_obj = existing_ts
else:
ts_obj = resources.Timesync(ts_header)
ts_list.append(ts_obj)
# Adding header timestamp as Ts type too with cont_time = 0
timesync_item = resources.TimesyncItem(0, 0, t_stamp, tz, is_dst)
ts_obj.items.append(timesync_item)
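# The header is followed by 32-byte 'Ts \x00' records:
#   signature(4) unknown(4) continuous_time(8) wall_timestamp(8) bias(4) is_dst(4)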
while pos < size:
if buffer[pos:pos+4] == b'Ts \x00':
ts_unknown, cont_time, t_stamp, bias, is_dst = struct.unpack("<IqqiI", buffer[pos+4:pos+32])
timesync_item = resources.TimesyncItem(ts_unknown, cont_time, t_stamp, bias, is_dst)
ts_obj.items.append(timesync_item)
logger.debug("TIMESYNC {} 0x{:016X} {} {}".format(uuid, t_stamp, ReadAPFSTime(t_stamp), ts_unknown))
else:
break # break this loop, parse as header
pos += 32
except Exception as ex:
logger.exception("Exception reading TimesyncFile")
def ReadTimesyncFolder(path, ts_list, vfs):
'''Reads files in the timesync folder specified by 'path' and populates ts_list
with timesync entries.
vfs = VirtualFileSystem object
'''
try:
entries = vfs.listdir(path)
for entry in sorted(entries): # sort the files by name, so continuous time will be sequential automatically
if entry.endswith(".timesync"):
file_path = vfs.path_join(path, entry)
logger.debug('Trying to read timesync file {}'.format(file_path))
f = vfs.get_virtual_file(file_path, 'TimeSync').open()
if f:
buffer = f.read() # should be a fairly small file!
ReadTimesyncFile(buffer, ts_list)
f.close()
else:
logger.error("In Timesync folder, found non-ts file {}".format(entry))
except Exception:
logger.exception('')
|
{
"content_hash": "76e0da4229b0e75480da31c06eee8ddb",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 163,
"avg_line_length": 49.57560975609756,
"alnum_prop": 0.5706976286529568,
"repo_name": "ydkhatri/mac_apt",
"id": "0e2cd9076084979b83af2fb7f4ecadc51aeb1530",
"size": "11866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/helpers/UnifiedLog/Lib.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Kaitai Struct",
"bytes": "19375"
},
{
"name": "Python",
"bytes": "1315164"
}
],
"symlink_target": ""
}
|
"""
Unit tests for PySpark; additional tests are implemented as doctests in
individual modules.
"""
from array import array
from glob import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
import random
import threading
import hashlib
from py4j.protocol import Py4JJavaError
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
if sys.version_info[0] >= 3:
xrange = range
basestring = str
if sys.version >= "3":
from io import StringIO
else:
from StringIO import StringIO
from pyspark import keyword_only
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.files import SparkFiles
from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, \
CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, \
PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, \
FlattenedValuesSerializer
from pyspark.shuffle import Aggregator, ExternalMerger, ExternalSorter
from pyspark import shuffle
from pyspark.profiler import BasicProfiler
from pyspark.taskcontext import TaskContext
_have_scipy = False
_have_numpy = False
try:
import scipy.sparse
_have_scipy = True
except:
# No SciPy, but that's okay, we'll skip those tests
pass
try:
import numpy as np
_have_numpy = True
except:
# No NumPy, but that's okay, we'll skip those tests
pass
SPARK_HOME = os.environ["SPARK_HOME"]
class MergerTests(unittest.TestCase):
def setUp(self):
self.N = 1 << 12
self.l = [i for i in xrange(self.N)]
self.data = list(zip(self.l, self.l))
self.agg = Aggregator(lambda x: [x],
lambda x, y: x.append(y) or x,
lambda x, y: x.extend(y) or x)
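# Aggregator(createCombiner, mergeValue, mergeCombiners): this one simply collects
# every value seen for a key into a list.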
def test_small_dataset(self):
m = ExternalMerger(self.agg, 1000)
m.mergeValues(self.data)
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 1000)
m.mergeCombiners(map(lambda x_y1: (x_y1[0], [x_y1[1]]), self.data))
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
def test_medium_dataset(self):
m = ExternalMerger(self.agg, 20)
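# The second argument is a deliberately small memory limit, forcing the merger to
# spill to disk; the assertions on m.spills below check exactly that.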
m.mergeValues(self.data)
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda x_y2: (x_y2[0], [x_y2[1]]), self.data * 3))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)) * 3)
def test_huge_dataset(self):
m = ExternalMerger(self.agg, 5, partitions=3)
m.mergeCombiners(map(lambda k_v: (k_v[0], [str(k_v[1])]), self.data * 10))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(len(v) for k, v in m.items()),
self.N * 10)
m._cleanup()
def test_group_by_key(self):
def gen_data(N, step):
for i in range(1, N + 1, step):
for j in range(i):
yield (i, [j])
def gen_gs(N, step=1):
return shuffle.GroupByKey(gen_data(N, step))
self.assertEqual(1, len(list(gen_gs(1))))
self.assertEqual(2, len(list(gen_gs(2))))
self.assertEqual(100, len(list(gen_gs(100))))
self.assertEqual(list(range(1, 101)), [k for k, _ in gen_gs(100)])
self.assertTrue(all(list(range(k)) == list(vs) for k, vs in gen_gs(100)))
for k, vs in gen_gs(50002, 10000):
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
ser = PickleSerializer()
l = ser.loads(ser.dumps(list(gen_gs(50002, 30000))))
for k, vs in l:
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
class SorterTests(unittest.TestCase):
def test_in_memory_sort(self):
l = list(range(1024))
random.shuffle(l)
sorter = ExternalSorter(1024)
self.assertEqual(sorted(l), list(sorter.sorted(l)))
self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
def test_external_sort(self):
class CustomizedSorter(ExternalSorter):
def _next_limit(self):
return self.memory_limit
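# Overriding _next_limit to return a fixed limit keeps the sorter from growing its
# memory budget, so it is forced to spill (checked via shuffle.DiskBytesSpilled below).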
l = list(range(1024))
random.shuffle(l)
sorter = CustomizedSorter(1)
self.assertEqual(sorted(l), list(sorter.sorted(l)))
self.assertGreater(shuffle.DiskBytesSpilled, 0)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
def test_external_sort_in_rdd(self):
conf = SparkConf().set("spark.python.worker.memory", "1m")
sc = SparkContext(conf=conf)
l = list(range(10240))
random.shuffle(l)
rdd = sc.parallelize(l, 4)
self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
class SerializationTestCase(unittest.TestCase):
def test_namedtuple(self):
from collections import namedtuple
from pickle import dumps, loads
P = namedtuple("P", "x y")
p1 = P(1, 3)
p2 = loads(dumps(p1, 2))
self.assertEqual(p1, p2)
from pyspark.cloudpickle import dumps
P2 = loads(dumps(P))
p3 = P2(1, 3)
self.assertEqual(p1, p3)
def test_itemgetter(self):
from operator import itemgetter
ser = CloudPickleSerializer()
d = range(10)
getter = itemgetter(1)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = itemgetter(0, 3)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
def test_function_module_name(self):
ser = CloudPickleSerializer()
func = lambda x: x
func2 = ser.loads(ser.dumps(func))
self.assertEqual(func.__module__, func2.__module__)
def test_attrgetter(self):
from operator import attrgetter
ser = CloudPickleSerializer()
class C(object):
def __getattr__(self, item):
return item
d = C()
getter = attrgetter("a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("a", "b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
d.e = C()
getter = attrgetter("e.a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("e.a", "e.b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
# Regression test for SPARK-3415
def test_pickling_file_handles(self):
# to be corrected with SPARK-11160
if not xmlrunner:
ser = CloudPickleSerializer()
out1 = sys.stderr
out2 = ser.loads(ser.dumps(out1))
self.assertEqual(out1, out2)
def test_func_globals(self):
class Unpicklable(object):
def __reduce__(self):
raise Exception("not picklable")
global exit
exit = Unpicklable()
ser = CloudPickleSerializer()
self.assertRaises(Exception, lambda: ser.dumps(exit))
def foo():
sys.exit(0)
self.assertTrue("exit" in foo.__code__.co_names)
ser.dumps(foo)
def test_compressed_serializer(self):
ser = CompressedSerializer(PickleSerializer())
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
io = StringIO()
ser.dump_stream(["abc", u"123", range(5)], io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)], list(ser.load_stream(io)))
ser.dump_stream(range(1000), io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)] + list(range(1000)), list(ser.load_stream(io)))
io.close()
def test_hash_serializer(self):
hash(NoOpSerializer())
hash(UTF8Deserializer())
hash(PickleSerializer())
hash(MarshalSerializer())
hash(AutoSerializer())
hash(BatchedSerializer(PickleSerializer()))
hash(AutoBatchedSerializer(MarshalSerializer()))
hash(PairDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CartesianDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CompressedSerializer(PickleSerializer()))
hash(FlattenedValuesSerializer(PickleSerializer()))
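# QuietTest is a context manager that temporarily raises the log4j root logger to FATAL,
# silencing expected errors from jobs that are meant to fail.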
class QuietTest(object):
def __init__(self, sc):
self.log4j = sc._jvm.org.apache.log4j
def __enter__(self):
self.old_level = self.log4j.LogManager.getRootLogger().getLevel()
self.log4j.LogManager.getRootLogger().setLevel(self.log4j.Level.FATAL)
def __exit__(self, exc_type, exc_val, exc_tb):
self.log4j.LogManager.getRootLogger().setLevel(self.old_level)
class PySparkTestCase(unittest.TestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
self.sc = SparkContext('local[4]', class_name)
def tearDown(self):
self.sc.stop()
sys.path = self._old_sys_path
class ReusedPySparkTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sc = SparkContext('local[4]', cls.__name__)
@classmethod
def tearDownClass(cls):
cls.sc.stop()
class CheckpointTests(ReusedPySparkTestCase):
def setUp(self):
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEqual([1, 2, 3, 4], recovered.collect())
class LocalCheckpointTests(ReusedPySparkTestCase):
def test_basic_localcheckpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertFalse(flatMappedRDD.isLocallyCheckpointed())
flatMappedRDD.localCheckpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.isLocallyCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
class AddFileTests(PySparkTestCase):
def test_add_py_file(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this job fails due to `userlibrary` not being on the Python path:
# disable logging in log4j temporarily
def func(x):
from userlibrary import UserClass
return UserClass().hello()
with QuietTest(self.sc):
self.assertRaises(Exception, self.sc.parallelize(range(2)).map(func).first)
# Add the file, so the job should now succeed:
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
def test_add_file_recursively_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello")
self.sc.addFile(path, True)
download_path = SparkFiles.get("hello")
self.assertNotEqual(path, download_path)
with open(download_path + "/hello.txt") as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
with open(download_path + "/sub_hello/sub_hello.txt") as test_file:
self.assertEqual("Sub Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlib import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1.zip")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
def test_overwrite_system_module(self):
self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
import SimpleHTTPServer
self.assertEqual("My Server", SimpleHTTPServer.__name__)
def func(x):
import SimpleHTTPServer
return SimpleHTTPServer.__name__
self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class TaskContextTests(PySparkTestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
# Allow retries even though they are normally disabled in local mode
self.sc = SparkContext('local[4, 2]', class_name)
def test_stage_id(self):
"""Test the stage ids are available and incrementing as expected."""
rdd = self.sc.parallelize(range(10))
stage1 = rdd.map(lambda x: TaskContext.get().stageId()).take(1)[0]
stage2 = rdd.map(lambda x: TaskContext.get().stageId()).take(1)[0]
# Test using the constructor directly rather than the get()
stage3 = rdd.map(lambda x: TaskContext().stageId()).take(1)[0]
self.assertEqual(stage1 + 1, stage2)
self.assertEqual(stage1 + 2, stage3)
self.assertEqual(stage2 + 1, stage3)
def test_partition_id(self):
"""Test the partition id."""
rdd1 = self.sc.parallelize(range(10), 1)
rdd2 = self.sc.parallelize(range(10), 2)
pids1 = rdd1.map(lambda x: TaskContext.get().partitionId()).collect()
pids2 = rdd2.map(lambda x: TaskContext.get().partitionId()).collect()
self.assertEqual(0, pids1[0])
self.assertEqual(0, pids1[9])
self.assertEqual(0, pids2[0])
self.assertEqual(1, pids2[9])
def test_attempt_number(self):
"""Verify the attempt numbers are correctly reported."""
rdd = self.sc.parallelize(range(10))
# Verify a simple job with no failures
attempt_numbers = rdd.map(lambda x: TaskContext.get().attemptNumber()).collect()
self.assertTrue(all(attempt == 0 for attempt in attempt_numbers))  # map() would be lazy on Python 3
def fail_on_first(x):
"""Fail on the first attempt so we get a positive attempt number"""
tc = TaskContext.get()
attempt_number = tc.attemptNumber()
partition_id = tc.partitionId()
attempt_id = tc.taskAttemptId()
if attempt_number == 0 and partition_id == 0:
raise Exception("Failing on first attempt")
else:
return [x, partition_id, attempt_number, attempt_id]
result = rdd.map(fail_on_first).collect()
# The first partition should be re-attempted (attempt 1); all other partitions should stay at attempt 0
self.assertEqual([0, 0, 1], result[0][0:3])
self.assertEqual([9, 3, 0], result[9][0:3])
first_partition = [x for x in result if x[1] == 0]
self.assertTrue(all(x[2] == 1 for x in first_partition))
other_partitions = [x for x in result if x[1] != 0]
self.assertTrue(all(x[2] == 0 for x in other_partitions))
# The task attempt id should be different
self.assertTrue(result[0][3] != result[9][3])
def test_tc_on_driver(self):
"""Verify that getting the TaskContext on the driver returns None."""
tc = TaskContext.get()
self.assertTrue(tc is None)
class RDDTests(ReusedPySparkTestCase):
def test_range(self):
self.assertEqual(self.sc.range(1, 1).count(), 0)
self.assertEqual(self.sc.range(1, 0, -1).count(), 1)
self.assertEqual(self.sc.range(0, 1 << 40, 1 << 39).count(), 2)
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_empty_rdd(self):
rdd = self.sc.emptyRDD()
self.assertTrue(rdd.isEmpty())
def test_sum(self):
self.assertEqual(0, self.sc.emptyRDD().sum())
self.assertEqual(6, self.sc.parallelize([1, 2, 3]).sum())
def test_to_localiterator(self):
from time import sleep
rdd = self.sc.parallelize([1, 2, 3])
it = rdd.toLocalIterator()
sleep(5)
self.assertEqual([1, 2, 3], sorted(it))
rdd2 = rdd.repartition(1000)
it2 = rdd2.toLocalIterator()
sleep(5)
self.assertEqual([1, 2, 3], sorted(it2))
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode("utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode('utf8'))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda x_y3: x_y3[0] + x_y3[1]).collect()
def test_transforming_pickle_file(self):
# Regression test for SPARK-2601
data = self.sc.parallelize([u"Hello", u"World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual(u"Hello World!", x.strip())
self.assertEqual(u"Hello World!", y.strip())
def test_cartesian_chaining(self):
# Tests for SPARK-16589
rdd = self.sc.parallelize(range(10), 2)
self.assertSetEqual(
set(rdd.cartesian(rdd).cartesian(rdd).collect()),
set([((x, y), z) for x in range(10) for y in range(10) for z in range(10)])
)
self.assertSetEqual(
set(rdd.cartesian(rdd.cartesian(rdd)).collect()),
set([(x, (y, z)) for x in range(10) for y in range(10) for z in range(10)])
)
self.assertSetEqual(
set(rdd.cartesian(rdd.zip(rdd)).collect()),
set([(x, (y, y)) for x in range(10) for y in range(10)])
)
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
def test_sampling_default_seed(self):
# Test for SPARK-3995 (default seed setting)
data = self.sc.parallelize(xrange(1000), 1)
subset = data.takeSample(False, 10)
self.assertEqual(len(subset), 10)
def test_aggregate_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregate and treeAggregate to build dict
# representing a counter of ints
# NOTE: dict is used instead of collections.Counter for Python 2.6
# compatibility
from collections import defaultdict
# Show that single or multiple partitions work
data1 = self.sc.range(10, numSlices=1)
data2 = self.sc.range(10, numSlices=2)
def seqOp(x, y):
x[y] += 1
return x
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
counts1 = data1.aggregate(defaultdict(int), seqOp, comboOp)
counts2 = data2.aggregate(defaultdict(int), seqOp, comboOp)
counts3 = data1.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
counts4 = data2.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
ground_truth = defaultdict(int, dict((i, 1) for i in range(10)))
self.assertEqual(counts1, ground_truth)
self.assertEqual(counts2, ground_truth)
self.assertEqual(counts3, ground_truth)
self.assertEqual(counts4, ground_truth)
def test_aggregate_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregateByKey to make a pair RDD that
# contains lists of all values for each key in the original RDD
# list(range(...)) for Python 3.x compatibility (can't use * operator
# on a range object)
# list(zip(...)) for Python 3.x compatibility (want to parallelize a
# collection, not a zip object)
tuples = list(zip(list(range(10))*2, [1]*20))
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def seqOp(x, y):
x.append(y)
return x
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.aggregateByKey([], seqOp, comboOp).collect()
values2 = data2.aggregateByKey([], seqOp, comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
ground_truth = [(i, [1]*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_fold_mutable_zero_value(self):
# Test for SPARK-9021; uses fold to merge an RDD of dict counters into
# a single dict
# NOTE: dict is used instead of collections.Counter for Python 2.6
# compatibility
from collections import defaultdict
counts1 = defaultdict(int, dict((i, 1) for i in range(10)))
counts2 = defaultdict(int, dict((i, 1) for i in range(3, 8)))
counts3 = defaultdict(int, dict((i, 1) for i in range(4, 7)))
counts4 = defaultdict(int, dict((i, 1) for i in range(5, 6)))
all_counts = [counts1, counts2, counts3, counts4]
# Show that single or multiple partitions work
data1 = self.sc.parallelize(all_counts, 1)
data2 = self.sc.parallelize(all_counts, 2)
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
fold1 = data1.fold(defaultdict(int), comboOp)
fold2 = data2.fold(defaultdict(int), comboOp)
ground_truth = defaultdict(int)
for counts in all_counts:
for key, val in counts.items():
ground_truth[key] += val
self.assertEqual(fold1, ground_truth)
self.assertEqual(fold2, ground_truth)
def test_fold_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses foldByKey to make a pair RDD that contains
# lists of all values for each key in the original RDD
tuples = [(i, range(i)) for i in range(10)]*2
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.foldByKey([], comboOp).collect()
values2 = data2.foldByKey([], comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
# list(range(...)) for Python 3.x compatibility
ground_truth = [(i, list(range(i))*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_aggregate_by_key(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEqual([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 10000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 27MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
def test_unpersist(self):
N = 1000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 3MB
bdata.unpersist()
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
bdata.destroy()
try:
self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
except Exception as e:
pass
else:
raise Exception("job should fail after destroy the broadcast")
def test_multiple_broadcasts(self):
N = 1 << 21
b1 = self.sc.broadcast(set(range(N))) # multiple blocks in JVM
r = list(range(1 << 15))
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
def test_large_closure(self):
N = 200000
data = [float(i) for i in xrange(N)]
rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data))
self.assertEqual(N, rdd.first())
# regression test for SPARK-6886
self.assertEqual(1, rdd.map(lambda x: (x, 1)).groupByKey().count())
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
# regression test for SPARK-4841
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
t = self.sc.textFile(path)
cnt = t.count()
self.assertEqual(cnt, t.zip(t).count())
rdd = t.map(str)
self.assertEqual(cnt, t.zip(rdd).count())
# regression test for bug in _reserializer()
self.assertEqual(cnt, t.zip(rdd).count())
def test_zip_with_different_object_sizes(self):
# regression test for SPARK-5973
a = self.sc.parallelize(xrange(10000)).map(lambda i: '*' * i)
b = self.sc.parallelize(xrange(10000, 20000)).map(lambda i: '*' * i)
self.assertEqual(10000, a.zip(b).count())
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
with QuietTest(self.sc):
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEqual(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
rdd = self.sc.parallelize(xrange(1000))
self.assertTrue(950 < rdd.countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.03) < 1050)
rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
self.assertTrue(18 < rdd.countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual([4], rdd.histogram([0, 10])[1])
self.assertEqual([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEqual([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two bucket and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEqual([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEqual([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEqual(([1, 1], [1]), rdd.histogram(1))
# without bucket no range
rdd = self.sc.parallelize([1] * 4)
self.assertEqual(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEqual((buckets, hist), rdd.histogram(5))
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEqual([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEqual((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
def test_repartitionAndSortWithinPartitions_asc(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2, True)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEqual(partitions[1], [(1, 3), (3, 8), (3, 8)])
def test_repartitionAndSortWithinPartitions_desc(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2, False)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(2, 6), (0, 5), (0, 8)])
self.assertEqual(partitions[1], [(3, 8), (3, 8), (1, 3)])
def test_repartition_no_skewed(self):
num_partitions = 20
a = self.sc.parallelize(range(int(1000)), 2)
l = a.repartition(num_partitions).glom().map(len).collect()
zeros = len([x for x in l if x == 0])
self.assertTrue(zeros == 0)
l = a.coalesce(num_partitions, True).glom().map(len).collect()
zeros = len([x for x in l if x == 0])
self.assertTrue(zeros == 0)
def test_repartition_on_textfile(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
rdd = self.sc.textFile(path)
result = rdd.repartition(1).collect()
self.assertEqual(u"Hello World!", result[0])
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEqual(rdd.getNumPartitions(), 10)
self.assertEqual(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEqual(result.getNumPartitions(), 5)
self.assertEqual(result.count(), 3)
def test_external_group_by_key(self):
self.sc._conf.set("spark.python.worker.memory", "1m")
N = 200001
kv = self.sc.parallelize(xrange(N)).map(lambda x: (x % 3, x))
gkv = kv.groupByKey().cache()
self.assertEqual(3, gkv.count())
filtered = gkv.filter(lambda kv: kv[0] == 1)
self.assertEqual(1, filtered.count())
self.assertEqual([(1, N // 3)], filtered.mapValues(len).collect())
self.assertEqual([(N // 3, N // 3)],
filtered.values().map(lambda x: (len(x), len(list(x)))).collect())
result = filtered.collect()[0][1]
self.assertEqual(N // 3, len(result))
self.assertTrue(isinstance(result.data, shuffle.ExternalListOfList))
def test_sort_on_empty_rdd(self):
self.assertEqual([], self.sc.parallelize(zip([], [])).sortByKey().collect())
def test_sample(self):
rdd = self.sc.parallelize(range(0, 100), 4)
wo = rdd.sample(False, 0.1, 2).collect()
wo_dup = rdd.sample(False, 0.1, 2).collect()
self.assertSetEqual(set(wo), set(wo_dup))
wr = rdd.sample(True, 0.2, 5).collect()
wr_dup = rdd.sample(True, 0.2, 5).collect()
self.assertSetEqual(set(wr), set(wr_dup))
wo_s10 = rdd.sample(False, 0.3, 10).collect()
wo_s20 = rdd.sample(False, 0.3, 20).collect()
self.assertNotEqual(set(wo_s10), set(wo_s20))
wr_s11 = rdd.sample(True, 0.4, 11).collect()
wr_s21 = rdd.sample(True, 0.4, 21).collect()
self.assertNotEqual(set(wr_s11), set(wr_s21))
def test_null_in_rdd(self):
jrdd = self.sc._jvm.PythonUtils.generateRDDWithNull(self.sc._jsc)
rdd = RDD(jrdd, self.sc, UTF8Deserializer())
self.assertEqual([u"a", None, u"b"], rdd.collect())
rdd = RDD(jrdd, self.sc, NoOpSerializer())
self.assertEqual([b"a", None, b"b"], rdd.collect())
def test_multiple_python_java_RDD_conversions(self):
# Regression test for SPARK-5361
data = [
(u'1', {u'director': u'David Lean'}),
(u'2', {u'director': u'Andrew Dominik'})
]
data_rdd = self.sc.parallelize(data)
data_java_rdd = data_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
# conversion between python and java RDD threw exceptions
data_java_rdd = converted_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
def test_narrow_dependency_in_join(self):
rdd = self.sc.parallelize(range(10)).map(lambda x: (x, x))
parted = rdd.partitionBy(2)
self.assertEqual(2, parted.union(parted).getNumPartitions())
self.assertEqual(rdd.getNumPartitions() + 2, parted.union(rdd).getNumPartitions())
self.assertEqual(rdd.getNumPartitions() + 2, rdd.union(parted).getNumPartitions())
tracker = self.sc.statusTracker()
self.sc.setJobGroup("test1", "test", True)
d = sorted(parted.join(parted).collect())
self.assertEqual(10, len(d))
self.assertEqual((0, (0, 0)), d[0])
jobId = tracker.getJobIdsForGroup("test1")[0]
self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test2", "test", True)
d = sorted(parted.join(rdd).collect())
self.assertEqual(10, len(d))
self.assertEqual((0, (0, 0)), d[0])
jobId = tracker.getJobIdsForGroup("test2")[0]
self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test3", "test", True)
d = sorted(parted.cogroup(parted).collect())
self.assertEqual(10, len(d))
self.assertEqual([[0], [0]], list(map(list, d[0][1])))
jobId = tracker.getJobIdsForGroup("test3")[0]
self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test4", "test", True)
d = sorted(parted.cogroup(rdd).collect())
self.assertEqual(10, len(d))
self.assertEqual([[0], [0]], list(map(list, d[0][1])))
jobId = tracker.getJobIdsForGroup("test4")[0]
self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
# Regression test for SPARK-6294
def test_take_on_jrdd(self):
rdd = self.sc.parallelize(xrange(1 << 20)).map(lambda x: str(x))
rdd._jrdd.first()
def test_sortByKey_uses_all_partitions_not_only_first_and_last(self):
# Regression test for SPARK-5969
seq = [(i * 59 % 101, i) for i in range(101)] # unsorted sequence
rdd = self.sc.parallelize(seq)
for ascending in [True, False]:
sort = rdd.sortByKey(ascending=ascending, numPartitions=5)
self.assertEqual(sort.collect(), sorted(seq, reverse=not ascending))
sizes = sort.glom().map(len).collect()
for size in sizes:
self.assertGreater(size, 0)
def test_pipe_functions(self):
data = ['1', '2', '3']
rdd = self.sc.parallelize(data)
with QuietTest(self.sc):
self.assertEqual([], rdd.pipe('cc').collect())
self.assertRaises(Py4JJavaError, rdd.pipe('cc', checkCode=True).collect)
result = rdd.pipe('cat').collect()
result.sort()
for x, y in zip(data, result):
self.assertEqual(x, y)
self.assertRaises(Py4JJavaError, rdd.pipe('grep 4', checkCode=True).collect)
self.assertEqual([], rdd.pipe('grep 4').collect())
class ProfilerTests(PySparkTestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
conf = SparkConf().set("spark.python.profile", "true")
self.sc = SparkContext('local[4]', class_name, conf=conf)
def test_profiler(self):
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
id, profiler, _ = profilers[0]
stats = profiler.stats()
self.assertTrue(stats is not None)
width, stat_list = stats.get_print_list([])
func_names = [func_name for fname, n, func_name in stat_list]
self.assertTrue("heavy_foo" in func_names)
old_stdout = sys.stdout
sys.stdout = io = StringIO()
self.sc.show_profiles()
self.assertTrue("heavy_foo" in io.getvalue())
sys.stdout = old_stdout
d = tempfile.gettempdir()
self.sc.dump_profiles(d)
self.assertTrue("rdd_%d.pstats" % id in os.listdir(d))
def test_custom_profiler(self):
class TestCustomProfiler(BasicProfiler):
def show(self, id):
self.result = "Custom formatting"
self.sc.profiler_collector.profiler_cls = TestCustomProfiler
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
_, profiler, _ = profilers[0]
self.assertTrue(isinstance(profiler, TestCustomProfiler))
self.sc.show_profiles()
self.assertEqual("Custom formatting", profiler.result)
def do_computation(self):
def heavy_foo(x):
for i in range(1 << 18):
x = 1
rdd = self.sc.parallelize(range(100))
rdd.foreach(heavy_foo)
class InputFormatTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.sc._jvm.WriteInputFormatTestDataGenerator.generateData(cls.tempdir.name, cls.sc._jsc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name)
@unittest.skipIf(sys.version >= "3", "serialize array of byte")
def test_sequencefiles(self):
basepath = self.tempdir.name
ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/",
"org.apache.hadoop.io.DoubleWritable",
"org.apache.hadoop.io.Text").collect())
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.assertEqual(doubles, ed)
bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BytesWritable").collect())
ebs = [(1, bytearray('aa', 'utf-8')),
(1, bytearray('aa', 'utf-8')),
(2, bytearray('aa', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(3, bytearray('cc', 'utf-8'))]
self.assertEqual(bytes, ebs)
text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/",
"org.apache.hadoop.io.Text",
"org.apache.hadoop.io.Text").collect())
et = [(u'1', u'aa'),
(u'1', u'aa'),
(u'2', u'aa'),
(u'2', u'bb'),
(u'2', u'bb'),
(u'3', u'cc')]
self.assertEqual(text, et)
bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.assertEqual(bools, eb)
nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.assertEqual(nulls, en)
maps = self.sc.sequenceFile(basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
for v in maps:
self.assertTrue(v in em)
# arrays get pickled to tuples by default
tuples = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable").collect())
et = [(1, ()),
(2, (3.0, 4.0, 5.0)),
(3, (4.0, 5.0, 6.0))]
self.assertEqual(tuples, et)
# with custom converters, primitive arrays can stay as arrays
arrays = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
ea = [(1, array('d')),
(2, array('d', [3.0, 4.0, 5.0])),
(3, array('d', [4.0, 5.0, 6.0]))]
self.assertEqual(arrays, ea)
clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable").collect())
cname = u'org.apache.spark.api.python.TestWritable'
ec = [(u'1', {u'__class__': cname, u'double': 1.0, u'int': 1, u'str': u'test1'}),
(u'2', {u'__class__': cname, u'double': 2.3, u'int': 2, u'str': u'test2'}),
(u'3', {u'__class__': cname, u'double': 3.1, u'int': 3, u'str': u'test3'}),
(u'4', {u'__class__': cname, u'double': 4.2, u'int': 4, u'str': u'test4'}),
(u'5', {u'__class__': cname, u'double': 5.5, u'int': 5, u'str': u'test56'})]
self.assertEqual(clazz, ec)
unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable",
).collect())
self.assertEqual(unbatched_clazz, ec)
def test_oldhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
oldconf = {"mapreduce.input.fileinputformat.inputdir": hellopath}
hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=oldconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
newconf = {"mapreduce.input.fileinputformat.inputdir": hellopath}
hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=newconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newolderror(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_bad_inputs(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.sequenceFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.NotValidWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
maps = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
em = [(u'\x01', []),
(u'\x01', [3.0]),
(u'\x02', [1.0]),
(u'\x02', [1.0]),
(u'\x03', [2.0])]
self.assertEqual(maps, em)
def test_binary_files(self):
path = os.path.join(self.tempdir.name, "binaryfiles")
os.mkdir(path)
data = b"short binary data"
with open(os.path.join(path, "part-0000"), 'wb') as f:
f.write(data)
[(p, d)] = self.sc.binaryFiles(path).collect()
self.assertTrue(p.endswith("part-0000"))
self.assertEqual(d, data)
def test_binary_records(self):
path = os.path.join(self.tempdir.name, "binaryrecords")
os.mkdir(path)
with open(os.path.join(path, "part-0000"), 'w') as f:
for i in range(100):
f.write('%04d' % i)
result = self.sc.binaryRecords(path, 4).map(int).collect()
self.assertEqual(list(range(100)), result)
class OutputFormatTests(ReusedPySparkTestCase):
def setUp(self):
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
def tearDown(self):
shutil.rmtree(self.tempdir.name, ignore_errors=True)
@unittest.skipIf(sys.version >= "3", "serialize array of byte")
def test_sequencefiles(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/")
ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect())
self.assertEqual(ints, ei)
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/")
doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect())
self.assertEqual(doubles, ed)
ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))]
self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/")
bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect())
self.assertEqual(bytes, ebs)
et = [(u'1', u'aa'),
(u'2', u'bb'),
(u'3', u'cc')]
self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/")
text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect())
self.assertEqual(text, et)
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/")
bools = sorted(self.sc.sequenceFile(basepath + "/sfbool/").collect())
self.assertEqual(bools, eb)
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/")
nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect())
self.assertEqual(nulls, en)
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/")
maps = self.sc.sequenceFile(basepath + "/sfmap/").collect()
for v in maps:
self.assertTrue(v in em)
def test_oldhadoop(self):
basepath = self.tempdir.name
dict_data = [(1, {}),
(1, {"row1": 1.0}),
(2, {"row2": 2.0})]
self.sc.parallelize(dict_data).saveAsHadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable")
result = self.sc.hadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
for v in result:
self.assertTrue(v in dict_data)
conf = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.MapWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/olddataset/"
}
self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/olddataset/"}
result = self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
conf=input_conf).collect()
for v in result:
self.assertTrue(v in dict_data)
def test_newhadoop(self):
basepath = self.tempdir.name
data = [(1, ""),
(1, "a"),
(2, "bcdf")]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
self.assertEqual(result, data)
conf = {
"mapreduce.job.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.Text",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/newdataset/"
}
self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)
input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=input_conf).collect())
self.assertEqual(new_dataset, data)
@unittest.skipIf(sys.version >= "3", "serialize of array")
def test_newhadoop_with_array(self):
basepath = self.tempdir.name
# use custom ArrayWritable types and converters to handle arrays
array_data = [(1, array('d')),
(1, array('d', [1.0, 2.0, 3.0])),
(2, array('d', [3.0, 4.0, 5.0]))]
self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
self.assertEqual(result, array_data)
conf = {
"mapreduce.job.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/newdataset/"
}
self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
conf,
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter",
conf=input_conf).collect())
self.assertEqual(new_dataset, array_data)
def test_newolderror(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/newolderror/saveAsHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/newolderror/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat"))
def test_bad_inputs(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/badinputs/saveAsHadoopFile/",
"org.apache.hadoop.mapred.NotValidOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/badinputs/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
data = [(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/converters/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
expected = [(u'1', 3.0),
(u'2', 1.0),
(u'3', 2.0)]
self.assertEqual(converted, expected)
def test_reserialization(self):
basepath = self.tempdir.name
x = range(1, 5)
y = range(1001, 1005)
data = list(zip(x, y))
rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
self.assertEqual(result1, data)
rdd.saveAsHadoopFile(
basepath + "/reserialize/hadoop",
"org.apache.hadoop.mapred.SequenceFileOutputFormat")
result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
self.assertEqual(result2, data)
rdd.saveAsNewAPIHadoopFile(
basepath + "/reserialize/newhadoop",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
self.assertEqual(result3, data)
conf4 = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/reserialize/dataset"}
rdd.saveAsHadoopDataset(conf4)
result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
self.assertEqual(result4, data)
conf5 = {"mapreduce.job.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/reserialize/newdataset"
}
rdd.saveAsNewAPIHadoopDataset(conf5)
result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
self.assertEqual(result5, data)
def test_malformed_RDD(self):
basepath = self.tempdir.name
# non-batch-serialized RDD[[(K, V)]] should be rejected
data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
rdd = self.sc.parallelize(data, len(data))
self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
basepath + "/malformed/sequence"))
class DaemonTests(unittest.TestCase):
def connect(self, port):
from socket import socket, AF_INET, SOCK_STREAM
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', port))
# send a split index of -1 to shut down the worker
sock.send(b"\xFF\xFF\xFF\xFF")
sock.close()
return True
def do_termination_test(self, terminator):
from subprocess import Popen, PIPE
from errno import ECONNREFUSED
# start daemon
daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py")
python_exec = sys.executable or os.environ.get("PYSPARK_PYTHON")
daemon = Popen([python_exec, daemon_path], stdin=PIPE, stdout=PIPE)
# read the port number
port = read_int(daemon.stdout)
# daemon should accept connections
self.assertTrue(self.connect(port))
# request shutdown
terminator(daemon)
time.sleep(1)
# daemon should no longer accept connections
try:
self.connect(port)
except EnvironmentError as exception:
self.assertEqual(exception.errno, ECONNREFUSED)
else:
self.fail("Expected EnvironmentError to be raised")
def test_termination_stdin(self):
"""Ensure that daemon and workers terminate when stdin is closed."""
self.do_termination_test(lambda daemon: daemon.stdin.close())
def test_termination_sigterm(self):
"""Ensure that daemon and workers terminate on SIGTERM."""
from signal import SIGTERM
self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
class WorkerTests(ReusedPySparkTestCase):
def test_cancel_task(self):
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, 'w') as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
try:
self.sc.parallelize(range(1), 1).foreach(sleep)
except Exception:
pass
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
while True:
if os.path.exists(path):
with open(path) as f:
data = f.read().split(' ')
daemon_pid, worker_pid = map(int, data)
break
time.sleep(0.1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
# run a normal job
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_after_exception(self):
def raise_exception(_):
raise Exception()
rdd = self.sc.parallelize(xrange(100), 1)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))
self.assertEqual(100, rdd.map(str).count())
def test_after_jvm_exception(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name, 1)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_accumulator_when_reuse_worker(self):
from pyspark.accumulators import INT_ACCUMULATOR_PARAM
acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc1.add(x))
self.assertEqual(sum(range(100)), acc1.value)
acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc2.add(x))
self.assertEqual(sum(range(100)), acc2.value)
self.assertEqual(sum(range(100)), acc1.value)
def test_reuse_worker_after_take(self):
rdd = self.sc.parallelize(xrange(100000), 1)
self.assertEqual(0, rdd.first())
def count():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=count)
t.daemon = True
t.start()
t.join(5)
self.assertTrue(not t.isAlive())
self.assertEqual(100000, rdd.count())
def test_with_different_versions_of_python(self):
rdd = self.sc.parallelize(range(10))
rdd.count()
version = self.sc.pythonVer
self.sc.pythonVer = "2.0"
try:
with QuietTest(self.sc):
self.assertRaises(Py4JJavaError, lambda: rdd.count())
finally:
self.sc.pythonVer = version
class SparkSubmitTests(unittest.TestCase):
def setUp(self):
self.programDir = tempfile.mkdtemp()
self.sparkSubmit = os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit")
def tearDown(self):
shutil.rmtree(self.programDir)
def createTempFile(self, name, content, dir=None):
"""
Create a temp file with the given name and content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name)
else:
os.makedirs(os.path.join(self.programDir, dir))
path = os.path.join(self.programDir, dir, name)
with open(path, "w") as f:
f.write(content)
return path
def createFileInZip(self, name, content, ext=".zip", dir=None, zip_name=None):
"""
Create a zip archive containing a file with the given content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name + ext)
else:
path = os.path.join(self.programDir, dir, zip_name + ext)
zip = zipfile.ZipFile(path, 'w')
zip.writestr(name, content)
zip.close()
return path
def create_spark_package(self, artifact_name):
group_id, artifact_id, version = artifact_name.split(":")
self.createTempFile("%s-%s.pom" % (artifact_id, version), ("""
|<?xml version="1.0" encoding="UTF-8"?>
|<project xmlns="http://maven.apache.org/POM/4.0.0"
| xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
| xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
| http://maven.apache.org/xsd/maven-4.0.0.xsd">
| <modelVersion>4.0.0</modelVersion>
| <groupId>%s</groupId>
| <artifactId>%s</artifactId>
| <version>%s</version>
|</project>
""" % (group_id, artifact_id, version)).lstrip(),
os.path.join(group_id, artifact_id, version))
self.createFileInZip("%s.py" % artifact_id, """
|def myfunc(x):
| return x + 1
""", ".jar", os.path.join(group_id, artifact_id, version),
"%s-%s" % (artifact_id, version))
def test_single_script(self):
"""Submit and test a single script file"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect())
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out.decode('utf-8'))
def test_script_with_local_functions(self):
"""Submit and test a single script file calling a global function"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 3
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(foo).collect())
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[3, 6, 9]", out.decode('utf-8'))
def test_module_dependency(self):
"""Submit and test a script with a dependency on another module"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_module_dependency_on_cluster(self):
"""Submit and test a script with a dependency on another module on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, "--master",
"local-cluster[1,1,1024]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_package_dependency(self):
"""Submit and test a script with a dependency on a Spark Package"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
self.create_spark_package("a:mylib:0.1")
proc = subprocess.Popen([self.sparkSubmit, "--packages", "a:mylib:0.1", "--repositories",
"file:" + self.programDir, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_package_dependency_on_cluster(self):
"""Submit and test a script with a dependency on a Spark Package on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
self.create_spark_package("a:mylib:0.1")
proc = subprocess.Popen([self.sparkSubmit, "--packages", "a:mylib:0.1", "--repositories",
"file:" + self.programDir, "--master",
"local-cluster[1,1,1024]", script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_single_script_on_cluster(self):
"""Submit and test a single script on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 2
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(foo).collect())
""")
# this will fail if you have different spark.executor.memory
# in conf/spark-defaults.conf
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,1024]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out.decode('utf-8'))
def test_user_configuration(self):
"""Make sure user configuration is respected (SPARK-19307)"""
script = self.createTempFile("test.py", """
|from pyspark import SparkConf, SparkContext
|
|conf = SparkConf().set("spark.test_config", "1")
|sc = SparkContext(conf = conf)
|try:
| if sc._conf.get("spark.test_config") != "1":
| raise Exception("Cannot find spark.test_config in SparkContext's conf.")
|finally:
| sc.stop()
""")
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local", script],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode, msg="Process failed with error:\n {0}".format(out))
class ContextTests(unittest.TestCase):
def test_failed_sparkcontext_creation(self):
# Regression test for SPARK-1550
self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))
def test_get_or_create(self):
with SparkContext.getOrCreate() as sc:
self.assertTrue(SparkContext.getOrCreate() is sc)
def test_parallelize_eager_cleanup(self):
with SparkContext() as sc:
temp_files = os.listdir(sc._temp_dir)
rdd = sc.parallelize([0, 1, 2])
post_parallalize_temp_files = os.listdir(sc._temp_dir)
self.assertEqual(temp_files, post_parallalize_temp_files)
def test_set_conf(self):
# This is for an internal use case. When there is an existing SparkContext,
# SparkSession's builder needs to set configs into SparkContext's conf.
sc = SparkContext()
sc._conf.set("spark.test.SPARK16224", "SPARK16224")
self.assertEqual(sc._jsc.sc().conf().get("spark.test.SPARK16224"), "SPARK16224")
sc.stop()
def test_stop(self):
sc = SparkContext()
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_with(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_exception(self):
try:
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
raise Exception()
except:
pass
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_stop(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_progress_api(self):
with SparkContext() as sc:
sc.setJobGroup('test_progress_api', '', True)
rdd = sc.parallelize(range(10)).map(lambda x: time.sleep(100))
def run():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=run)
t.daemon = True
t.start()
# wait for scheduler to start
time.sleep(1)
tracker = sc.statusTracker()
jobIds = tracker.getJobIdsForGroup('test_progress_api')
self.assertEqual(1, len(jobIds))
job = tracker.getJobInfo(jobIds[0])
self.assertEqual(1, len(job.stageIds))
stage = tracker.getStageInfo(job.stageIds[0])
self.assertEqual(rdd.getNumPartitions(), stage.numTasks)
sc.cancelAllJobs()
t.join()
# wait for event listener to update the status
time.sleep(1)
job = tracker.getJobInfo(jobIds[0])
self.assertEqual('FAILED', job.status)
self.assertEqual([], tracker.getActiveJobsIds())
self.assertEqual([], tracker.getActiveStageIds())
sc.stop()
def test_startTime(self):
with SparkContext() as sc:
self.assertGreater(sc.startTime, 0)
class ConfTests(unittest.TestCase):
def test_memory_conf(self):
memoryList = ["1T", "1G", "1M", "1024K"]
for memory in memoryList:
sc = SparkContext(conf=SparkConf().set("spark.python.worker.memory", memory))
l = list(range(1024))
random.shuffle(l)
rdd = sc.parallelize(l, 4)
self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
class KeywordOnlyTests(unittest.TestCase):
class Wrapped(object):
@keyword_only
def set(self, x=None, y=None):
if "x" in self._input_kwargs:
self._x = self._input_kwargs["x"]
if "y" in self._input_kwargs:
self._y = self._input_kwargs["y"]
return x, y
def test_keywords(self):
w = self.Wrapped()
x, y = w.set(y=1)
self.assertEqual(y, 1)
self.assertEqual(y, w._y)
self.assertIsNone(x)
self.assertFalse(hasattr(w, "_x"))
def test_non_keywords(self):
w = self.Wrapped()
self.assertRaises(TypeError, lambda: w.set(0, y=1))
def test_kwarg_ownership(self):
# test _input_kwargs is owned by each class instance and not a shared static variable
class Setter(object):
@keyword_only
def set(self, x=None, other=None, other_x=None):
if "other" in self._input_kwargs:
self._input_kwargs["other"].set(x=self._input_kwargs["other_x"])
self._x = self._input_kwargs["x"]
a = Setter()
b = Setter()
a.set(x=1, other=b, other_x=2)
self.assertEqual(a._x, 1)
self.assertEqual(b._x, 2)
@unittest.skipIf(not _have_scipy, "SciPy not installed")
class SciPyTests(PySparkTestCase):
"""General PySpark tests that depend on scipy """
def test_serialize(self):
from scipy.special import gammaln
x = range(1, 5)
expected = list(map(gammaln, x))
observed = self.sc.parallelize(x).map(gammaln).collect()
self.assertEqual(expected, observed)
@unittest.skipIf(not _have_numpy, "NumPy not installed")
class NumPyTests(PySparkTestCase):
"""General PySpark tests that depend on numpy """
def test_statcounter_array(self):
x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
s = x.stats()
self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
self.assertSequenceEqual([1.0, 1.0], s.min().tolist())
self.assertSequenceEqual([3.0, 3.0], s.max().tolist())
self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())
stats_dict = s.asDict()
self.assertEqual(3, stats_dict['count'])
self.assertSequenceEqual([2.0, 2.0], stats_dict['mean'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['min'].tolist())
self.assertSequenceEqual([3.0, 3.0], stats_dict['max'].tolist())
self.assertSequenceEqual([6.0, 6.0], stats_dict['sum'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['stdev'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['variance'].tolist())
stats_sample_dict = s.asDict(sample=True)
self.assertEqual(3, stats_dict['count'])
self.assertSequenceEqual([2.0, 2.0], stats_sample_dict['mean'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_sample_dict['min'].tolist())
self.assertSequenceEqual([3.0, 3.0], stats_sample_dict['max'].tolist())
self.assertSequenceEqual([6.0, 6.0], stats_sample_dict['sum'].tolist())
self.assertSequenceEqual(
[0.816496580927726, 0.816496580927726], stats_sample_dict['stdev'].tolist())
self.assertSequenceEqual(
[0.6666666666666666, 0.6666666666666666], stats_sample_dict['variance'].tolist())
if __name__ == "__main__":
from pyspark.tests import *
if not _have_scipy:
print("NOTE: Skipping SciPy tests as it does not seem to be installed")
if not _have_numpy:
print("NOTE: Skipping NumPy tests as it does not seem to be installed")
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()
if not _have_scipy:
print("NOTE: SciPy tests were skipped as it does not seem to be installed")
if not _have_numpy:
print("NOTE: NumPy tests were skipped as it does not seem to be installed")
|
{
"content_hash": "1352d809ced66192973f0203e23e4692",
"timestamp": "",
"source": "github",
"line_count": 2260,
"max_line_length": 100,
"avg_line_length": 41.20442477876106,
"alnum_prop": 0.5878095401731063,
"repo_name": "nilsgrabbert/spark",
"id": "73ab442dfd791f2ae7f3de78adba48b2a3cb7393",
"size": "93907",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/pyspark/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "33141"
},
{
"name": "Batchfile",
"bytes": "24294"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "23957"
},
{
"name": "HTML",
"bytes": "9846"
},
{
"name": "Java",
"bytes": "2964359"
},
{
"name": "JavaScript",
"bytes": "141213"
},
{
"name": "Makefile",
"bytes": "7774"
},
{
"name": "PLpgSQL",
"bytes": "6761"
},
{
"name": "PowerShell",
"bytes": "3751"
},
{
"name": "Python",
"bytes": "2237471"
},
{
"name": "R",
"bytes": "1063995"
},
{
"name": "Roff",
"bytes": "14650"
},
{
"name": "SQLPL",
"bytes": "6233"
},
{
"name": "Scala",
"bytes": "23095392"
},
{
"name": "Shell",
"bytes": "155167"
},
{
"name": "Thrift",
"bytes": "33605"
}
],
"symlink_target": ""
}
|
"""
Qiushibaike (qiushibaike.com) crawler.
Scrapes jokes from the "hot" section of the desktop site
for later data analysis.
"""
__author__ = 'William Yang <505741310@qq.com>'
import time
import scrapy
from scrapy import Request
from ..items import ArticleItem
from ..item_loaders import ArticleLoader
class ArticleSpider(scrapy.Spider):
name = 'article'
allowed_domains = ['qiushibaike.com']
start_url = 'https://qiushibaike.com/'
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, sdch',
'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Upgrade-Insecure-Requests': '1'
}
def start_requests(self):
yield Request(url=self.start_url, headers=self.headers, dont_filter=True, meta={'page': 1})
def parse(self, response):
# XPath selector for the article blocks
article_block_xpath = '//div[@class="article block untagged mb15"]'
article_blocks = response.xpath(article_block_xpath)
if article_blocks:
# Predefine the relative XPath for each field; the leading '.' marks the path as relative to the block
article_id_xpath = './/a[@class="contentHerf"]/@href'
user_id_xpath = './/a[@rel]/@href'
user_name_xpath = './/h2/text()'
user_gender_css = '.articleGender::attr(class)'
user_age_css = '.articleGender::text'
user_img_xpath = './/a[@rel]/img/@src'
vote_count_xpath = './/span[@class="stats-vote"]/i/text()'
comment_count_xpath = './/span[@class="stats-comments"]//i/text()'
god_comment_xpath = './/div[@class="main-text"]/text()'
content_xpath = './/div[@class="content"]/span'
image_xpath = './/div[@class="thumb"]//img/@src'
# Iterate over the article blocks and yield one item per block
for block in article_blocks:
# Instantiate ArticleLoader with 'selector=block' so all paths are evaluated relative to this block
l = ArticleLoader(item=ArticleItem(), selector=block)
l.add_xpath('article_id', article_id_xpath)
l.add_xpath('user_id', user_id_xpath)
l.add_xpath('user_name', user_name_xpath)
l.add_css('user_gender', user_gender_css)
l.add_css('user_age', user_age_css)
l.add_xpath('user_img', user_img_xpath)
l.add_xpath('vote_count', vote_count_xpath)
l.add_xpath('comment_count', comment_count_xpath)
l.add_xpath('god_comment', god_comment_xpath)
l.add_xpath('content', content_xpath)
l.add_xpath('image', image_xpath)
l.add_value('article_type', block.extract())  # pass in the whole article block to determine the article type
l.add_value('url', 'https://www.qiushibaike.com/article/{}'.format(l.get_output_value('article_id')))
yield l.load_item()
# Get the current page number
page = response.meta.get('page', 1)
# Qiushibaike only serves 35 pages of hot content by default, so stop after page 35
if page < 35:
page += 1
url = 'https://www.qiushibaike.com/8hr/page/{}'.format(page)
yield Request(url=url, headers=self.headers, dont_filter=True, meta={'page': page})
|
{
"content_hash": "c301d846a5e129b7f76ca6cf4c7db796",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 117,
"avg_line_length": 40.47435897435897,
"alnum_prop": 0.5707950585999366,
"repo_name": "WilliamYang1992/qiushibaike-spider",
"id": "4f72ba48f8d879373c780bb937a82dd90e9bd187",
"size": "3440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qiushibaike_spider/spiders/article.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "17552"
}
],
"symlink_target": ""
}
|
"""Utilities for baseline tasks."""
from collections.abc import Callable
import attr
import tensorflow as tf
from tensorflow_federated.python.learning import model
from tensorflow_federated.python.simulation.baselines import task_data
@attr.s(frozen=True, init=True)
class BaselineTask:
"""Specification for a baseline learning simulation.
Attributes:
datasets: A `tff.simulation.baselines.BaselineTaskDatasets` object
specifying dataset-related aspects of the task, including training data
and preprocessing functions.
model_fn: A no-arg callable returning a `tff.learning.Model` used for the
task. Note that `model_fn().input_spec` must match
`datasets.element_type_structure`.
"""
datasets: task_data.BaselineTaskDatasets = attr.ib(
validator=attr.validators.instance_of(task_data.BaselineTaskDatasets))
model_fn: Callable[[], model.Model] = attr.ib(
validator=attr.validators.is_callable())
def __attrs_post_init__(self):
# Wrap model construction in a graph to avoid polluting the global context
# with variables created for this model.
with tf.Graph().as_default():
tff_model = self.model_fn()
if not isinstance(tff_model, model.Model):
raise TypeError('Expected model_fn to output a tff.learning.Model, '
'found {} instead'.format(type(tff_model)))
dataset_element_spec = self.datasets.element_type_structure
model_input_spec = tff_model.input_spec
if dataset_element_spec != model_input_spec:
raise ValueError(
'Dataset element spec and model input spec do not match. '
'Found dataset element spec {}, but model input spec {}'.format(
dataset_element_spec, model_input_spec))
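# Hedged usage sketch (not part of the original module): the names below are
# illustrative. Assuming `my_task_datasets` is a
# `tff.simulation.baselines.BaselineTaskDatasets` and `my_model_fn` is a no-arg
# callable returning a `tff.learning.Model` whose `input_spec` matches
# `my_task_datasets.element_type_structure`, a task would be built as:
#
#   task = BaselineTask(datasets=my_task_datasets, model_fn=my_model_fn)
#   model = task.model_fn()  # fresh model instance, e.g. one per training round
#
# A mismatched `input_spec` raises the ValueError from `__attrs_post_init__` above.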
|
{
"content_hash": "b7a0e8157b8ac8b78d1bc36659236b9a",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 78,
"avg_line_length": 38.888888888888886,
"alnum_prop": 0.7108571428571429,
"repo_name": "tensorflow/federated",
"id": "9f9c4f25ee7c6e1e419784bdce47a1dbe2d2a556",
"size": "2349",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_federated/python/simulation/baselines/baseline_task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "729470"
},
{
"name": "Dockerfile",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "6700736"
},
{
"name": "Shell",
"bytes": "7123"
},
{
"name": "Starlark",
"bytes": "387382"
}
],
"symlink_target": ""
}
|
"""
The MIT License (MIT)
Copyright (c) 2015 brokensbone
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from threading import Thread
import unittest
import time
import slimline
import requests
import baselog
logger = baselog.get_logger(__name__)
class EchoCallback(slimline.SlimlineCallback):
def define_params(self):
return [slimline.SlimlineParameterString("text", "The client's sent text")]
def define_return(self):
return [slimline.SlimlineParameterString("response", "The server's response")]
def callback(self, call_params):
client_message = call_params['text']
return {"response": client_message}
class MultiplyCallback(slimline.SlimlineCallback):
def define_params(self):
ret = [slimline.SlimlineParameterInt("first", "any old number"),
slimline.SlimlineParameterInt("second", "a number to multiply the first by")]
return ret
def define_return(self):
return [slimline.SlimlineParameterInt("product", "The product of the two numbers")]
def callback(self, call_params):
return {"product": call_params["first"] * call_params["second"]}
class SlimlineSimpleTest(unittest.TestCase):
TEST_TEXT = "A sample message"
FIRST = 10
SECOND = 5
PORT = 8201
@classmethod
def setUpClass(cls):
config = slimline.get_config()
config.set_web_port(cls.PORT)
slimline.configure(config)
slimline.run()
slimline.add_callback(MultiplyCallback("multiply"))
def _get_address(self):
return "http://localhost:" + str(self.PORT)
def test_something(self):
slimline.add_callback(EchoCallback("echo_test"))
response = requests.post(self._get_address(), data={"signature": "echo_test", "text": self.TEST_TEXT})
self.assertEqual(response.status_code, 200)
json_response = response.json()
text = json_response['response']
logger.debug(str(text))
self.assertEqual(text, self.TEST_TEXT)
def test_multiply(self):
data_dict = {"signature": "multiply", "first": self.FIRST, "second": self.SECOND}
response = requests.post(self._get_address(), data=data_dict)
json = response.json()
text = json['product']
self.assertEqual(50, text)
def test_help(self):
data_dict = {"signature": "help"}
response = requests.post(self._get_address(), data=data_dict)
text = response.json()["text"]
logger.debug(text)
data_dict["command"] = "help"
response = requests.post(self._get_address(), data=data_dict)
text = response.json()["text"]
logger.debug(text)
data_dict["command"] = "multiply"
response = requests.post(self._get_address(), data=data_dict)
text = response.json()["text"]
logger.debug(text)
def test_many_threads(self):
for thread_id in range(self.FIRST):
t = Thread(target=self._run_thread, kwargs={'first': thread_id, 'second': self.SECOND})
t.start()
time.sleep(10)
logger.debug("Exit")
def _run_thread(self, first=1, second=1):
for count in range(second):
data_dict = {"signature": "multiply", "first": first, "second": count+1}
response = requests.post(self._get_address(), data=data_dict)
json = response.json()
text = json['product']
self.assertEqual(first*(count+1), text, msg="count=" + str(count+1) + " first=" + str(first) + " text=" + str(text))
def echo_callback(self, **kwargs):
r = kwargs.get("request")
t = r.get_argument("text")
return slimline.SlimlineResponseMessage(t)
def test_random_function(self):
r = slimline.execute_function_async(self._run_this_in_callback)
self.assertIsNotNone(r, msg="Server response is none")
@staticmethod
def _run_this_in_callback():
logger.debug("I was run by the slimline server")
@classmethod
def tearDownClass(cls):
slimline.stop()
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "972720701edc573bfacdd6f535bf77c0",
"timestamp": "",
"source": "github",
"line_count": 147,
"max_line_length": 128,
"avg_line_length": 34.666666666666664,
"alnum_prop": 0.6613029827315542,
"repo_name": "brokensbone/slimline",
"id": "42b5028f69ac6f2765ae6ed9ca76c945c9a22ec6",
"size": "5097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "slimlinetests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "39257"
}
],
"symlink_target": ""
}
|
import pymel.core as pm
faceSel = pm.ls(sl=True,fl=True)
pm.mel.eval('ConvertSelectionToEdges')
allEd = pm.ls(sl=True,fl=True)
pm.select(faceSel,r=True)
pm.mel.eval('ConvertSelectionToContainedEdges')
insideEd = pm.ls(sl=True,fl=True)
pm.select(cl=True)
selBorderEd = []
for ed in allEd:
if ed not in insideEd:
selBorderEd.append(ed)
pm.select(selBorderEd,r=True)
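# Hedged note (not part of the original script): the membership test above is
# quadratic in the number of edges. An illustrative alternative with the same
# result compares edge names through sets, which scales better on dense meshes:
#
#   border = set(map(str, allEd)) - set(map(str, insideEd))
#   pm.select(list(border), r=True)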
|
{
"content_hash": "0e7642b91b5d0cd684b0aab0ebfba1ec",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 47,
"avg_line_length": 26.928571428571427,
"alnum_prop": 0.7347480106100795,
"repo_name": "aaronfang/personal_scripts",
"id": "02055812a1855295882b4092eeb76f347c364cf4",
"size": "377",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/afSeletionBorder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mathematica",
"bytes": "319303"
},
{
"name": "Python",
"bytes": "154066"
}
],
"symlink_target": ""
}
|
from abc import ABCMeta, abstractmethod
class OCRModel(object):
"""This is the base model for other OCRModels"""
__metaclass__ = ABCMeta
@abstractmethod
def train(self, tests, ans): pass
@abstractmethod
def run(self, test): pass
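# Hedged sketch (not part of the original module): a minimal concrete subclass,
# only to illustrate the abstract interface. 'EchoOCRModel' is an invented name.
class EchoOCRModel(OCRModel):
    """Trivial OCR model: ignores training and always returns a fixed label."""
    def __init__(self, label='?'):
        self.label = label

    def train(self, tests, ans):
        # A real model would fit itself on the (tests, ans) pairs here.
        pass

    def run(self, test):
        return self.label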
|
{
"content_hash": "a289ac00dfd75faaf8c3e56438dc2fae",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 52,
"avg_line_length": 19.846153846153847,
"alnum_prop": 0.6744186046511628,
"repo_name": "USCSoftwareEngineeringClub/pyceratOpsRecs",
"id": "d5cce90bf0e2f0bec68d2e9984b09bea2545a78e",
"size": "258",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/models/OCRModel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3224"
},
{
"name": "Python",
"bytes": "9011"
},
{
"name": "Shell",
"bytes": "937"
}
],
"symlink_target": ""
}
|
CIPHERNAMES = set(('aes-128-ctr',))
import warnings
import os
import sys
if sys.platform not in ('darwin',):
import pyelliptic
else:
# Fix the OpenSSL library path on OS X
# https://github.com/yann2192/pyelliptic/issues/11
_openssl_lib_paths = ['/usr/local/Cellar/openssl/']
for p in _openssl_lib_paths:
if os.path.exists(p):
p = os.path.join(p, os.listdir(p)[-1], 'lib')
os.environ['DYLD_LIBRARY_PATH'] = p
import pyelliptic
if CIPHERNAMES.issubset(set(pyelliptic.Cipher.get_all_cipher())):
break
if 'pyelliptic' not in dir() or not CIPHERNAMES.issubset(set(pyelliptic.Cipher.get_all_cipher())):
print('required ciphers %r not available in openssl library' % CIPHERNAMES)
if sys.platform == 'darwin':
print('use homebrew or macports to install newer openssl')
print('> brew install openssl / > sudo port install openssl')
sys.exit(1)
import bitcoin
from Crypto.Hash import keccak
from rlp.utils import str_to_bytes, safe_ord, ascii_chr
sha3_256 = lambda x: keccak.new(digest_bits=256, data=str_to_bytes(x))
from hashlib import sha256
import struct
from coincurve import PrivateKey, PublicKey
hmac_sha256 = pyelliptic.hmac_sha256
class ECIESDecryptionError(RuntimeError):
pass
class ECCx(pyelliptic.ECC):
"""
Modified to work with raw_pubkey format used in RLPx
and binding default curve and cipher
"""
ecies_ciphername = 'aes-128-ctr'
curve = 'secp256k1'
ecies_encrypt_overhead_length = 113
def __init__(self, raw_pubkey=None, raw_privkey=None):
if raw_privkey:
assert not raw_pubkey
raw_pubkey = privtopub(raw_privkey)
if raw_pubkey:
assert len(raw_pubkey) == 64
_, pubkey_x, pubkey_y, _ = self._decode_pubkey(raw_pubkey)
else:
pubkey_x, pubkey_y = None, None
while True:
pyelliptic.ECC.__init__(self, pubkey_x=pubkey_x, pubkey_y=pubkey_y,
raw_privkey=raw_privkey, curve=self.curve)
# XXX: when raw_privkey is generated by pyelliptic it sometimes
# has 31 bytes so we try again!
if self.raw_privkey and len(self.raw_privkey) != 32:
continue
try:
if self.raw_privkey:
bitcoin.get_privkey_format(self.raw_privkey) # failed for some keys
valid_priv_key = True
except AssertionError:
valid_priv_key = False
if len(self.raw_pubkey) == 64 and valid_priv_key:
break
elif raw_privkey or raw_pubkey:
raise Exception('invalid priv or pubkey')
assert len(self.raw_pubkey) == 64
@property
def raw_pubkey(self):
if self.pubkey_x and self.pubkey_y:
return str_to_bytes(self.pubkey_x + self.pubkey_y)
return self.pubkey_x + self.pubkey_y
@classmethod
def _decode_pubkey(cls, raw_pubkey):
assert len(raw_pubkey) == 64
pubkey_x = raw_pubkey[:32]
pubkey_y = raw_pubkey[32:]
return cls.curve, pubkey_x, pubkey_y, 64
def get_ecdh_key(self, raw_pubkey):
"Compute public key with the local private key and returns a 256bits shared key"
_, pubkey_x, pubkey_y, _ = self._decode_pubkey(raw_pubkey)
key = self.raw_get_ecdh_key(pubkey_x, pubkey_y)
assert len(key) == 32
return key
@property
def raw_privkey(self):
if self.privkey:
return str_to_bytes(self.privkey)
return self.privkey
def is_valid_key(self, raw_pubkey, raw_privkey=None):
try:
assert len(raw_pubkey) == 64
failed = bool(self.raw_check_key(raw_privkey, raw_pubkey[:32], raw_pubkey[32:]))
except (AssertionError, Exception):
failed = True
return not failed
@classmethod
def ecies_encrypt(cls, data, raw_pubkey, shared_mac_data=''):
"""
ECIES Encrypt, where P = recipient public key is:
1) generate r = random value
2) generate shared-secret = kdf( ecdhAgree(r, P) )
3) generate R = rG [same op as generating a public key]
4) send 0x04 || R || AsymmetricEncrypt(shared-secret, plaintext) || tag
currently used by go:
ECIES_AES128_SHA256 = &ECIESParams{
Hash: sha256.New,
hashAlgo: crypto.SHA256,
Cipher: aes.NewCipher,
BlockSize: aes.BlockSize,
KeyLen: 16,
}
"""
# 1) generate r = random value
ephem = ECCx()
# 2) generate shared-secret = kdf( ecdhAgree(r, P) )
key_material = ephem.raw_get_ecdh_key(pubkey_x=raw_pubkey[:32], pubkey_y=raw_pubkey[32:])
assert len(key_material) == 32
key = eciesKDF(key_material, 32)
assert len(key) == 32
key_enc, key_mac = key[:16], key[16:]
key_mac = sha256(key_mac).digest() # !!!
assert len(key_mac) == 32
# 3) generate R = rG [same op as generating a public key]
ephem_pubkey = ephem.raw_pubkey
# encrypt
iv = pyelliptic.Cipher.gen_IV(cls.ecies_ciphername)
assert len(iv) == 16
ctx = pyelliptic.Cipher(key_enc, iv, 1, cls.ecies_ciphername)
ciphertext = ctx.ciphering(data)
assert len(ciphertext) == len(data)
# 4) send 0x04 || R || AsymmetricEncrypt(shared-secret, plaintext) || tag
msg = ascii_chr(0x04) + ephem_pubkey + iv + ciphertext
# the MAC of a message (called the tag) as per SEC 1, 3.5.
tag = hmac_sha256(key_mac, msg[1 + 64:] + str_to_bytes(shared_mac_data))
assert len(tag) == 32
msg += tag
assert len(msg) == 1 + 64 + 16 + 32 + len(data) == 113 + len(data)
assert len(msg) - cls.ecies_encrypt_overhead_length == len(data)
return msg
def ecies_decrypt(self, data, shared_mac_data=b''):
"""
Decrypt data with ECIES method using the local private key
ECIES Decrypt (performed by recipient):
1) generate shared-secret = kdf( ecdhAgree(myPrivKey, msg[1:65]) )
2) verify tag
3) decrypt
ecdhAgree(r, recipientPublic) == ecdhAgree(recipientPrivate, R)
[where R = r*G, and recipientPublic = recipientPrivate*G]
"""
if data[:1] != b'\x04':
raise ECIESDecryptionError("wrong ecies header")
# 1) generate shared-secret = kdf( ecdhAgree(myPrivKey, msg[1:65]) )
_shared = data[1:1 + 64]
# FIXME, check that _shared_pub is a valid one (on curve)
key_material = self.raw_get_ecdh_key(pubkey_x=_shared[:32], pubkey_y=_shared[32:])
assert len(key_material) == 32
key = eciesKDF(key_material, 32)
assert len(key) == 32
key_enc, key_mac = key[:16], key[16:]
key_mac = sha256(key_mac).digest()
assert len(key_mac) == 32
tag = data[-32:]
assert len(tag) == 32
# 2) verify tag
if not pyelliptic.equals(hmac_sha256(key_mac, data[1 + 64:- 32] + shared_mac_data), tag):
raise ECIESDecryptionError("Fail to verify data")
# 3) decrypt
blocksize = pyelliptic.OpenSSL.get_cipher(self.ecies_ciphername).get_blocksize()
iv = data[1 + 64:1 + 64 + blocksize]
assert len(iv) == 16
ciphertext = data[1 + 64 + blocksize:- 32]
assert 1 + len(_shared) + len(iv) + len(ciphertext) + len(tag) == len(data)
ctx = pyelliptic.Cipher(key_enc, iv, 0, self.ecies_ciphername)
return ctx.ciphering(ciphertext)
encrypt = ecies_encrypt
decrypt = ecies_decrypt
def sign(self, data):
signature = ecdsa_sign(data, self.raw_privkey)
assert len(signature) == 65
return signature
def verify(self, signature, message):
assert len(signature) == 65
return ecdsa_verify(self.raw_pubkey, signature, message)
def lzpad32(x):
return '\x00' * (32 - len(x)) + x
def _encode_sig(v, r, s):
assert isinstance(v, (int, long))
assert v in (27, 28)
vb, rb, sb = chr(v - 27), bitcoin.encode(r, 256), bitcoin.encode(s, 256)
return lzpad32(rb) + lzpad32(sb) + vb
def _decode_sig(sig):
return safe_ord(sig[64]) + 27, bitcoin.decode(sig[0:32], 256), bitcoin.decode(sig[32:64], 256)
def ecdsa_verify(pubkey, signature, message):
assert len(pubkey) == 64
pk = PublicKey.from_signature_and_message(signature, message, hasher=None)
return pk.format(compressed=False) == b'\04' + pubkey
verify = ecdsa_verify
def ecdsa_sign(msghash, privkey):
pk = PrivateKey(privkey)
return pk.sign_recoverable(msghash, hasher=None)
sign = ecdsa_sign
def ecdsa_recover(message, signature):
assert len(signature) == 65
pk = PublicKey.from_signature_and_message(signature, message, hasher=None)
return pk.format(compressed=False)[1:]
recover = ecdsa_recover
def sha3(seed):
return sha3_256(seed).digest()
def mk_privkey(seed):
return sha3(seed)
def privtopub(raw_privkey):
raw_pubkey = bitcoin.encode_pubkey(bitcoin.privtopub(raw_privkey), 'bin_electrum')
assert len(raw_pubkey) == 64
return raw_pubkey
def encrypt(data, raw_pubkey):
"""
Encrypt data with ECIES method using the public key of the recipient.
"""
assert len(raw_pubkey) == 64, 'invalid pubkey of len {}'.format(len(raw_pubkey))
return ECCx.encrypt(data, raw_pubkey)
def eciesKDF(key_material, key_len):
"""
interop w/go ecies implementation
for sha3, blocksize is 136 bytes
for sha256, blocksize is 64 bytes
NIST SP 800-56a Concatenation Key Derivation Function (see section 5.8.1).
"""
s1 = b""
key = b""
hash_blocksize = 64
reps = ((key_len + 7) * 8) / (hash_blocksize * 8)
counter = 0
while counter <= reps:
counter += 1
ctx = sha256()
ctx.update(struct.pack('>I', counter))
ctx.update(key_material)
ctx.update(s1)
key += ctx.digest()
return key[:key_len]
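# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): assumes an openssl
# build that exposes aes-128-ctr. `_demo_roundtrip` is an illustrative helper,
# not an existing API.
def _demo_roundtrip():
    alice = ECCx()  # fresh keypair
    # ECIES: anyone can encrypt to alice's public key; only alice can decrypt.
    boxed = ECCx.ecies_encrypt(b'hello', alice.raw_pubkey)
    assert alice.ecies_decrypt(boxed) == b'hello'
    # ECDSA over a 32-byte hash: sign, verify, and recover the public key.
    msghash = sha3(b'some message')
    sig = alice.sign(msghash)
    assert alice.verify(sig, msghash)
    assert ecdsa_recover(msghash, sig) == alice.raw_pubkey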
|
{
"content_hash": "f3c6bbc5dfb1754e10118985a8e8bf39",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 98,
"avg_line_length": 33.31353135313531,
"alnum_prop": 0.6028333663562513,
"repo_name": "ethereum/pydevp2p",
"id": "9e5b268b4bdc08a28683eada0f301a117d29ded7",
"size": "10112",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "devp2p/crypto.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1509"
},
{
"name": "Python",
"bytes": "254444"
}
],
"symlink_target": ""
}
|
import requests
import datetime
import re
import json
import smtplib
from auto_nag.bugzilla.utils import get_config_path
REPLY_TO_EMAIL = 'release-mgmt@mozilla.com'
SMTP = 'smtp.mozilla.org'
subject = None
toaddrs = ['dev-planning@lists.mozilla.org', 'release-drivers@mozilla.com']
def sendMail(toaddr, options):
message = (
"From: %s\r\n" % options['username']
+ "To: %s\r\n" % toaddr
+ "CC: %s\r\n" % options['cclist']
+ "Reply-To: %s\r\n" % REPLY_TO_EMAIL
+ "Subject: %s\r\n" % options['subject']
+ "\r\n"
+ options['body'])
server = smtplib.SMTP_SSL(SMTP, 465)
server.set_debuglevel(1)
server.login(options['username'], options['password'])
# note: toaddrs is required for transport agents, the msg['To'] header is not modified
server.sendmail(options['username'], toaddr, message)
server.quit()
def getTemplateValue(url):
version_regex = re.compile(".*<p>(.*)</p>.*")
template_page = str(requests.get(url).text.encode('utf-8')).replace('\n', '')
parsed_template = version_regex.match(template_page)
return parsed_template.groups()[0]
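# Hedged illustration (not part of the original script): getTemplateValue expects
# the wiki template page to carry a single value wrapped in one paragraph, so a
# flattened body containing '<p>June 25, 2013</p>' yields 'June 25, 2013'. The
# example date is illustrative only.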
if __name__ == '__main__':
CONFIG_JSON = get_config_path()
config = json.load(open(CONFIG_JSON, 'r'))
# Grab the release date, the beta version number
release_date = getTemplateValue("https://wiki.mozilla.org/Template:FIREFOX_SHIP_DATE")
beta_version = getTemplateValue("https://wiki.mozilla.org/Template:BETA_VERSION")
current_version = getTemplateValue("https://wiki.mozilla.org/Template:CURRENT_VERSION")
today = datetime.date.today()
release = datetime.datetime.strptime(release_date, "%B %d, %Y").date()
# Check the timedelta between today and the release date:
# - 7 days before the release date, send the Sign Off reminder for 'tomorrow' (Thurs at 10am PT)
# - 29 days before the next release date, send the Post-Mortem reminder for the previous version for 'tomorrow' (Tues at 10am PT)
timedelta = today - release
if timedelta.days == -7:
# send the reminder email for sign off meeting
print "Sending Sign-off email reminder %s" % today
subject = "Automatic Reminder: Firefox %s Sign Off Meeting" % beta_version
body = """
This is a reminder that the FF%s sign-off meeting will be held tomorrow in the Release Coordination Vidyo room @ 10:00 am PT.
The wiki page is up and ready for you to add notes : https://wiki.mozilla.org/Releases/Firefox_%s/Final_Signoffs
-- Release Management
""" % (beta_version, beta_version)
if timedelta.days == -29:
# send the reminder email for post-mortem of curent release version
print "Sending post-mortem email reminder %s" % today
subject = "Reminder: Firefox %s Post Mortem Meeting Tomorrow" % current_version
body = """
Friendly Reminder that the FF%s.0 Post-Mortem will take place tomorrow @ 10:00 am PT during the Channel Meeting in the Release Co-ordination Vidyo room.
Etherpad - https://etherpad.mozilla.org/%s-0-Post-Mortem
-- Release Management
""" % (current_version, current_version)
if subject is not None:
options = {
"username": config['ldap_username'],
"password": config['ldap_password'],
"subject": subject,
"body": body,
"cclist": "release-mgmt@mozilla.com",
"toaddrs": toaddrs
}
for email in toaddrs:
sendMail(email, options)
else:
print "No command today: %s" % today
|
{
"content_hash": "47be30304d5052465db8379033f140bc",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 156,
"avg_line_length": 39.60674157303371,
"alnum_prop": 0.6504964539007092,
"repo_name": "anoopvalluthadam/bztools",
"id": "0ca1d67801de53cffcde7aaf8623ae613129dec6",
"size": "3596",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "auto_nag/scripts/automated_release_emails.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "92143"
}
],
"symlink_target": ""
}
|
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
DATABASE_ENGINE = 'sqlite3' # 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
DATABASE_NAME = 'test.db' # Or path to database file if using sqlite3.
DATABASE_USER = '' # Not used with sqlite3.
DATABASE_PASSWORD = '' # Not used with sqlite3.
DATABASE_HOST = '' # Set to empty string for localhost. Not used with sqlite3.
DATABASE_PORT = '' # Set to empty string for default. Not used with sqlite3.
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''
# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'a9r!c_2#z2elf45+fl1hem3h4r72zi)2fm&2xrs^vl)gx5wrgj'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
# 'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'firephp.firemiddleware.FireMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
)
ROOT_URLCONF = 'firephp.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
)
|
{
"content_hash": "6db5f384862c9c6ba2e21fb05663dc26",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 108,
"avg_line_length": 35.82051282051282,
"alnum_prop": 0.7100930565497494,
"repo_name": "schmidsi/firepy",
"id": "909b107398725c56929fde1406e79e6551cb5fea",
"size": "2834",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "proto/firephp/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23220"
}
],
"symlink_target": ""
}
|
import subprocess
import sys
import setup_util
from os.path import expanduser
import os
import getpass
home = expanduser("~")
def start(args, logfile, errfile):
setup_util.replace_text("kelp/app.pl", "localhost", ""+ args.database_host +"")
setup_util.replace_text("kelp/nginx.conf", "USR", getpass.getuser())
setup_util.replace_text("kelp/nginx.conf", "server unix:.*\/FrameworkBenchmarks", "server unix:" + home + "/FrameworkBenchmarks")
try:
subprocess.Popen("plackup -E production -s Starman --workers=" + str(args.max_threads) + " -l " + home + "/FrameworkBenchmarks/kelp/frameworks-benchmark.sock -a ./app.pl", shell=True, cwd="kelp", stderr=errfile, stdout=logfile)
subprocess.check_call("sudo /usr/local/nginx/sbin/nginx -c " + home + "/FrameworkBenchmarks/kelp/nginx.conf", shell=True, stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
def stop(logfile, errfile):
try:
subprocess.call("sudo /usr/local/nginx/sbin/nginx -s stop", shell=True, stderr=errfile, stdout=logfile)
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'starman' in line:
pid = int(line.split(None, 2)[1])
os.kill(pid, 15)
return 0
except subprocess.CalledProcessError:
return 1
|
{
"content_hash": "9e150cf299664920b5e06deaf7db763b",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 231,
"avg_line_length": 42.03125,
"alnum_prop": 0.6973977695167286,
"repo_name": "Ocramius/FrameworkBenchmarks",
"id": "1bd7756afd08d55bda4ad4f666fca5b78c9b3e92",
"size": "1345",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "kelp/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "838"
},
{
"name": "C",
"bytes": "37652"
},
{
"name": "C#",
"bytes": "124505"
},
{
"name": "C++",
"bytes": "387157"
},
{
"name": "CSS",
"bytes": "234858"
},
{
"name": "Clojure",
"bytes": "18712"
},
{
"name": "Dart",
"bytes": "28519"
},
{
"name": "Erlang",
"bytes": "7670"
},
{
"name": "Go",
"bytes": "25148"
},
{
"name": "Groovy",
"bytes": "16501"
},
{
"name": "Haskell",
"bytes": "8924"
},
{
"name": "IDL",
"bytes": "1736"
},
{
"name": "Java",
"bytes": "238179"
},
{
"name": "JavaScript",
"bytes": "394315"
},
{
"name": "Lua",
"bytes": "6561"
},
{
"name": "MoonScript",
"bytes": "1726"
},
{
"name": "Nimrod",
"bytes": "31172"
},
{
"name": "PHP",
"bytes": "17085206"
},
{
"name": "Perl",
"bytes": "14344"
},
{
"name": "PowerShell",
"bytes": "34847"
},
{
"name": "Python",
"bytes": "334626"
},
{
"name": "Racket",
"bytes": "1375"
},
{
"name": "Ruby",
"bytes": "74759"
},
{
"name": "Scala",
"bytes": "57177"
},
{
"name": "Shell",
"bytes": "57516"
},
{
"name": "Volt",
"bytes": "677"
}
],
"symlink_target": ""
}
|
"""Control Flow Operations.
See the @{$python/control_flow_ops} guide.
"""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import functools
import six
from tensorflow.core.framework import attr_value_pb2
from tensorflow.core.protobuf import control_flow_pb2
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util as util
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import gen_resource_variable_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import tensor_array_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.gen_control_flow_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_should_use
from tensorflow.python.util.tf_export import tf_export
# We override the 'tuple' for a control flow op, so we keep python's
# existing 'tuple' for later use in this module.
_basetuple = tuple
def _summarize_eager(tensor, summarize=None):
"""Returns a summarized string representation of eager `tensor`.
Args:
tensor: EagerTensor to summarize
summarize: Include these many first elements of `array`
"""
# reshape((-1,)) is the fastest way to get a flat array view
if tensor._rank(): # pylint: disable=protected-access
flat = tensor.numpy().reshape((-1,))
lst = [str(x) for x in flat[:summarize]]
if len(lst) < flat.size:
lst.append("...")
else:
# tensor.numpy() returns a scalar for zero dimensional arrays
if summarize != 0:
lst = [str(tensor.numpy())]
else:
lst = []
return ", ".join(lst)
# pylint: disable=protected-access
# Assert and Print are special symbols in python, so we must
# use an upper-case version of them.
@tf_export("Assert")
@tf_should_use.should_use_result
def Assert(condition, data, summarize=None, name=None):
"""Asserts that the given condition is true.
If `condition` evaluates to false, print the list of tensors in `data`.
`summarize` determines how many entries of the tensors to print.
NOTE: In graph mode, to ensure that Assert executes, one usually attaches
a dependency:
```python
# Ensure maximum element of x is smaller or equal to 1
assert_op = tf.Assert(tf.less_equal(tf.reduce_max(x), 1.), [x])
with tf.control_dependencies([assert_op]):
... code using x ...
```
Args:
condition: The condition to evaluate.
data: The tensors to print out when condition is false.
summarize: Print this many entries of each tensor.
name: A name for this operation (optional).
Returns:
assert_op: An `Operation` that, when executed, raises a
`tf.errors.InvalidArgumentError` if `condition` is not true.
@compatibility{eager} returns None.
Raises:
@compatibility{eager} `tf.errors.InvalidArgumentError` if `condition`
is not true
"""
if context.executing_eagerly():
if not condition:
xs = ops.convert_n_to_tensor(data)
data_str = [_summarize_eager(x, summarize) for x in xs]
raise errors.InvalidArgumentError(
node_def=None,
op=None,
message="Expected '%s' to be true. Summarized data: %s" %
(condition, "\n".join(data_str)))
return
with ops.name_scope(name, "Assert", [condition, data]) as name:
xs = ops.convert_n_to_tensor(data)
if all([x.dtype in {dtypes.string, dtypes.int32} for x in xs]):
# As a simple heuristic, we assume that string and int32 are
      # on host to avoid the need to use cond. If that is not the case,
      # we will pay the price of copying the tensor to host memory.
return gen_logging_ops._assert(condition, data, summarize, name="Assert")
else:
condition = ops.convert_to_tensor(condition, name="Condition")
def true_assert():
return gen_logging_ops._assert(
condition, data, summarize, name="Assert")
guarded_assert = cond(condition, no_op, true_assert, name="AssertGuard")
if context.executing_eagerly():
return
return guarded_assert.op
def _Identity(data, name=None):
"""Return a tensor with the same shape and contents as the input tensor.
Args:
data: A Tensor.
name: A name for this operation (optional).
Returns:
A Tensor with the same type and value as the input Tensor.
"""
data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype: # pylint: disable=protected-access
return gen_array_ops.ref_identity(data, name=name)
else:
return array_ops.identity(data, name=name)
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = _Identity(data.values, name=name)
indices = array_ops.identity(data.indices, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = array_ops.identity(dense_shape, name="dense_shape")
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = array_ops.identity(data.dense_shape, name="dense_shape")
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def _NextIteration(data, name=None):
data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype: # pylint: disable=protected-access
return ref_next_iteration(data, name=name)
else:
return next_iteration(data, name=name)
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = _NextIteration(data.values, name=name)
indices = next_iteration(data.indices, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = next_iteration(dense_shape, name="dense_shape")
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = next_iteration(data.dense_shape, name="dense_shape")
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def _Enter(data,
frame_name,
is_constant=False,
parallel_iterations=10,
use_ref=True,
use_input_shape=True,
name=None):
"""Creates or finds a child frame, and makes `data` available to it.
The unique `frame_name` is used by the `Executor` to identify frames. If
`is_constant` is true, `data` is a constant in the child frame; otherwise
it may be changed in the child frame. At most `parallel_iterations`
iterations are run in parallel in the child frame.
Args:
data: The tensor to be made available to the child frame.
frame_name: The name of the child frame.
is_constant: If true, the output is constant within the child frame.
parallel_iterations: The number of iterations allowed to run in parallel.
    use_ref: If true, use ref_enter if data is of ref type.
    use_input_shape: If true, set the result's static shape to the static
      shape of `data`.
    name: A name for this operation (optional).
Returns:
The same tensor as `data`.
"""
data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype and use_ref: # pylint: disable=protected-access
result = gen_control_flow_ops.ref_enter(
data, frame_name, is_constant, parallel_iterations, name=name)
else:
result = gen_control_flow_ops.enter(
data, frame_name, is_constant, parallel_iterations, name=name)
if use_input_shape:
result.set_shape(data.get_shape())
return result
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = _Enter(
data.values,
frame_name,
is_constant,
parallel_iterations=parallel_iterations,
use_input_shape=use_input_shape,
name=name)
indices = gen_control_flow_ops.enter(
data.indices,
frame_name,
is_constant,
parallel_iterations,
name="indices")
if use_input_shape:
indices.set_shape(data.indices.get_shape())
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = gen_control_flow_ops.enter(
dense_shape,
frame_name,
is_constant,
parallel_iterations,
name="dense_shape")
if use_input_shape:
dense_shape.set_shape(data.dense_shape.get_shape())
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = gen_control_flow_ops.enter(
data.dense_shape,
frame_name,
is_constant,
parallel_iterations,
name="dense_shape")
if use_input_shape:
dense_shape.set_shape(data.dense_shape.get_shape())
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def exit(data, name=None): # pylint: disable=redefined-builtin
"""Exits the current frame to its parent frame.
Exit makes its input `data` available to the parent frame.
Args:
data: The tensor to be made available to the parent frame.
name: A name for this operation (optional).
Returns:
The same tensor as `data`.
"""
data = ops.internal_convert_to_tensor_or_indexed_slices(data, as_ref=True)
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype: # pylint: disable=protected-access
return gen_control_flow_ops.ref_exit(data, name)
else:
return gen_control_flow_ops._exit(data, name)
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
values = exit(data.values, name=name)
indices = gen_control_flow_ops._exit(data.indices, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape = gen_control_flow_ops._exit(dense_shape, name)
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = gen_control_flow_ops._exit(data.dense_shape, name)
return sparse_tensor.SparseTensor(indices, values, dense_shape)
def switch(data, pred, dtype=None, name=None):
"""Forwards `data` to an output determined by `pred`.
If `pred` is false, the `data` input is forwarded to the first output.
Otherwise, the data goes to the second output.
This op handles `Tensor`s and `IndexedSlices`.
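  A minimal illustrative sketch (the variable names are placeholders):
  ```python
  data = tf.constant([1.0, 2.0])
  pred = tf.constant(True)
  output_false, output_true = switch(data, pred)
  # `data` is forwarded to `output_true` because `pred` is true.
  ```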
Args:
data: The tensor to be forwarded to the appropriate output.
pred: A scalar that specifies which output port will receive data.
dtype: Optional element type for the returned tensor. If missing,
      the type is inferred from the type of `data`.
name: A name for this operation (optional).
Returns:
`(output_false, output_true)`: If `pred` is true, data will be forwarded
to `output_true`, otherwise it goes to `output_false`.
"""
with ops.name_scope(name, "Switch", [data, pred]) as name:
data = ops.internal_convert_to_tensor_or_indexed_slices(
data, dtype=dtype, name="data", as_ref=True)
pred = ops.convert_to_tensor(pred, name="pred")
if isinstance(data, ops.Tensor):
return gen_control_flow_ops.switch(data, pred, name=name)
else:
if not isinstance(data, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(data))
val, ind = data.values, data.indices
val_f, val_t = gen_control_flow_ops.switch(val, pred, name=name)
ind_f, ind_t = gen_control_flow_ops.switch(ind, pred, name="indices")
if isinstance(data, ops.IndexedSlices):
dense_shape = data.dense_shape
if dense_shape is not None:
dense_shape_f, dense_shape_t = gen_control_flow_ops.switch(
dense_shape, pred, name="dense_shape")
else:
dense_shape_f, dense_shape_t = None, None
return (ops.IndexedSlices(val_f, ind_f, dense_shape_f),
ops.IndexedSlices(val_t, ind_t, dense_shape_t))
else:
dense_shape = data.dense_shape
dense_shape_f, dense_shape_t = gen_control_flow_ops.switch(
data.dense_shape, pred, name="dense_shape")
return (sparse_tensor.SparseTensor(ind_f, val_f, dense_shape_f),
sparse_tensor.SparseTensor(ind_t, val_t, dense_shape_t))
def _SwitchRefOrTensor(data, pred, name="Switch"):
"""Forwards `data` to an output determined by `pred`.
If `pred` is false, the `data` input is forwarded to the first output.
Otherwise, the data goes to the second output.
This op handles `Tensor`s and `IndexedSlices`.
Args:
data: The tensor to be forwarded to the appropriate output.
pred: A scalar that specifies which output port will receive data.
name: A name for this operation (optional).
Returns:
`(output_false, output_true)`: If `pred` is true, data will be forwarded to
`output_true`, otherwise it goes to `output_false`.
Raises:
TypeError: if data is not a Tensor or IndexedSlices
"""
data = ops.convert_to_tensor_or_indexed_slices(data, name="data")
# NOTE(vrv): ops.colocate_with(data, ignore_existing=True) below
# addresses the following scenario.
#
# Assume you execute Optimizer.apply_gradients() in a branch of a cond().
#
# 1. The update op is created inside a `with ops.colocate(var):` block
#
# 2. Some tensor `data` is captured and a switch is created in a
# `with ops.colocate_with(data):` block.
#
# with ops.colocate_with(var):
# with ops.colocate_with(data):
# op = ...
#
  # var and data may be pinned to different devices, so we want ops
# created within ops.colocate_with(data) to ignore the existing stack.
with ops.colocate_with(data, ignore_existing=True):
if isinstance(data, ops.Tensor):
if data.dtype._is_ref_dtype: # pylint: disable=protected-access
return ref_switch(data, pred, name=name)
return switch(data, pred, name=name)
def merge(inputs, name=None):
"""Returns the value of an available element of `inputs`.
This op tests each of the tensors in `inputs` in turn to determine if any of
them is available. If it finds an available tensor, it returns it and its
index in `inputs`.
It is an error if more than one tensor in `inputs` is available. If no tensor
in `inputs` is available, the returned tensor and index are not set.
This op handles both `Tensor`s and `IndexedSlices`. If inputs has a mix of
`Tensor`s and `IndexedSlices`, all inputs are converted to IndexedSlices
before merging.
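  A minimal illustrative sketch, pairing `merge` with `switch`:
  ```python
  output_false, output_true = switch(tf.constant(3.0), tf.constant(True))
  value, index = merge([output_false, output_true])
  # Only the true branch is available, so `value` is 3.0 and `index` is 1.
  ```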
Args:
inputs: The input tensors, at most one of which is available.
name: A name for this operation (optional).
Returns:
A tuple containing the chosen input tensor and its index in `inputs`.
Raises:
ValueError: If any of the inputs is None, or inputs are IndexedSlices and
some but not all have a dense_shape property.
"""
if any([inp is None for inp in inputs]):
raise ValueError("At least one of the merge inputs is None: %s" % inputs)
with ops.name_scope(name, "Merge", inputs) as name:
inputs = [
ops.internal_convert_to_tensor_or_indexed_slices(inp, as_ref=True)
for inp in inputs
]
if all([isinstance(v, ops.Tensor) for v in inputs]):
if all([v.dtype._is_ref_dtype for v in inputs]): # pylint: disable=protected-access
return gen_control_flow_ops.ref_merge(inputs, name)
else:
return gen_control_flow_ops.merge(inputs, name)
elif all([isinstance(v, sparse_tensor.SparseTensor) for v in inputs]):
# Only handle the case when all inputs are SparseTensor.
values, _ = merge([inp.values for inp in inputs], name=name)
indices, chosen_index = gen_control_flow_ops.merge(
[inp.indices for inp in inputs], name="indices")
dense_shape, _ = gen_control_flow_ops.merge(
[inp.dense_shape for inp in inputs], name="dense_shape")
return (sparse_tensor.SparseTensor(indices, values, dense_shape),
chosen_index)
else:
# For now convert all the inputs as IndexedSlices.
inputs = math_ops._as_indexed_slices_list(inputs, optimize=False)
values, _ = merge([inp.values for inp in inputs], name=name)
indices, chosen_index = gen_control_flow_ops.merge(
[inp.indices for inp in inputs], name="indices")
if any(inp.dense_shape is not None for inp in inputs):
if any(inp.dense_shape is None for inp in inputs):
raise ValueError("Either all merged IndexedSlices must have a "
"dense_shape, or none must have a dense_shape.")
dense_shape, _ = gen_control_flow_ops.merge(
[inp.dense_shape for inp in inputs], name="dense_shape")
else:
dense_shape = None
return ops.IndexedSlices(values, indices, dense_shape), chosen_index
# pylint: enable=protected-access
def _convert_tensorarray_to_flow(tensor_or_tensor_array):
if isinstance(tensor_or_tensor_array, tensor_array_ops.TensorArray):
return tensor_or_tensor_array.flow
else:
return tensor_or_tensor_array
def _make_tensor_array(ta, t_or_flow):
# pylint: disable=protected-access
new_ta = tensor_array_ops.TensorArray(
dtype=ta.dtype,
handle=ta.handle,
flow=t_or_flow,
infer_shape=ta._infer_shape,
colocate_with_first_write_call=ta._colocate_with_first_write_call)
new_ta._colocate_with = ta._colocate_with
new_ta._element_shape = ta._element_shape
# pylint: enable=protected-access
return new_ta
def _convert_flows_to_tensorarrays(tensors_or_tensorarrays, tensors_or_flows):
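  """Rewraps flow tensors into TensorArrays; other values pass through."""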
if len(tensors_or_tensorarrays) != len(tensors_or_flows):
raise ValueError(
"Lengths of original Tensor list and new list do not match: %d vs. %d" %
(len(tensors_or_tensorarrays), len(tensors_or_flows)))
return [
_make_tensor_array(ta, t_or_flow)
if isinstance(ta, tensor_array_ops.TensorArray) else t_or_flow
for (ta, t_or_flow) in zip(tensors_or_tensorarrays, tensors_or_flows)
]
def _ShapeLessThanOrEqual(shape1, shape2):
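  """Returns True if `shape1` is compatible with the shape invariant `shape2`.
  An unknown `shape2` accepts any `shape1`; otherwise the ranks must match and
  every known dimension of `shape2` must equal the corresponding dimension of
  `shape1`.
  """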
if shape2.dims is None:
return True
if shape1.ndims != shape2.ndims:
return False
for dim1, dim2 in zip(shape1.dims, shape2.dims):
if dim2.value is not None and dim1.value != dim2.value:
return False
return True
def _SetShapeInvariants(input_vars, enter_vars, shapes):
"""Set the shapes of the tensors in `enter_vars` to `shapes`.
Args:
input_vars: A list of tensors that are inputs to `enter_vars`.
enter_vars: A list of tensors whose shapes will be set.
shapes: A (possibly nested) list of shapes.
Raises:
ValueError: If any tensor in `enter_vars` has a less specific shape
than its corresponding shape in `shapes`.
"""
if shapes is None:
return
flat_shapes = nest.flatten(shapes)
if not all([isinstance(s, tensor_shape.TensorShape) for s in flat_shapes]):
raise ValueError("`shapes` must be a (possibly nested) list of shapes.")
# Check that the shapes of the inputs are less than the shape invariants,
# and set the shapes of `enter_vars` to the shape invariants.
for inp, var, shape in zip(input_vars, enter_vars, flat_shapes):
if isinstance(var, ops.Tensor):
if not _ShapeLessThanOrEqual(inp.get_shape(), shape):
raise ValueError(
"The shape invariant specified for %s is not compatible with "
"the initial shape of the loop variable. It enters the loop "
"with shape %s, but the specified shape invariant is %s." %
(inp.name, inp.get_shape(), shape))
var.set_shape(shape)
else:
if not isinstance(var, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(var))
if isinstance(var, ops.IndexedSlices):
if not _ShapeLessThanOrEqual(inp.values.get_shape(), shape):
raise ValueError(
"The shape invariant specified for %s is not compatible with "
"the initial shape of the values tensor of this IndexedSlices. "
"It enters the loop with shape %s, but the specified shape "
"invariant is %s." % (inp.values.name, inp.values.get_shape(),
shape))
var.values.set_shape(shape)
var.indices.set_shape(tensor_shape.TensorShape([shape[0]]))
if var.dense_shape is not None:
var.dense_shape.set_shape(tensor_shape.TensorShape([shape.ndims]))
else:
if not _ShapeLessThanOrEqual(inp.dense_shape.get_shape(), shape):
raise ValueError(
"The shape invariant specified for %s is not compatible with "
"the initial shape of the shape tensor of this SparseTensor. "
"It enters the loop with shape %s, but the specified shape "
"invariant is %s." % (inp.dense_shape.name,
inp.dense_shape.get_shape(), shape))
var.values.set_shape(tensor_shape.TensorShape([None]))
var.indices.set_shape(tensor_shape.TensorShape([None, shape.ndims]))
var.dense_shape.set_shape(shape)
def _EnforceShapeInvariant(merge_var, next_var):
"""Check if the shapes of the loops variables are invariants.
Args:
merge_var: The list of tensors representing the initial values of the
loop variables.
next_var: The list of tensors representing the values of the loop
variables after one loop iteration.
Raises:
ValueError: If any tensor in `merge_var` has a more specific shape than
      its corresponding tensor in `next_var`.
"""
if isinstance(merge_var, ops.Tensor):
m_shape = merge_var.get_shape()
n_shape = next_var.get_shape()
if not _ShapeLessThanOrEqual(n_shape, m_shape):
enter = merge_var.op.inputs[0].op
assert util.IsLoopEnter(enter)
input_t = enter.inputs[0]
assert input_t.shape == m_shape
raise ValueError(
"Input tensor '%s' enters the loop with shape %s, but has shape %s "
"after one iteration. To allow the shape to vary across iterations, "
"use the `shape_invariants` argument of tf.while_loop to specify a "
"less-specific shape." %
(input_t.name, input_t.shape, n_shape))
else:
    if not isinstance(merge_var,
                      (ops.IndexedSlices, sparse_tensor.SparseTensor)):
      raise TypeError("Type %s not supported" % type(merge_var))
    if isinstance(merge_var, ops.IndexedSlices):
m_values_shape = merge_var.values.get_shape()
m_indices_shape = merge_var.indices.get_shape()
m_shape_shape = tensor_shape.TensorShape(None)
if merge_var.dense_shape is not None:
m_shape_shape = merge_var.dense_shape.get_shape()
n_values_shape = next_var.values.get_shape()
n_indices_shape = next_var.indices.get_shape()
n_shape_shape = tensor_shape.TensorShape(None)
if next_var.dense_shape is not None:
n_shape_shape = next_var.dense_shape.get_shape()
if (not _ShapeLessThanOrEqual(n_values_shape, m_values_shape) or
not _ShapeLessThanOrEqual(n_indices_shape, m_indices_shape)):
if not _ShapeLessThanOrEqual(n_values_shape, m_values_shape):
raise ValueError(
"The shape for %s is not an invariant for the loop. It enters "
"the loop with shape (%s, %s, %s), but has shape (%s, %s, %s) "
"after one iteration. Provide shape invariants using either the "
"`shape_invariants` argument of tf.while_loop or set_shape() "
"on the loop variables." %
(merge_var.name, m_values_shape, m_indices_shape, m_shape_shape,
n_values_shape, n_indices_shape, n_shape_shape))
else:
m_values_shape = merge_var.values.get_shape()
m_indices_shape = merge_var.indices.get_shape()
m_shape_shape = merge_var.dense_shape.get_shape()
n_values_shape = next_var.values.get_shape()
n_indices_shape = next_var.indices.get_shape()
n_shape_shape = next_var.dense_shape.get_shape()
if (not _ShapeLessThanOrEqual(n_values_shape, m_values_shape) or
not _ShapeLessThanOrEqual(n_indices_shape, m_indices_shape) or
not _ShapeLessThanOrEqual(n_shape_shape, m_shape_shape)):
raise ValueError(
"The shape for %s is not an invariant for the loop. It enters "
"the loop with shape (%s, %s, %s), but has shape (%s, %s, %s) "
"after one iteration. Provide shape invariants using either "
"the `shape_invariants` argument of tf.while_loop or set_shape() "
"on the loop variables." %
(merge_var.name, m_values_shape, m_indices_shape, m_shape_shape,
n_values_shape, n_indices_shape, n_shape_shape))
def _AddNextAndBackEdge(m, v, enforce_shape_invariant=True):
"""Add NextIteration and back edge from v to m."""
if isinstance(m, ops.Tensor):
v = ops.convert_to_tensor(v)
v = _NextIteration(v)
if enforce_shape_invariant:
# Make sure the shapes of loop outputs are correct. We do this before
# calling _update_input, which will raise a less-helpful error message if
# the types don't match.
# TODO(skyewm): call this for other cases below (needs testing)
_EnforceShapeInvariant(m, v)
m.op._update_input(1, v) # pylint: disable=protected-access
elif isinstance(m, ops.IndexedSlices):
# pylint: disable=protected-access
v = math_ops._as_indexed_slices(v, optimize=False)
v = _NextIteration(v)
m.values.op._update_input(1, v.values)
m.indices.op._update_input(1, v.indices)
# pylint: enable=protected-access
if m.dense_shape is not None:
if v.dense_shape is None:
raise ValueError("Must have dense shape: %s" % v.name)
m.dense_shape.op._update_input(1, v.dense_shape)
elif isinstance(m, sparse_tensor.SparseTensor):
if not isinstance(v, sparse_tensor.SparseTensor):
raise ValueError("Must be a sparse tensor: %s" % v.name)
v = _NextIteration(v)
# pylint: disable=protected-access
m.values.op._update_input(1, v.values)
m.indices.op._update_input(1, v.indices)
m.dense_shape.op._update_input(1, v.dense_shape)
# pylint: enable=protected-access
else:
raise TypeError("Type %s not supported" % type(m))
return v
def GetMaxSizeFromNestedMaximumIterations(value, while_ctxt):
"""Calculate a max_size for use by stack ops inside an XLA while_loop.
Args:
value: The value inside the while_loop forward context. Used for printing
error messages.
while_ctxt: The forward context inside which value resides. This does
not always match the value's immediate context, as `value` may be
inside e.g. a cond context inside the while_loop.
Returns:
A tensor containing the `max_size` to feed to a Stack initializer.
Raises:
ValueError: If `value` is nested inside a `while_loop` that either
lacks a `maximum_iterations` parameter, or the `maximum_iterations`
parameter:
- is inside a `while_loop` that is a parent of the calling context, and
- cannot be evaluated at graph build time to a constant.
"""
value_name = value.name
# curr_ctxt is the context that tf.gradients was called in.
curr_ctxt = ops.get_default_graph()._get_control_flow_context() # pylint: disable=protected-access
curr_ctxt_name = curr_ctxt.name if curr_ctxt is not None else ""
max_size = constant_op.constant(1)
# Loop through all containing while contexts between value and the
# current context, multiplying together each context's
# max_iterations to get the maximum stack size.
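  # For example, a value inside two nested while loops with maximum_iterations
  # of 8 (outer) and 4 (inner) needs a stack of size at most 8 * 4 = 32.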
while while_ctxt not in (None, curr_ctxt):
max_iter = while_ctxt.maximum_iterations
if max_iter is None:
raise ValueError(
"Cannot create a gradient accumulator for tensor '%s' inside "
"XLA while_loop because maximum_iterations was not passed to "
"the tf.while_loop call ('%s')." % (value_name, while_ctxt.name))
# pylint: disable=protected-access
max_iter_ctxt = max_iter.op._get_control_flow_context()
# pylint: enable=protected-access
# If max_iter_ctxt (non-strictly) contains curr_ctxt, then it's OK to use.
if util.IsContainingContext(curr_ctxt, max_iter_ctxt):
max_size *= max_iter
else:
# We cannot use max_iter because it's defined in a nested while
# or cond context, so will fail if we try to use it as input to
# any ops in curr_ctxt (e.g. max_size or the final accumulator
# stack). Attempt to get a constant value out to use instead.
const_max_iter = tensor_util.constant_value(max_iter)
if const_max_iter is None:
raise ValueError(
"Cannot create a gradient accumulator for tensor '%s' inside XLA "
"while_loop. maximum_iterations tensor '%s' for while_loop context "
"'%s' must be statically known (e.g. a constant value or known "
"shape dimension), or be defined at or outside the while loop "
"context '%s' (currently defined in '%s')." %
(value_name, max_iter.name, while_ctxt.name, curr_ctxt_name,
max_iter_ctxt.name))
max_size *= const_max_iter
# Find the next outer WhileContext (or stop if we reach the
# tf.gradient's context).
while_ctxt = util.GetContainingWhileContext(
while_ctxt.outer_context, stop_ctxt=curr_ctxt)
return max_size
class GradLoopState(object):
"""The state used for constructing the gradient graph for a while loop.
We create a GradLoopState for each while loop in forward and its
corresponding while loop in backprop. This gives us access to both
the forward and the backprop WhileContexts.
During the construction of gradient graph, any time when we detect
a forward value that is needed for backprop, we create a history
accumulator and add it to `history_map`. Any time when we backprop
a loop switch op (in _SwitchGrad), we add the grad merge op in
`switch_map`.
"""
def __init__(self, forward_ctxt, outer_grad_state):
# The grad loop state for the outer while loop.
self._outer_grad_state = None
# The while loop context for forward.
self._forward_context = None
# The loop counter added by AddForwardLoopCounter. It is the value
# of the loop counter for the next iteration.
self._forward_index = None
# A sync op for forward.
self._forward_sync = None
# The while loop context for backprop.
self._grad_context = None
# The loop counter added by AddBackpropLoopCounter. It is the value
# of the loop counter for the current iteration.
self._grad_index = None
# A sync op for backprop.
self._grad_sync = None
# Information needed by backprop.
self._history_map = {}
self._switch_map = {}
self._unused_exits = []
self._deferred_exits = []
self._forward_loop_exits = list(forward_ctxt.loop_exits)
self._pending_exits_count = len(forward_ctxt.loop_exits)
self._outer_grad_state = outer_grad_state
if outer_grad_state:
outer_forward_ctxt = outer_grad_state.forward_context
else:
if not hasattr(forward_ctxt, "outer_context"):
raise ValueError("Failed to call gradients on a while loop without"
"properly serializing graph via MetaGraphDef")
outer_forward_ctxt = forward_ctxt.outer_context
# Add the forward loop counter.
if outer_forward_ctxt:
outer_forward_ctxt.Enter()
cnt, forward_index = forward_ctxt.AddForwardLoopCounter(outer_grad_state)
if outer_forward_ctxt:
outer_forward_ctxt.Exit()
self._forward_context = forward_ctxt
self._forward_index = forward_index
# Add the backprop WhileContext, and the backprop loop counter.
if outer_grad_state:
# This is a nested loop. Remember the iteration counts for each
# execution of this inner loop.
outer_forward_ctxt.AddName(cnt.name)
history_cnt = outer_grad_state.AddForwardAccumulator(cnt)
outer_grad_ctxt = outer_grad_state.grad_context
outer_grad_ctxt.Enter()
self._grad_context = WhileContext(
maximum_iterations=forward_ctxt.maximum_iterations,
parallel_iterations=forward_ctxt.parallel_iterations,
back_prop=forward_ctxt.back_prop,
swap_memory=forward_ctxt.swap_memory,
name=forward_ctxt.name,
grad_state=self)
real_cnt = outer_grad_state.AddBackpropAccumulatedValue(history_cnt, cnt)
self._grad_index = self._grad_context.AddBackpropLoopCounter(
real_cnt, outer_grad_state)
outer_grad_ctxt.Exit()
else:
if outer_forward_ctxt:
outer_forward_ctxt.Enter()
self._grad_context = WhileContext(
maximum_iterations=forward_ctxt.maximum_iterations,
parallel_iterations=forward_ctxt.parallel_iterations,
back_prop=forward_ctxt.back_prop,
swap_memory=forward_ctxt.swap_memory,
name=forward_ctxt.name,
grad_state=self)
self._grad_index = self._grad_context.AddBackpropLoopCounter(
cnt, outer_grad_state)
if outer_forward_ctxt:
outer_forward_ctxt.Exit()
@property
def outer_grad_state(self):
"""The grad loop state for outer loop."""
return self._outer_grad_state
@property
def forward_context(self):
"""The while loop context for forward."""
return self._forward_context
@property
def forward_index(self):
"""The loop index of forward loop."""
return self._forward_index
@property
def forward_sync(self):
"""A control trigger node for synchronization in the forward loop.
One main use is to keep the push ops of a stack executed in the
iteration order.
"""
if self._forward_sync is None:
with ops.control_dependencies(None):
self._forward_sync = control_trigger(name="f_sync")
self._forward_sync._set_control_flow_context(self._forward_context)
self._forward_index.op._add_control_input(self._forward_sync)
return self._forward_sync
@property
def grad_context(self):
"""The corresponding WhileContext for gradient."""
return self._grad_context
@property
def grad_index(self):
"""The loop index of backprop loop."""
return self._grad_index
@property
def grad_sync(self):
"""A control trigger node for synchronization in the grad loop.
One main use is to keep the pop ops of a stack executed in the
iteration order.
"""
if self._grad_sync is None:
with ops.control_dependencies(None):
self._grad_sync = control_trigger(name="b_sync")
self._grad_sync._set_control_flow_context(self._grad_context)
self._grad_index.op._add_control_input(self._grad_sync)
if self._grad_context.outer_context:
self._grad_context.outer_context.AddInnerOp(self._grad_sync)
return self._grad_sync
@property
def history_map(self):
"""The map that records all the tensors needed for backprop."""
return self._history_map
@property
def switch_map(self):
"""The map that records all the Switch ops for the while loop."""
return self._switch_map
@property
def unused_exits(self):
"""The list of "unused" exits."""
return self._unused_exits
@property
def deferred_exits(self):
"""The list of "deferred" exits."""
return self._deferred_exits
@property
def forward_loop_exits(self):
"""The list of exits of the forward loop."""
return self._forward_loop_exits
@property
def pending_exits_count(self):
"""The number of exits we expect to see but haven't."""
return self._pending_exits_count
@pending_exits_count.setter
def pending_exits_count(self, cnt):
"""Set the pending count to cnt."""
self._pending_exits_count = cnt
def AddForwardAccumulator(self, value, dead_branch=False):
"""Add an accumulator for each forward tensor that is needed in backprop.
    This is added to the forward loop the first time a tensor in the forward
    loop is used by the backprop gradient computation loop. We create an
    accumulator that accumulates the value of the tensor at each iteration.
    Called in the control flow context where gradients() is called.
The pseudocode is:
```
acc = stack();
while (_pivot) {
acc = stack_push(acc, value);
}
```
    We make sure that the stack push op in one iteration is executed before
    the next iteration. This is achieved by adding a control edge from
`forward_index.op.inputs[0].op` to the push op, and another control
edge from the push op to either `forward_index.op` or `forward_sync`.
Args:
value: The source tensor in forward that is to be accumulated.
dead_branch: True iff the tensor is on a dead branch of a cond.
Returns:
The stack that contains the accumulated history of the tensor.
Raises:
TypeError: For internal errors involving the value condition context.
      ValueError: If `value` is inside an XLA scope and a valid max size
for the stack can't be found.
"""
# curr_ctxt is the context that tf.gradients was called in.
curr_ctxt = ops.get_default_graph()._get_control_flow_context() # pylint: disable=protected-access
with ops.control_dependencies(None):
if curr_ctxt:
curr_ctxt.Enter()
with ops.colocate_with(value):
# We only need to pass maximum_iterations to the stack if
# we're inside an XLA context.
if not util.IsInXLAContext(value.op):
max_size = constant_op.constant(-1, dtypes.int32)
else:
max_size = GetMaxSizeFromNestedMaximumIterations(
value, self.forward_context)
acc = gen_data_flow_ops.stack_v2(
max_size=max_size, elem_type=value.dtype.base_dtype, name="f_acc")
if curr_ctxt:
curr_ctxt.Exit()
# Make acc available in the forward context.
enter_acc = self.forward_context.AddValue(acc)
# Add the stack_push op in the context of value.op.
swap_enabled = self.forward_context.swap_memory
value_ctxt = util.GetOutputContext(value.op)
if value_ctxt == self.forward_context:
# value is not nested in the forward context.
self.forward_context.Enter()
push = gen_data_flow_ops.stack_push_v2(
enter_acc, value, swap_memory=swap_enabled)
self.forward_context.Exit()
# Protect stack push and order it before forward_index.
self.forward_index.op._add_control_input(push.op)
else:
# value is in a cond context within the forward context.
if not isinstance(value_ctxt, CondContext):
raise TypeError("value_ctxt is not a CondContext: %s" % value_ctxt)
if dead_branch:
# The special case for creating a zero tensor for a dead
# branch of a switch. See ControlFlowState.ZerosLike().
value_ctxt.outer_context.Enter()
push = gen_data_flow_ops.stack_push_v2(
enter_acc, value, swap_memory=swap_enabled)
value_ctxt.outer_context.Exit()
push.op._set_control_flow_context(value_ctxt)
else:
value_ctxt.Enter()
push = gen_data_flow_ops.stack_push_v2(
enter_acc, value, swap_memory=swap_enabled)
value_ctxt.Exit()
# Protect stack push and order it before forward_sync.
self.forward_sync._add_control_input(push.op)
# Order stack push after the successor of forward_index
add_op = self.forward_index.op.inputs[0].op
push.op._add_control_input(add_op)
return acc
def AddBackpropAccumulatedValue(self, history_value, value,
dead_branch=False):
"""Add the getter for an accumulated value in the grad context.
This is added to the backprop loop. Called in the grad context to
get the value of an accumulated value. The stack pop op must be guarded
by the pred of the controlling cond.
Args:
history_value: The history (a stack) of a value.
value: The value that is pushed onto the stack.
dead_branch: True iff the tensor is on a dead branch of a cond.
Returns:
The current value (the top of the stack).
"""
history_ctxt = history_value.op._get_control_flow_context()
# Find the cond context that controls history_value if any.
cond_ctxt = None
value_ctxt = value.op._get_control_flow_context()
while value_ctxt and value_ctxt != history_ctxt:
if isinstance(value_ctxt, CondContext):
cond_ctxt = value_ctxt
break
value_ctxt = value_ctxt.outer_context
with ops.control_dependencies(None):
self.grad_context.Enter()
if cond_ctxt:
# Guard stack pop with a switch if it is controlled by a cond.
grad_state = self
pred = None
while pred is None and grad_state:
pred = grad_state.history_map.get(cond_ctxt.pred.name)
grad_state = grad_state.outer_grad_state
if pred is None:
pred = cond_ctxt.pred
branch = (1 - cond_ctxt.branch) if dead_branch else cond_ctxt.branch
history_value = _SwitchRefOrTensor(history_value, pred)[branch]
pop = gen_data_flow_ops.stack_pop_v2(history_value,
value.dtype.base_dtype)
pop.set_shape(value.get_shape())
self.grad_context.Exit()
parallel_iterations = self.grad_context.parallel_iterations
if parallel_iterations > 1:
# All pops are ordered after pivot_for_body and before grad_sync.
self.grad_sync._add_control_input(pop.op)
return pop
def GetRealValue(self, value):
"""Get the real value of `value`.
If backprop "uses" a value produced by forward inference, an accumulator
is added in the forward loop to accumulate its values. We use the
accumulated value. This method must be called in the grad loop context.
`value` must be in forward and needed for backprop.
Args:
value: A tensor to be captured.
Returns:
The same tensor obtained from the saved history.
"""
assert value.op.type not in ["Variable", "VariableV2"]
real_value = self._history_map.get(value.name)
if real_value is None:
cur_value = value
cur_grad_state = self
while True:
enter_op = util.GetLoopConstantEnter(cur_value)
if enter_op:
# Special case: cur_value comes from a constant Enter node.
cur_value = enter_op.inputs[0]
cur_grad_state = cur_grad_state.outer_grad_state
if cur_grad_state is None:
# We are now outside all nested loops for this gradient(),
# so `value` is a loop invariant and there is no need to
            # save the history of value. Just make cur_value enter
# the right control flow context.
real_value = self._grad_context.AddValue(cur_value)
break
elif constant_op.is_constant(cur_value):
# If the value to be forwarded is a constant, clone the constant in
# the gradient loop rather than using a stack.
# TODO(phawkins): consider hoisting the constant out of the loop
# instead.
real_value = constant_op.constant(
tensor_util.constant_value(cur_value), dtype=cur_value.dtype)
break
else:
# Record the history of this value in forward_ctxt.
self._grad_context.Exit()
history_value = cur_grad_state.AddForwardAccumulator(cur_value)
self._grad_context.Enter()
break
if real_value is None:
# Add the stack pop op in the grad context.
real_value = cur_grad_state.AddBackpropAccumulatedValue(
history_value, cur_value)
if cur_grad_state != self:
real_value = self._grad_context.AddValue(real_value)
self._history_map[value.name] = real_value
return real_value
def _GetWhileContext(op):
"""Get the WhileContext to which this op belongs."""
ctxt = op._get_control_flow_context()
if ctxt:
ctxt = ctxt.GetWhileContext()
return ctxt
class ControlFlowState(object):
"""Maintain the mapping from the loops to their grad states."""
def __init__(self):
self._map = {} # maps forward loop context to GradLoopState
def GetGradState(self, op, before):
"""Return the grad state for this op if it's in a forward loop context."""
if before and util.IsLoopExit(op):
forward_ctxt = op._get_control_flow_context()
forward_ctxt = forward_ctxt.outer_context
if forward_ctxt:
forward_ctxt = forward_ctxt.GetWhileContext()
else:
forward_ctxt = _GetWhileContext(op)
if forward_ctxt:
return self._map.get(forward_ctxt)
return None
def ProcessUnusedLoopExits(self, pending_count, to_ops_set):
"""Process all the "unused" loop exits.
The "unused" exits of the loops are added to `unused_exits`. An exit is
unused if its pending_count is 0. If there is an exit with real gradient,
all these deferred exits will enter the backprop loop with zero gradient.
Otherwise, they will enter the backprop loop with None. As an example,
people often write:
```python
v1, _ = tf.while_loop(p, b, [x1, x2])
result = gradients(v1, x1)
```
The exit node for x2 is not included by the betweenness analysis. But we
need to backprop x2 if x2 is involved in computing v1.
Args:
pending_count: The number of backprop inputs for every op.
to_ops_set: The set of ops for ys in gradients(ys, xs)
Returns:
The set of unused loop exits that we know at this point we need
to backprop.
"""
loop_exits = []
for grad_state in self._map.values():
for y in grad_state.forward_loop_exits:
if pending_count[y.op] == 0:
grad_state.pending_exits_count -= 1
if y.op not in to_ops_set:
grad_state.unused_exits.append(y)
if grad_state.pending_exits_count == 0:
loop_exits.extend(grad_state.unused_exits)
# Need to include Enters in backprop for higher-order gradients.
for y in grad_state.forward_context.loop_enters:
if pending_count[y.op] == 0:
pending_count[y.op] = 1
return loop_exits
def EnterGradWhileContext(self, op, before):
"""Enter the WhileContext for gradient computation."""
grad_state = self.GetGradState(op, before)
if grad_state:
grad_state.grad_context.Enter()
def ExitGradWhileContext(self, op, before):
"""Exit the WhileContext for gradient computation."""
grad_state = self.GetGradState(op, before)
if grad_state:
grad_state.grad_context.Exit()
def AddWhileContext(self, op, between_op_list, between_ops):
"""Add the grad state for the while loop that op belongs to.
Note that op is an Exit, and this method must be called in
the control flow context where gradients() is called.
Note that this method modifies `between_op_list` and `between_ops`.
"""
forward_ctxt = _GetWhileContext(op)
grad_state = self._map.get(forward_ctxt)
if grad_state is None:
# This is a new while loop so create a grad state for it.
outer_forward_ctxt = forward_ctxt.outer_context
if outer_forward_ctxt:
outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
outer_grad_state = None
if outer_forward_ctxt:
outer_grad_state = self._map.get(outer_forward_ctxt)
grad_state = GradLoopState(forward_ctxt, outer_grad_state)
self._map[forward_ctxt] = grad_state
# We need to include all exits of a loop for backprop.
for loop_exit in grad_state.forward_loop_exits:
if loop_exit.op not in between_ops:
between_ops.add(loop_exit.op)
between_op_list.append(loop_exit.op)
def ZerosLikeForExit(self, val):
"""Create zeros_like gradient for a loop exit.
If the result of a loop variable is not used but is involved in
computing the result of some needed loop variable, we create a
zero-valued tensor that is fed as gradient for the Exit node of that
loop variable. Note that val.op is an Exit, and this method must be
called in the control flow context where gradients() is called.
Args:
val: The output tensor of an Exit op.
Returns:
      A zero tensor of the same shape as val.
"""
val_shape = val.get_shape()
forward_ctxt = val.op._get_control_flow_context()
outer_forward_ctxt = forward_ctxt.outer_context
if outer_forward_ctxt:
outer_forward_ctxt = outer_forward_ctxt.GetWhileContext()
outer_grad_state = None
if outer_forward_ctxt:
outer_grad_state = self._map.get(outer_forward_ctxt)
if outer_grad_state:
# This is a nested loop.
if val_shape.is_fully_defined():
# If the shape is known statically, just create a zero tensor
# with the right shape in the right context.
outer_grad_state.grad_context.Enter()
result = array_ops.zeros(val_shape.dims, val.dtype)
outer_grad_state.grad_context.Exit()
else:
# Only the shape of value is needed for backprop.
forward_ctxt.outer_context.Enter()
shape = array_ops.shape_internal(val, optimize=False)
forward_ctxt.outer_context.Exit()
# Save the shape to a stack.
history_shape = outer_grad_state.AddForwardAccumulator(shape)
# Get the shape back from the stack.
outer_grad_ctxt = outer_grad_state.grad_context
outer_grad_ctxt.Enter()
real_shape = outer_grad_state.AddBackpropAccumulatedValue(
history_shape, shape)
result = array_ops.zeros(real_shape, val.dtype)
outer_grad_ctxt.Exit()
else:
# This is not a nested loop.
if val_shape.is_fully_defined():
# If the shape is known statically, just create a zero tensor
# with the right shape.
result = array_ops.zeros(val_shape.dims, val.dtype)
else:
result = array_ops.zeros_like(val, optimize=False)
return result
def ZerosLike(self, op, index):
"""Create zeros_like for the specified output of an op.
If op is in a while loop that is part of gradients(), this method
must be called in its grad loop context.
Args:
op: A tensorflow operation.
index: the index for a specific output of the op.
Returns:
      A zero tensor of the same shape as op.outputs[index].
"""
if util.IsLoopSwitch(op):
return None
dead_branch = util.IsSwitch(op)
forward_ctxt = _GetWhileContext(op)
grad_state = self._map.get(forward_ctxt)
if grad_state is None:
# op is not in a while loop that is part of gradients().
return ZerosLikeOutsideLoop(op, index)
op_ctxt = op._get_control_flow_context()
val = ops.convert_to_tensor(op.outputs[index], name="tensor")
shape = val.get_shape()
if shape.is_fully_defined():
# If the shape is known statically, just create a zero tensor with
# the right shape in the grad loop context.
result = constant_op.constant(0, shape=shape.dims, dtype=val.dtype)
if dead_branch:
# op is a cond switch. Guard the zero tensor with a switch.
pred = grad_state.history_map.get(op_ctxt.pred.name)
branch = op_ctxt.branch
result = _SwitchRefOrTensor(result, pred)[1 - branch]
else:
# Unknown shape so keep a history of the shape at runtime.
if dead_branch:
# Need to add a special switch to guard the value.
pred = op_ctxt.pred
branch = op_ctxt.branch
op_ctxt.outer_context.Enter()
val = _SwitchRefOrTensor(op.inputs[0], pred)[1 - branch]
zeros_shape = array_ops.shape_internal(val, optimize=False)
op_ctxt.outer_context.Exit()
val.op._set_control_flow_context(op_ctxt)
zeros_shape.op._set_control_flow_context(op_ctxt)
else:
op_ctxt.Enter()
zeros_shape = array_ops.shape_internal(val, optimize=False)
op_ctxt.Exit()
# Add forward accumulator for shape.
grad_state.grad_context.Exit()
history_zeros_shape = grad_state.AddForwardAccumulator(
zeros_shape, dead_branch=dead_branch)
grad_state.grad_context.Enter()
# Create a zero tensor with the right shape.
shape = grad_state.AddBackpropAccumulatedValue(history_zeros_shape,
zeros_shape, dead_branch)
result = array_ops.zeros(shape, val.dtype)
return result
def PostProcessing(self):
"""Perform postprocessing at the end of gradients().
We have created the gradient graph at this point. So this function
can be used to perform any postprocessing on the gradient graph.
We currently perform the following postprocessing:
1. Patch the gradient graph if the output of a loop variable
doesn't depend on its input.
"""
for _, grad_state in self._map.items():
for _, b_merge in grad_state.switch_map.items():
if b_merge.op.inputs[0] == b_merge.op.inputs[1]:
# The value of this loop variable at iteration i+1 doesn't
# depend on its value at iteration i. So use zeros as the
# gradients for all iterations > 0.
dtype = b_merge.op.inputs[0].dtype
shape = b_merge.op.inputs[0].get_shape()
# pylint: disable=protected-access
if shape.is_fully_defined():
grad_state.grad_context.Enter()
# Create a zeros and use it for iterations > 0.
grad_val = constant_op.constant(0, dtype=dtype, shape=shape)
next_grad_val = _NextIteration(grad_val)
grad_state.grad_context.Exit()
else:
# Create a zeros in the outer grad context.
outer_grad_ctxt = grad_state.grad_context.outer_context
if outer_grad_ctxt:
outer_grad_ctxt.Enter()
enter_grad_op = b_merge.op.inputs[0].op
enter_grad = enter_grad_op.inputs[0]
grad_shape = array_ops.shape_internal(enter_grad, optimize=False)
grad_val = array_ops.zeros(grad_shape)
if outer_grad_ctxt:
outer_grad_ctxt.Exit()
# Use the zeros for iterations > 0.
grad_state.grad_context.Enter()
next_grad_val = _NextIteration(grad_val)
grad_state.grad_context.Exit()
b_merge.op._update_input(1, next_grad_val)
# pylint: enable=protected-access
def MaybeCreateControlFlowState(between_op_list, between_ops,
colocate_gradients_with_ops):
"""Create the state for all the while loops involved in one gradients().
We create a ControlFlowState when there are while loops involved in
gradients(). In gradients(), control flow logic is only invoked when
the ControlFlowState is not None.
Note that this method modifies `between_op_list` and `between_ops`.
"""
loop_state = None
for op in between_op_list:
if util.IsLoopExit(op):
if loop_state is None:
loop_state = ControlFlowState()
if colocate_gradients_with_ops:
with ops.colocate_with(op):
loop_state.AddWhileContext(op, between_op_list, between_ops)
else:
loop_state.AddWhileContext(op, between_op_list, between_ops)
return loop_state
def ZerosLikeOutsideLoop(op, index):
"""Create zeros_like for the specified output of an op."""
val = op.outputs[index]
if not util.IsSwitch(op):
if val.dtype == dtypes.resource:
return array_ops.zeros(gen_resource_variable_ops.variable_shape(val))
return array_ops.zeros_like(val, optimize=False)
else:
op_ctxt = op._get_control_flow_context()
if op_ctxt:
# We are in a cond context. Use a switch to create zeros only when needed.
pred = op_ctxt.pred
branch = op_ctxt.branch
switch_val = switch(op.inputs[0], pred)[1 - branch]
if val.dtype == dtypes.resource:
with ops.control_dependencies([switch_val]):
return array_ops.zeros(
gen_resource_variable_ops.variable_shape(switch_val))
zeros_shape = array_ops.shape_internal(switch_val, optimize=False)
# Ensure ops created within array_ops.zeros are dominated by switch in
# cond context.
with ops.control_dependencies([switch_val]):
return array_ops.zeros(zeros_shape, dtype=val.dtype)
else:
return array_ops.zeros_like(val, optimize=False)
class ControlFlowContext(object):
"""The base class for control flow context.
The usage pattern is a sequence of (Enter, Exit) followed by a final
ExitResult.
We maintain the following state for control flow contexts during graph
construction:
1. graph has _control_flow_context: the current context used to
construct new nodes. Changed by ctxt.Enter() and ctxt.Exit()
2. op has _control_flow_context: the context to which the op belongs.
Set at the time the op is created. Immutable.
3. A ControlFlowContext has _outer_context: the context in which this
context is created. Set at the time a context is created. Immutable.
4. A ControlFlowContext has _context_stack.
Pushed and popped by ctxt.Enter() and ctxt.Exit()
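  A minimal sketch of the usage pattern (normally driven by the internals of
  `cond` and `while_loop`; `pred`, `pivot` and `outputs` are placeholders):
  ```python
  ctxt = CondContext(pred, pivot, branch=0)
  ctxt.Enter()
  # ... build the ops that belong to this context ...
  ctxt.Exit()
  ctxt.ExitResult(outputs)  # make `outputs` visible to the outer context
  ```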
"""
def __init__(self, values_def=None, import_scope=None):
self._nested_contexts = []
self._outer_context = ops.get_default_graph()._get_control_flow_context()
if self._outer_context:
self._outer_context._nested_contexts.append(self) # pylint: disable=protected-access
self._context_stack = []
if values_def:
self._init_values_from_proto(values_def, import_scope=import_scope)
else:
# The names of tensors that have been already seen in this context.
self._values = set()
# The keys are the names of tensors referenced by but external to this
# context. Each value is the Tensor that should be used by this context to
# access the key value (e.g. a switch output guarding a cond input value).
self._external_values = {}
def _init_values_from_proto(self, values_def, import_scope=None):
"""Initializes values and external_values from `ValuesDef` protocol buffer.
Args:
values_def: `ValuesDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(values_def, control_flow_pb2.ValuesDef)
self._values = set(
ops.prepend_name_scope(value, import_scope)
for value in values_def.values)
g = ops.get_default_graph()
self._external_values = {}
for k, v in values_def.external_values.items():
k = ops.prepend_name_scope(k, import_scope)
self._external_values[k] = g.as_graph_element(
ops.prepend_name_scope(v, import_scope))
op_names = set([
op.split(":")[0]
for op in self._values - set(self._external_values.keys())
])
for op in op_names:
# pylint: disable=protected-access
g.as_graph_element(op)._set_control_flow_context(self)
# pylint: enable=protected-access
@property
def name(self):
return self._name
@property
def outer_context(self):
"""Return the context containing this context."""
return self._outer_context
@property
def grad_state(self):
raise NotImplementedError("Abstract method")
@property
def back_prop(self):
raise NotImplementedError("Abstract method")
@abc.abstractmethod
def to_control_flow_context_def(self, context_def, export_scope=None):
"""Serializes this into `context_def`.
Args:
context_def: a `ControlFlowContextDef` protocol buffer.
export_scope: Optional `string`. Name scope to remove.
"""
raise NotImplementedError("Abstract method")
def _to_values_def(self, export_scope=None):
"""Converts the values to a `ValuesDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `ValuesDef` protocol buffer.
"""
values_def = control_flow_pb2.ValuesDef()
values_def.values.extend(
[ops.strip_name_scope(v, export_scope) for v in sorted(self._values)])
for k, v in self._external_values.items():
k = ops.strip_name_scope(k, export_scope)
values_def.external_values[k] = ops.strip_name_scope(v.name, export_scope)
return values_def
def AddName(self, name):
self._values.add(name)
# pylint: disable=protected-access
def Enter(self):
"""Enter this control flow context."""
graph = ops.get_default_graph()
self._context_stack.append(graph._get_control_flow_context())
graph._set_control_flow_context(self)
def Exit(self):
"""Exit this control flow context."""
graph = ops.get_default_graph()
last_context = self._context_stack.pop()
graph._set_control_flow_context(last_context)
def EnterGradientColocation(self, op, gradient_uid):
"""Start building a gradient colocated with an op."""
if self._outer_context:
self._outer_context.EnterGradientColocation(op, gradient_uid)
def ExitGradientColocation(self, op, gradient_uid):
"""Start building a gradient colocated with an op."""
if self._outer_context:
self._outer_context.ExitGradientColocation(op, gradient_uid)
def ExitResult(self, result):
"""Make a list of tensors available in the outer context."""
if self._outer_context:
nest.map_structure(lambda x: self._outer_context.AddName(x.name), result)
def GetWhileContext(self):
"""Return the while context containing this context."""
if self._outer_context:
return self._outer_context.GetWhileContext()
return None
def _IsInOuterContext(self, op):
op_ctxt = util.GetOutputContext(op)
outer_ctxt = self.outer_context
while outer_ctxt != op_ctxt:
if outer_ctxt is None:
return False
outer_ctxt = outer_ctxt.outer_context
return True
def _RemoveExternalControlEdges(self, op):
"""Remove any external control dependency on this op."""
while_ctxt = self.GetWhileContext()
# A control input of `op` is internal if it is in the same while
# loop context as the enclosing while loop context of self.
if while_ctxt is None:
internal_control_inputs = op.control_inputs
else:
internal_control_inputs = []
for x in op.control_inputs:
ctxt = util.GetOutputContext(x)
if ctxt is not None and ctxt.GetWhileContext() == while_ctxt:
internal_control_inputs.append(x)
external_control_inputs = []
if len(internal_control_inputs) != len(op.control_inputs):
external_control_inputs = list(set(op.control_inputs)
- set(internal_control_inputs))
op._remove_all_control_inputs()
op._add_control_inputs(internal_control_inputs)
return internal_control_inputs, external_control_inputs
# pylint: enable=protected-access
def AddInnerOp(self, op):
"""Notifies a scope about an operator added to an inner scope."""
if self._outer_context:
self._outer_context.AddInnerOp(op)
def GetControlPivot(self):
"""Returns the pivot node for this context, or None."""
return None
def IsWhileContext(self):
return False
def IsCondContext(self):
return False
def IsXLAContext(self):
return False
def __str__(self):
return self.name
class CondContext(ControlFlowContext):
"""The context for the conditional construct."""
def __init__(self,
pred=None,
pivot=None,
branch=None,
name="cond_text",
context_def=None,
import_scope=None):
"""Creates a `CondContext`.
Args:
pred: The `boolean` tensor for the conditional predicate.
pivot: The predicate tensor in this branch.
branch: 0 or 1 representing this branch.
name: Name of the `CondContext` python object.
context_def: Optional `ContextDef` protocol buffer to initialize the
`CondContext` object from.
import_scope: Optional `string`. Name scope to add. Only used when
        initializing from protocol buffer.
"""
self._name = ops.get_default_graph().unique_name(name)
if context_def:
self._init_from_proto(context_def, import_scope=import_scope)
else:
# Initializes the default fields.
ControlFlowContext.__init__(self)
self._pred = pred # The boolean tensor for the cond predicate
self._pivot = pivot # The predicate tensor in this branch
self._branch = branch # 0 or 1 representing this branch
      # Values considered to have been already seen in this context. `pred` is
      # produced outside this context and is recorded as an external value.
self._values.add(pred.name)
self._external_values[pred.name] = pred
self._values.add(pivot.name)
pivot.op._set_control_flow_context(self) # pylint: disable=protected-access
def _init_from_proto(self, context_def, import_scope=None):
"""Creates a new `CondContext` from protocol buffer.
Args:
context_def: `CondContextDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(context_def, control_flow_pb2.CondContextDef)
# Create from context_def.
g = ops.get_default_graph()
self._name = ops.prepend_name_scope(context_def.context_name, import_scope)
self._pred = g.as_graph_element(
ops.prepend_name_scope(context_def.pred_name, import_scope))
self._pivot = g.as_graph_element(
ops.prepend_name_scope(context_def.pivot_name, import_scope))
self._branch = context_def.branch
super(CondContext, self).__init__(values_def=context_def.values_def,
import_scope=import_scope)
@property
def pred(self):
return self._pred
@property
def pivot(self):
return self._pivot
@property
def branch(self):
return self._branch
@property
def grad_state(self):
if self.GetWhileContext():
return self.GetWhileContext().grad_state
return None
@property
def back_prop(self):
if self.GetWhileContext():
      return self.GetWhileContext().back_prop
return False
def GetControlPivot(self):
return self._pivot
def to_proto(self, export_scope=None):
"""Converts a `CondContext` to a `CondContextDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `CondContextDef` protocol buffer.
"""
if (export_scope is None or self.name.startswith(export_scope)):
context_def = control_flow_pb2.CondContextDef()
context_def.context_name = ops.strip_name_scope(self.name, export_scope)
context_def.pred_name = ops.strip_name_scope(self._pred.name,
export_scope)
context_def.pivot_name = ops.strip_name_scope(self._pivot.name,
export_scope)
context_def.branch = self._branch
context_def.values_def.MergeFrom(super(CondContext, self)._to_values_def(
export_scope))
for nested in self._nested_contexts:
nested_def = context_def.nested_contexts.add()
nested.to_control_flow_context_def(nested_def)
return context_def
else:
return None
@staticmethod
def from_proto(context_def, import_scope=None):
"""Returns a `CondContext` object created from `context_def`."""
ret = CondContext(context_def=context_def,
import_scope=import_scope)
ret.Enter()
for nested_def in context_def.nested_contexts:
from_control_flow_context_def(nested_def, import_scope=import_scope)
ret.Exit()
return ret
def to_control_flow_context_def(self, context_def, export_scope=None):
context_def.cond_ctxt.CopyFrom(self.to_proto(export_scope=export_scope))
def AddValue(self, val):
"""Add `val` to the current context and its outer context recursively."""
if val.name in self._values:
# Use the real value if it comes from outer context. This is needed in
# particular for nested conds.
result = self._external_values.get(val.name)
result = val if result is None else result
else:
result = val
self._values.add(val.name)
if self._outer_context:
result = self._outer_context.AddValue(val)
self._values.add(result.name)
self._external_values[result.name] = result
with ops.control_dependencies(None):
result = _SwitchRefOrTensor(result, self._pred)[self._branch]
if self._outer_context:
self._outer_context.AddInnerOp(result.op)
result.op.graph.prevent_fetching(result.op)
# pylint: disable=protected-access
result.op._set_control_flow_context(self)
# pylint: enable=protected-access
self._values.add(result.name)
self._external_values[val.name] = result
return result
def AddOp(self, op):
self._AddOpInternal(op)
def _AddOpInternal(self, op):
"""Add `op` to the current context."""
if not op.inputs:
# Remove any external control dependency on this op
self._RemoveExternalControlEdges(op)
# pylint: disable=protected-access
op._add_control_input(self._pivot.op)
# pylint: enable=protected-access
else:
for index in range(len(op.inputs)):
x = op.inputs[index]
real_x = self.AddValue(x)
if real_x != x:
# pylint: disable=protected-access
op._update_input(index, real_x)
# pylint: enable=protected-access
# Remove any external control dependency on this op.
self._RemoveExternalControlEdges(op)
# pylint: disable=protected-access
if op.graph._is_function(op.type) or op.type == "SymbolicGradient":
op._add_control_input(self._pivot.op)
# pylint: enable=protected-access
# Mark op's outputs as seen by this context and any outer contexts.
output_names = [x.name for x in op.outputs]
ctxt = self
while ctxt is not None:
# pylint: disable=protected-access
ctxt._values.update(output_names)
ctxt = ctxt._outer_context
# pylint: enable=protected-access
if self._outer_context or not util.IsLoopExit(op):
op.graph.prevent_fetching(op)
if self._outer_context:
self._outer_context.AddInnerOp(op)
def _ProcessOutputTensor(self, val):
"""Process an output tensor of a conditional branch."""
real_val = val
if val.name not in self._values:
# Handle the special case of lambda: x
self._values.add(val.name)
if self._outer_context:
real_val = self._outer_context.AddValue(val)
self._values.add(real_val.name)
self._external_values[real_val.name] = real_val
real_val = _SwitchRefOrTensor(real_val, self._pred)[self._branch]
self._external_values[val.name] = real_val
else:
external_val = self._external_values.get(val.name)
if external_val is not None:
real_val = external_val
return real_val
def _BuildCondTensor(self, v):
if isinstance(v, ops.Operation):
# Use pivot as the proxy for this op.
return with_dependencies([v], self._pivot)
elif isinstance(v, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
values = self._ProcessOutputTensor(v.values)
indices = self._ProcessOutputTensor(v.indices)
if isinstance(v, ops.IndexedSlices):
dense_shape = v.dense_shape
if dense_shape is not None:
dense_shape = self._ProcessOutputTensor(dense_shape)
return ops.IndexedSlices(values, indices, dense_shape)
else:
dense_shape = self._ProcessOutputTensor(v.dense_shape)
return sparse_tensor.SparseTensor(indices, values, dense_shape)
else:
v = nest.map_structure(_convert_tensorarray_to_flow, v)
return self._ProcessOutputTensor(ops.convert_to_tensor(v))
def BuildCondBranch(self, fn):
"""Add the subgraph defined by fn() to the graph."""
pre_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access
original_result = fn()
post_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access
if len(post_summaries) > len(pre_summaries):
new_summaries = post_summaries[len(pre_summaries):]
summary_ref = ops.get_collection_ref(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access
summary_ref[:] = pre_summaries
with ops.control_dependencies(new_summaries):
if original_result is None:
return no_op(), None
else:
original_result = nest.map_structure(array_ops.identity,
original_result)
if original_result is None:
return None, None
result = nest.map_structure(self._BuildCondTensor, original_result)
if not isinstance(result, (list, _basetuple)):
result = [result]
return original_result, result
def IsCondContext(self):
return True
def _UnpackIfSingleton(res):
if isinstance(res, (list, _basetuple)) and len(res) == 1:
return res[0]
else:
return res
# pylint: disable=redefined-outer-name
# pylint: disable=g-doc-args
@tf_export("cond")
@deprecation.deprecated_args(
None, "fn1/fn2 are deprecated in favor of the true_fn/false_fn arguments.",
"fn1", "fn2")
def cond(pred,
true_fn=None,
false_fn=None,
strict=False,
name=None,
fn1=None,
fn2=None):
"""Return `true_fn()` if the predicate `pred` is true else `false_fn()`.
`true_fn` and `false_fn` both return lists of output tensors. `true_fn` and
`false_fn` must have the same non-zero number and type of outputs.
Note that the conditional execution applies only to the operations defined in
`true_fn` and `false_fn`. Consider the following simple program:
```python
z = tf.multiply(a, b)
result = tf.cond(x < y, lambda: tf.add(x, z), lambda: tf.square(y))
```
If `x < y`, the `tf.add` operation will be executed and `tf.square`
operation will not be executed. Since `z` is needed for at least one
branch of the `cond`, the `tf.multiply` operation is always executed,
unconditionally.
Although this behavior is consistent with the dataflow model of TensorFlow,
  it has occasionally surprised some users who expected lazier semantics.
Note that `cond` calls `true_fn` and `false_fn` *exactly once* (inside the
call to `cond`, and not at all during `Session.run()`). `cond`
stitches together the graph fragments created during the `true_fn` and
`false_fn` calls with some additional graph nodes to ensure that the right
branch gets executed depending on the value of `pred`.
`tf.cond` supports nested structures as implemented in
`tensorflow.python.util.nest`. Both `true_fn` and `false_fn` must return the
same (possibly nested) value structure of lists, tuples, and/or named tuples.
Singleton lists and tuples form the only exceptions to this: when returned by
`true_fn` and/or `false_fn`, they are implicitly unpacked to single values.
This behavior is disabled by passing `strict=True`.
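  For example (an illustrative sketch; `pred` is assumed to be a scalar boolean
  tensor defined elsewhere), both branches below return a singleton list, which
  is unpacked because `strict` defaults to `False`:
  ```python
  r = tf.cond(pred,
              lambda: [tf.constant(1)],
              lambda: [tf.constant(0)])
  # r is a scalar Tensor; with strict=True it would be a one-element list.
  ```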
Args:
pred: A scalar determining whether to return the result of `true_fn` or
`false_fn`.
true_fn: The callable to be performed if pred is true.
false_fn: The callable to be performed if pred is false.
strict: A boolean that enables/disables 'strict' mode; see above.
name: Optional name prefix for the returned tensors.
Returns:
Tensors returned by the call to either `true_fn` or `false_fn`. If the
callables return a singleton list, the element is extracted from the list.
Raises:
TypeError: if `true_fn` or `false_fn` is not callable.
ValueError: if `true_fn` and `false_fn` do not return the same number of
tensors, or return tensors of different types.
Example:
```python
x = tf.constant(2)
y = tf.constant(5)
def f1(): return tf.multiply(x, 17)
def f2(): return tf.add(y, 23)
r = tf.cond(tf.less(x, y), f1, f2)
# r is set to f1().
# Operations in f2 (e.g., tf.add) are not executed.
```
"""
# We needed to make true_fn/false_fn keyword arguments for
# backwards-compatibility. This check exists so that we can convert back to
# having them be positional arguments.
# TODO(josh11b): Make `true_fn` and `false_fn` positional arguments after
# `fn1` and `fn2` are deleted.
if fn1 is not None:
if true_fn is not None:
raise TypeError("cond(): true_fn and fn1 may not be set simultaneously.")
true_fn = fn1
elif true_fn is None:
raise TypeError("cond(): true_fn argument required")
if fn2 is not None:
if false_fn is not None:
raise TypeError("cond(): false_fn and fn2 may not be set simultaneously.")
false_fn = fn2
elif false_fn is None:
raise TypeError("cond(): false_fn argument required")
if not callable(true_fn):
raise TypeError("true_fn must be callable.")
if not callable(false_fn):
raise TypeError("false_fn must be callable.")
with ops.name_scope(name, "cond", [pred]):
if context.executing_eagerly():
if pred:
return _UnpackIfSingleton(true_fn())
return _UnpackIfSingleton(false_fn())
# Add the Switch to the graph.
if isinstance(pred, bool):
raise TypeError("pred must not be a Python bool")
p_2, p_1 = switch(pred, pred)
pivot_1 = array_ops.identity(p_1, name="switch_t")
pivot_2 = array_ops.identity(p_2, name="switch_f")
pred = array_ops.identity(pred, name="pred_id")
# Disable the fetching of tensors that are only on one branch of cond.
for tensor in [p_1, p_2, pivot_1, pivot_2, pred]:
tensor.op.graph.prevent_fetching(tensor.op)
# Build the graph for the true branch in a new context.
context_t = CondContext(pred, pivot_1, branch=1)
context_t.Enter()
orig_res_t, res_t = context_t.BuildCondBranch(true_fn)
if orig_res_t is None:
raise ValueError("true_fn must have a return value.")
context_t.ExitResult(res_t)
context_t.Exit()
# Build the graph for the false branch in a new context.
context_f = CondContext(pred, pivot_2, branch=0)
context_f.Enter()
orig_res_f, res_f = context_f.BuildCondBranch(false_fn)
if orig_res_f is None:
raise ValueError("false_fn must have a return value.")
context_f.ExitResult(res_f)
context_f.Exit()
if not strict:
orig_res_t = _UnpackIfSingleton(orig_res_t)
orig_res_f = _UnpackIfSingleton(orig_res_f)
# Check that the return values of the two branches have the same structure.
try:
nest.assert_same_structure(orig_res_t, orig_res_f)
except TypeError as e:
raise TypeError(
"Incompatible return types of true_fn and false_fn: {}".format(e))
except ValueError as e:
raise ValueError(
"Incompatible return values of true_fn and false_fn: {}".format(e))
# Add the final merge to the graph.
if not res_t:
raise ValueError("true_fn and false_fn must return at least one result.")
res_t_flat = nest.flatten(res_t)
res_f_flat = nest.flatten(res_f)
for x, y in zip(res_t_flat, res_f_flat):
assert ((isinstance(x, ops.IndexedSlices) and
isinstance(y, ops.IndexedSlices)) or
(isinstance(x, sparse_tensor.SparseTensor) and
isinstance(y, sparse_tensor.SparseTensor)) or
(isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor)))
val_x = x if isinstance(x, ops.Tensor) else x.values
val_y = y if isinstance(y, ops.Tensor) else y.values
if val_x.dtype.base_dtype != val_y.dtype.base_dtype:
raise ValueError(
"Outputs of true_fn and false_fn must have the same type: %s, %s" %
(val_x.dtype.name, val_y.dtype.name))
merges = [merge(pair)[0] for pair in zip(res_f_flat, res_t_flat)]
merges = _convert_flows_to_tensorarrays(nest.flatten(orig_res_t), merges)
# Only add non-nested conds to the collection. Any nested control flow will
# be encapsulated in the root context.
assert context_t.outer_context == context_f.outer_context
if context_t.outer_context is None:
ops.add_to_collection(ops.GraphKeys.COND_CONTEXT, context_t)
ops.add_to_collection(ops.GraphKeys.COND_CONTEXT, context_f)
merges = nest.pack_sequence_as(structure=orig_res_t, flat_sequence=merges)
# Singleton lists and tuples are automatically unpacked if strict == False.
if not strict:
merges = _UnpackIfSingleton(merges)
return merges
# pylint: enable=g-doc-args
# pylint: enable=redefined-outer-name
def _resource_safe_shape(t):
"""Returns the shape of t or the variable it points to."""
if t.dtype == dtypes.resource:
while t.op.inputs:
t = t.op.inputs[0]
return tensor_shape.TensorShape(t.op.get_attr("shape"))
return array_ops.shape_internal(t, optimize=False)
# TODO(yuanbyu): Consider having a unified notion of context for
# not only conditionals and loops but also control dependency and
# subgraphs.
class WhileContext(ControlFlowContext):
"""The context for the loop construct."""
def __init__(self,
maximum_iterations=None,
parallel_iterations=10,
back_prop=True,
swap_memory=False,
name="while_context",
grad_state=None,
context_def=None,
import_scope=None):
""""Creates a `WhileContext`.
Args:
maximum_iterations: Optional upper bound on number of loop iterations.
parallel_iterations: The number of iterations allowed to run in parallel.
back_prop: Whether backprop is enabled for this while loop.
swap_memory: Whether GPU-CPU memory swap is enabled for this loop.
name: Optional name prefix for the returned tensors.
grad_state: The gradient loop state.
context_def: Optional `WhileContextDef` protocol buffer to initialize
        the `WhileContext` python object from.
import_scope: Optional `string`. Name scope to add. Only used when
        initializing from protocol buffer.
"""
if context_def:
self._init_from_proto(context_def, import_scope=import_scope)
else:
ControlFlowContext.__init__(self)
self._init_from_args(maximum_iterations, parallel_iterations, back_prop,
swap_memory, name)
# The gradient loop state.
self._grad_state = grad_state
def _init_from_args(self, maximum_iterations, parallel_iterations, back_prop,
swap_memory, name):
"""Creates a new `WhileContext` from arguments.
Args:
maximum_iterations: Optional upper bound on number of loop iterations.
parallel_iterations: The number of iterations allowed to run in parallel.
back_prop: Whether backprop is enabled for this while loop.
swap_memory: Whether GPU-CPU memory swap is enabled for this loop.
name: Optional name prefix for the returned tensors.
Raises:
ValueError: If `parallel_iterations` has invalid value.
"""
if not isinstance(parallel_iterations, int) or (parallel_iterations <= 0):
raise ValueError("`parallel_iterations` must be a positive integer: "
"%s" % parallel_iterations)
self._name = ops.get_default_graph().unique_name(name)
self._maximum_iterations = maximum_iterations
self._parallel_iterations = parallel_iterations
self._back_prop = back_prop
self._swap_memory = swap_memory
# We use this node to control constants created by the pred lambda.
self._pivot_for_pred = None
# We use this node to control constants created by the body lambda.
self._pivot_for_body = None
# The boolean tensor for loop termination condition. Used in code
# generation for gradient computation
self._pivot = None
# The list of exit tensors for loop variables.
self._loop_exits = []
# The list of enter tensors for loop variables.
self._loop_enters = []
def _init_from_proto(self, context_def, import_scope=None):
"""Creates a new `WhileContext` from protocol buffer.
Args:
context_def: `WhileContextDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
"""
assert isinstance(context_def, control_flow_pb2.WhileContextDef)
# Create from context_def.
g = ops.get_default_graph()
self._name = ops.prepend_name_scope(context_def.context_name, import_scope)
if context_def.maximum_iterations_name:
self._maximum_iterations = g.as_graph_element(
ops.prepend_name_scope(context_def.maximum_iterations_name,
import_scope))
else:
self._maximum_iterations = None
self._parallel_iterations = context_def.parallel_iterations
self._back_prop = context_def.back_prop
self._swap_memory = context_def.swap_memory
self._pivot_for_pred = g.as_graph_element(
ops.prepend_name_scope(context_def.pivot_for_pred_name, import_scope))
# We use this node to control constants created by the body lambda.
self._pivot_for_body = g.as_graph_element(
ops.prepend_name_scope(context_def.pivot_for_body_name, import_scope))
# The boolean tensor for loop termination condition. Used in code
# generation for gradient computation.
self._pivot = g.as_graph_element(
ops.prepend_name_scope(context_def.pivot_name, import_scope))
# The list of exit tensors for loop variables.
self._loop_exits = [
g.as_graph_element(ops.prepend_name_scope(exit_name, import_scope))
for exit_name in context_def.loop_exit_names
]
# The list of enter tensors for loop variables.
self._loop_enters = [
g.as_graph_element(ops.prepend_name_scope(enter_name, import_scope))
for enter_name in context_def.loop_enter_names
]
super(WhileContext, self).__init__(
values_def=context_def.values_def, import_scope=import_scope)
# import_scope causes self.name to be different from the original serialized
# context's name. Rewrite "frame_name" attrs with the new name.
if import_scope:
for tensor_name in self._values:
op = g.as_graph_element(tensor_name).op
if util.IsLoopEnter(op):
# pylint: disable=protected-access
op._set_attr("frame_name",
attr_value_pb2.AttrValue(s=compat.as_bytes(self.name)))
# pylint: enable=protected-access
@property
def maximum_iterations(self):
"""The maximum number of iterations that will be executed."""
return self._maximum_iterations
@property
def parallel_iterations(self):
"""The number of iterations allowed to run in parallel."""
return self._parallel_iterations
@property
def back_prop(self):
"""True iff backprop is enabled for this while loop."""
return self._back_prop
@property
def swap_memory(self):
"""True iff GPU-CPU memory swap is enabled for this while loop."""
return self._swap_memory
@property
def pivot(self):
"""The boolean tensor representing the loop termination condition."""
return self._pivot
@property
def loop_enters(self):
"""The list of enter tensors for loop variables."""
return self._loop_enters
@property
def loop_exits(self):
"""The list of exit tensors for loop variables."""
return self._loop_exits
@property
def grad_state(self):
"""The gradient loop state."""
return self._grad_state
def to_proto(self, export_scope=None):
"""Converts a `WhileContext` to a `WhileContextDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `WhileContextDef` protocol buffer.
"""
if (export_scope is None or self.name.startswith(export_scope)):
context_def = control_flow_pb2.WhileContextDef()
context_def.context_name = ops.strip_name_scope(self.name, export_scope)
context_def.parallel_iterations = self._parallel_iterations
if self._maximum_iterations is not None:
context_def.maximum_iterations_name = ops.strip_name_scope(
self._maximum_iterations.name, export_scope)
context_def.back_prop = self._back_prop
context_def.swap_memory = self._swap_memory
context_def.pivot_for_pred_name = ops.strip_name_scope(
self._pivot_for_pred.name, export_scope)
context_def.pivot_for_body_name = ops.strip_name_scope(
self._pivot_for_body.name, export_scope)
context_def.pivot_name = ops.strip_name_scope(self._pivot.name,
export_scope)
context_def.loop_exit_names.extend([
ops.strip_name_scope(l.name, export_scope) for l in self._loop_exits
])
context_def.loop_enter_names.extend([
ops.strip_name_scope(l.name, export_scope) for l in self._loop_enters
])
context_def.values_def.MergeFrom(
super(WhileContext, self)._to_values_def(
export_scope=export_scope))
for nested in self._nested_contexts:
nested_def = context_def.nested_contexts.add()
nested.to_control_flow_context_def(nested_def)
return context_def
else:
return None
def to_control_flow_context_def(self, context_def, export_scope=None):
context_def.while_ctxt.CopyFrom(self.to_proto(export_scope=export_scope))
@staticmethod
def from_proto(context_def, import_scope=None):
"""Returns a `WhileContext` object created from `context_def`.
Args:
context_def: A `WhileContextDef` protocol buffer.
import_scope: Optional `string`. Name scope to add.
Returns:
A `WhileContext` Python object.
"""
ret = WhileContext(context_def=context_def,
import_scope=import_scope)
ret.Enter()
for nested_def in context_def.nested_contexts:
from_control_flow_context_def(nested_def, import_scope=import_scope)
ret.Exit()
return ret
def GetWhileContext(self):
return self
def GetControlPivot(self):
if self._pivot_for_body is not None:
return self._pivot_for_body
return self._pivot_for_pred
def AddValue(self, val):
"""Add `val` to the current context and its outer context recursively."""
result = val
new_value = val.name not in self._values
# Don't treat ops in this context as new values. Usually all known values
# are in self._values, except when we're importing a while loop inside this
# WhileContext. Since there's a cycle in this case, `val` may be part of the
# imported while loop but not yet processed by this context and added to
# self._values in _AddOpInternal. We only want to process external input
# tensors to the while loop here.
new_value &= val.op._control_flow_context is not self # pylint: disable=protected-access
if new_value:
self._values.add(val.name)
# If we are in a grad context and val is from its forward context,
# use GetRealValue(), which adds the logic to save the history of
# val in forward.
grad_ctxt = ops.get_default_graph()._get_control_flow_context()
if grad_ctxt:
grad_ctxt = grad_ctxt.GetWhileContext()
if grad_ctxt.grad_state:
forward_ctxt = _GetWhileContext(val.op)
if util.IsLoopExit(val.op):
forward_ctxt = forward_ctxt.outer_context
if forward_ctxt:
forward_ctxt = forward_ctxt.GetWhileContext()
if forward_ctxt == grad_ctxt.grad_state.forward_context:
real_val = grad_ctxt.grad_state.GetRealValue(val)
self._external_values[val.name] = real_val
return real_val
if self._outer_context is not None:
result = self._outer_context.AddValue(val)
# Create an Enter to make `result` known to this loop context.
with ops.control_dependencies(None):
enter = _Enter(
result,
self._name,
is_constant=True,
parallel_iterations=self._parallel_iterations)
enter.graph.prevent_feeding(enter)
if self._outer_context:
self._outer_context.AddInnerOp(enter.op)
# Fix the control inputs and control flow context of these enter ops.
self._FixControlInputsAndContext([enter])
# Add `enter` in this context.
self._values.add(enter.name)
self._external_values[val.name] = enter
result = enter
else:
actual_val = self._external_values.get(val.name)
if actual_val is not None:
result = actual_val
return result
def AddOp(self, op):
"""Add `op` to the current context."""
# For a reduction op, if op is in a grad context and its input is from
# its forward context, moving op to the forward context means we would
# store the tensor after the reduction as opposed to the tensor before
# reduction, and therefore could significantly reduce memory consumption.
# For now, we do this only for a few ops.
if op.type in {"Shape", "Size", "Rank"}:
grad_ctxt = ops.get_default_graph()._get_control_flow_context()
if grad_ctxt:
grad_ctxt = grad_ctxt.GetWhileContext()
if grad_ctxt.grad_state:
op_input_forward_ctxt = _GetWhileContext(op.inputs[0].op)
if op_input_forward_ctxt == grad_ctxt.grad_state.forward_context:
op_input_ctxt = op.inputs[0].op._get_control_flow_context()
op._set_control_flow_context(op_input_ctxt)
op_input_ctxt._AddOpInternal(op)
return
self._AddOpInternal(op)
def _AddOpInternal(self, op):
"""Add `op` to the current context.
We move any external control dependencies of the op to the loop pivot, to
ensure they get executed.
"""
if not op.inputs:
# Remove any external control dependency on this op
control_inputs, external_inputs = self._RemoveExternalControlEdges(op)
# Add a control edge from the control pivot to this op.
if not control_inputs:
# pylint: disable=protected-access
op._add_control_input(self.GetControlPivot().op)
# pylint: enable=protected-access
for x in op.outputs:
self._values.add(x.name)
else:
for index in range(len(op.inputs)):
x = op.inputs[index]
real_x = self.AddValue(x)
if real_x != x:
op._update_input(index, real_x) # pylint: disable=protected-access
# Remove any external control dependency on this op.
_, external_inputs = self._RemoveExternalControlEdges(op)
# Add a control dependency to prevent loop invariants from
# enabling ops that should not be executed.
self._MaybeAddControlDependency(op)
for x in op.outputs:
self._values.add(x.name)
if external_inputs:
# Use an identity to pull control inputs as data inputs. Note that we
# ignore ops which don't have outputs. TODO(apassos): fix that
with ops.control_dependencies(None):
self.Enter()
external_inputs = [array_ops.identity(x.outputs[0]).op
for x in external_inputs if x.outputs]
self.Exit()
op._add_control_inputs(external_inputs) # pylint: disable=protected-access
if self._outer_context or not util.IsLoopExit(op):
op.graph.prevent_fetching(op)
for x in op.outputs:
op.graph.prevent_feeding(x)
if self._outer_context:
self._outer_context.AddInnerOp(op)
def _MaybeAddControlDependency(self, op):
"""Add a control input to the op if it only depends on loop invariants."""
def _IsOpFree(op):
"""Determines if `op` needs a control dependency."""
if op.control_inputs:
return False
# pylint: disable=protected-access
if op.graph._is_function(op.type) or op.type == "SymbolicGradient":
return True
# pylint: enable=protected-access
for x in op.inputs:
if not util.IsLoopConstantEnter(x.op):
return False
return True
if _IsOpFree(op):
# pylint: disable=protected-access
op._add_control_input(self.GetControlPivot().op)
# pylint: enable=protected-access
def AddForwardLoopCounter(self, outer_grad_state):
"""Adds a loop that counts the number of iterations.
This is added to the forward loop at the time when we start to
create the loop for backprop gradient computation. Called in
the outer context of this forward context.
The pseudocode is:
`n = 0; while (_pivot) { n++; }`
Note that a control dependency is added to `n` to ensure the correct
execution order of stack push ops.
Args:
outer_grad_state: The outer grad state. None if not nested.
Returns:
The number of iterations taken by the forward loop and the loop index.
"""
n = constant_op.constant(0, name="f_count")
if outer_grad_state is not None:
# Force the stack pushes of i-th execution of an inner loop to be ordered
# before the pushes of (i+1)-th execution of the same inner loop.
outer_add_op = outer_grad_state.forward_index.op.inputs[0].op
n.op._add_control_input(outer_add_op) # pylint: disable=protected-access
self.Enter()
self.AddName(n.name)
enter_n = _Enter(
n,
self._name,
is_constant=False,
parallel_iterations=self._parallel_iterations,
name="f_count")
self.loop_enters.append(enter_n)
merge_n = merge([enter_n, enter_n])[0]
switch_n = switch(merge_n, self._pivot)
index = math_ops.add(switch_n[1], 1)
next_n = _NextIteration(index)
merge_n.op._update_input(1, next_n)
total_iterations = exit(switch_n[0], name="f_count")
self.loop_exits.append(total_iterations)
self.ExitResult([total_iterations])
self.Exit()
return total_iterations, next_n
def AddBackpropLoopCounter(self, count, outer_grad_state):
"""Add the backprop loop that controls the iterations.
This is added to the backprop loop. It is used to control the loop
termination of the backprop loop. Called in the outer context of
this grad context.
The pseudocode is:
`n = count; while (n >= 1) { n--; }`
Note that a control dependency is added to `final_zero` to ensure the
correct execution order of stack pop ops.
Args:
count: The number of iterations for backprop.
outer_grad_state: The outer grad state. None if not nested.
Returns:
The loop index.
"""
one = constant_op.constant(1, name="b_count")
self.Enter()
self.AddName(count.name)
enter_count = _Enter(
count,
self._name,
is_constant=False,
parallel_iterations=self._parallel_iterations,
name="b_count")
self.loop_enters.append(enter_count)
merge_count = merge([enter_count, enter_count])[0]
self._pivot_for_pred = merge_count
pred = math_ops.greater_equal(merge_count, one)
self._pivot = loop_cond(pred, name="b_count")
switch_count = switch(merge_count, self._pivot)
index = math_ops.subtract(switch_count[1], one)
self._pivot_for_body = index
next_count = _NextIteration(index)
merge_count.op._update_input(1, next_count)
final_zero = exit(switch_count[0], name="b_count")
self.loop_exits.append(final_zero)
if outer_grad_state is not None:
# Force the stack pops of i-th execution of an inner loop to be ordered
# before the pops of (i+1)-th execution of the same inner loop.
# pylint: disable=protected-access
outer_grad_state.grad_sync._add_control_input(final_zero.op)
# pylint: enable=protected-access
self.ExitResult([final_zero])
self.Exit()
return next_count
def AddBackpropAccumulator(self, op, grad):
"""Add an accumulation loop for every loop invariant.
This is added to the backprop loop. It is used to accumulate partial
gradients within each loop iteration. Called when in the gradient while
context.
The pseudocode is:
```
acc = 0.0;
while (_pivot) {
acc += grad;
}
```
Args:
op: The Enter op for a loop invariant.
grad: The partial gradient of an iteration for a loop invariant.
Returns:
The gradient for a loop invariant.
"""
self.Exit()
# Create a zeros tensor with the right shape for acc. If we don't
# know the full shape statically, we will have to get the shape
# dynamically from the forward inference. Getting the shape right
# for the zeros is only needed for the base case when the loop exits
# without running any iterations.
shape = grad.get_shape()
if shape.is_fully_defined():
if self.outer_context:
self.outer_context.Enter()
acc = constant_op.constant(0, grad.dtype, shape=shape, name="b_acc")
if self.outer_context:
self.outer_context.Exit()
else:
value = op.inputs[0]
if (isinstance(self.outer_context, WhileContext) and
self.outer_context.grad_state is not None):
# We are in a nested while loop.
forward_ctxt = self.grad_state.forward_context
forward_ctxt.outer_context.Enter()
zeros_shape = array_ops.shape_internal(value, optimize=False)
forward_ctxt.outer_context.Exit()
outer_grad_state = self.grad_state.outer_grad_state
history_zeros_shape = outer_grad_state.AddForwardAccumulator(
zeros_shape)
self.outer_context.Enter()
real_shape = outer_grad_state.AddBackpropAccumulatedValue(
history_zeros_shape, zeros_shape)
acc = array_ops.zeros(real_shape, grad.dtype)
self.outer_context.Exit()
else:
if self.outer_context:
self.outer_context.Enter()
zeros_shape = array_ops.shape_internal(value, optimize=False)
acc = array_ops.zeros(zeros_shape, grad.dtype)
if self.outer_context:
self.outer_context.Exit()
self.Enter()
self.AddName(acc.name)
enter_acc = _Enter(
acc,
self._name,
is_constant=False,
parallel_iterations=self._parallel_iterations,
name="b_acc")
self.loop_enters.append(enter_acc)
merge_acc = merge([enter_acc, enter_acc], name="b_acc")[0]
switch_acc_false, switch_acc_true = switch(merge_acc, self._pivot)
add_acc = math_ops.add(switch_acc_true, grad)
next_acc = _NextIteration(add_acc)
merge_acc.op._update_input(1, next_acc) # pylint: disable=protected-access
result_acc = exit(switch_acc_false, name="b_acc")
self.loop_exits.append(result_acc)
self.ExitResult([result_acc])
return result_acc
def AddBackpropIndexedSlicesAccumulator(self, op, grad):
"""This is used for accumulating gradients that are IndexedSlices.
This is essentially the equivalent of AddBackpropAccumulator but optimized
for things like updating embeddings from within a while loop.
Args:
op: The Enter op for a loop invariant.
grad: The partial gradients represented as an IndexedSlices.
Returns:
The accumulated IndexedSlices gradient of the loop invariant.
"""
values = grad.values
indices = grad.indices
dense_shape = grad.dense_shape
self.Exit()
if self.outer_context:
self.outer_context.Enter()
if values.get_shape().is_fully_defined():
values_shape = tensor_shape.TensorShape(
[tensor_shape.Dimension(1)] + values.get_shape().dims[1:])
if self.outer_context:
self.outer_context.Enter()
values_acc = constant_op.constant(
0, values.dtype, shape=values_shape, name="b_acc")
if self.outer_context:
self.outer_context.Exit()
else:
values_shape = _resource_safe_shape(op.inputs[0])[1:]
values_shape = array_ops.concat([[1], values_shape], 0)
values_acc = array_ops.zeros(values_shape, dtype=values.dtype)
indices_acc = constant_op.constant([0], indices.dtype)
shape_acc = None
if dense_shape is not None:
if dense_shape.get_shape().is_fully_defined():
if self.outer_context:
self.outer_context.Enter()
shape_acc = constant_op.constant(
0, dense_shape.dtype, shape=dense_shape.get_shape())
if self.outer_context:
self.outer_context.Exit()
else:
shape_acc = array_ops.zeros_like(
array_ops.shape_internal(op.inputs[0], optimize=False),
optimize=False)
if self.outer_context:
self.outer_context.Exit()
self.Enter()
self.AddName(values_acc.name)
self.AddName(indices_acc.name)
init_acc = [indices_acc, values_acc]
if shape_acc is not None:
self.AddName(shape_acc.name)
init_acc.append(shape_acc)
# Set use_input_shape=False since the accumulator tensors will grow in
# size. If use_input_shape=True, the _update_input call below will result in
# incompatible shapes.
enter_acc = [
_Enter(
x,
self._name,
is_constant=False,
parallel_iterations=self._parallel_iterations,
use_input_shape=False,
name="b_acc") for x in init_acc
]
# Manually set appropriate partial shapes.
enter_acc[0].set_shape([None])
if values_acc.shape.dims is not None:
enter_acc[1].set_shape([None] + values_acc.shape.as_list()[1:])
self.loop_enters.extend(enter_acc)
merge_acc = [merge([x, x], name="b_acc")[0] for x in enter_acc]
switch_acc = [switch(x, self._pivot) for x in merge_acc]
# The actual accumulation.
acc_indexed_slices = [
array_ops.concat([xa[1], xv], 0)
for xa, xv in zip(switch_acc[:2], [indices, values])
]
if shape_acc is not None:
# For the shape we just keep the maximum
acc_indexed_slices.append(math_ops.maximum(dense_shape, switch_acc[2][1]))
next_acc = [_NextIteration(x) for x in acc_indexed_slices]
for xm, xn in zip(merge_acc, next_acc):
xm.op._update_input(1, xn) # pylint: disable=protected-access
exit_acc = [exit(x[0], name="b_acc") for x in switch_acc]
self.loop_exits.extend(exit_acc)
self.ExitResult(exit_acc)
return ops.IndexedSlices(
indices=exit_acc[0],
values=exit_acc[1],
dense_shape=exit_acc[2] if shape_acc is not None else None)
def _InitializeValues(self, values):
"""Makes the values known to this context."""
self._values = set()
for x in values:
if isinstance(x, ops.Tensor):
self._values.add(x.name)
else:
self._values.add(x.values.name)
self._values.add(x.indices.name)
if isinstance(x, ops.IndexedSlices):
dense_shape = x.dense_shape
elif isinstance(x, sparse_tensor.SparseTensor):
dense_shape = x.dense_shape
else:
raise TypeError("Type %s not supported" % type(x))
if dense_shape is not None:
self._values.add(dense_shape.name)
def _BuildLoop(self, pred, body, original_loop_vars, loop_vars,
shape_invariants):
"""Core: Add the loop termination condition and body to the graph."""
flat_loop_vars = nest.flatten(original_loop_vars)
# Let the context know the loop variables so the loop variables
# would be added in the outer contexts properly.
self._InitializeValues(loop_vars)
real_vars = loop_vars
if self._outer_context:
real_vars = [self._outer_context.AddValue(x) for x in loop_vars]
with ops.control_dependencies(None):
enter_vars = [
_Enter(
x,
self._name,
is_constant=False,
parallel_iterations=self._parallel_iterations,
use_input_shape=(shape_invariants is None)) for x in real_vars
]
for x in enter_vars:
x.graph.prevent_feeding(x)
if self._outer_context:
self._outer_context.AddInnerOp(x.op)
# Finds the closest enclosing non-None control pivot.
outer_context = self._outer_context
control_pivot = None
while outer_context is not None and control_pivot is None:
control_pivot = outer_context.GetControlPivot()
# pylint: disable=protected-access
outer_context = outer_context._outer_context
# pylint: enable=protected-access
if control_pivot is not None:
for var in enter_vars:
if util.IsLoopConstantEnter(var.op.inputs[0].op):
# pylint: disable=protected-access
var.op._add_control_input(control_pivot.op)
# pylint: enable=protected-access
_SetShapeInvariants(real_vars, enter_vars, shape_invariants)
# Fix the control inputs and control flow context of these enter ops.
self._FixControlInputsAndContext(enter_vars)
self._InitializeValues(enter_vars)
self._loop_enters = enter_vars
merge_vars = [merge([x, x])[0] for x in enter_vars]
self._pivot_for_pred = merge_vars[0]
# Build the graph for pred.
merge_vars_with_tensor_arrays = (
_convert_flows_to_tensorarrays(flat_loop_vars, merge_vars))
packed_vars = nest.pack_sequence_as(
structure=original_loop_vars,
flat_sequence=merge_vars_with_tensor_arrays)
c = ops.convert_to_tensor(pred(*packed_vars))
self._pivot = loop_cond(c, name="LoopCond")
switch_vars = [_SwitchRefOrTensor(x, self._pivot) for x in merge_vars]
# Build the graph for body.
vars_for_body = [_Identity(x[1]) for x in switch_vars]
self._pivot_for_body = vars_for_body[0]
# Convert TensorArray flow variables inside the context back into
# their associated TensorArrays for calling the body.
vars_for_body_with_tensor_arrays = (
_convert_flows_to_tensorarrays(flat_loop_vars, vars_for_body))
packed_vars_for_body = nest.pack_sequence_as(
structure=original_loop_vars,
flat_sequence=vars_for_body_with_tensor_arrays)
pre_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access
body_result = body(*packed_vars_for_body)
post_summaries = ops.get_collection(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access
if not nest.is_sequence(body_result):
body_result = [body_result]
if len(post_summaries) > len(pre_summaries):
new_summaries = post_summaries[len(pre_summaries):]
summary_ref = ops.get_collection_ref(ops.GraphKeys._SUMMARY_COLLECTION) # pylint: disable=protected-access
summary_ref[:] = pre_summaries
with ops.control_dependencies(new_summaries):
def map_fn(x):
# TODO(apassos) figure out how to trigger with tensor arrays as well
if isinstance(x, tensor_array_ops.TensorArray):
return x
return array_ops.identity(x)
body_result = nest.map_structure(map_fn, body_result)
# Compare the structure types of input and output of body.
# For backwards compatibility, the first layer is forced to a list
# during this comparison, because inputs are typically lists and
# outputs of the body are typically tuples.
nest.assert_same_structure(list(packed_vars_for_body), list(body_result))
# Store body_result to keep track of TensorArrays returned by body
original_body_result = body_result
# Convert TensorArrays returned by body into their flow variables
result = nest.map_structure(_convert_tensorarray_to_flow,
nest.flatten(body_result))
result = ops.convert_n_to_tensor_or_indexed_slices(result)
# Add NextIteration and the back edges to complete the loop.
if len(merge_vars) != len(result):
raise ValueError("Number of inputs and outputs of body must match "
"loop_vars: %d, %d" % (len(merge_vars), len(result)))
next_vars = []
for m, v in zip(merge_vars, result):
next_vars.append(_AddNextAndBackEdge(m, v))
# Add the exit ops.
exit_vars = [exit(x[0]) for x in switch_vars]
self._loop_exits = exit_vars
# Exit the loop.
self.ExitResult(exit_vars)
return original_body_result, exit_vars
def BuildLoop(self, pred, body, loop_vars, shape_invariants):
"""Add the loop termination condition and body to the graph."""
# Keep original_loop_vars to identify which are TensorArrays
original_loop_vars = loop_vars
# Convert TensorArrays to their flow variables
loop_vars = nest.map_structure(_convert_tensorarray_to_flow,
nest.flatten(loop_vars))
loop_vars = ops.convert_n_to_tensor_or_indexed_slices(loop_vars)
try:
self.Enter()
# _BuildLoop calls _update_input in several places. _lock ensures a
# Session.run call cannot occur between creating and mutating new ops.
with ops.get_default_graph()._lock: # pylint: disable=protected-access
original_body_result, exit_vars = self._BuildLoop(
pred, body, original_loop_vars, loop_vars, shape_invariants)
finally:
self.Exit()
flat_result = nest.flatten(original_body_result)
# Convert TensorArray flow variables outside the context back into
# their associated TensorArrays for returning to caller.
exit_vars_with_tensor_arrays = (
_convert_flows_to_tensorarrays(flat_result, exit_vars))
packed_exit_vars = nest.pack_sequence_as(
structure=original_body_result,
flat_sequence=exit_vars_with_tensor_arrays)
return packed_exit_vars[0] if len(exit_vars) == 1 else packed_exit_vars
def _FixControlInputsAndContext(self, enters):
graph = ops.get_default_graph()
# pylint: disable=protected-access
for e in enters:
if isinstance(e, ops.Tensor):
xs = [e]
else:
if not isinstance(e, (ops.IndexedSlices, sparse_tensor.SparseTensor)):
raise TypeError("Type %s not supported" % type(e))
xs = [e.values, e.indices]
shape = e.dense_shape
if shape is not None:
xs.append(shape)
for x in xs:
inp_op = x.op.inputs[0].op
control_inputs = graph._control_dependencies_for_inputs([inp_op])
outer_control_inputs = [
op for op in control_inputs if self._IsInOuterContext(op)
]
x.op._set_control_flow_context(self)
x.op._add_control_inputs(outer_control_inputs)
graph._record_op_seen_by_control_dependencies(x.op)
# pylint: enable=protected-access
def IsWhileContext(self):
return True
# pylint: disable=redefined-outer-name
@tf_export("while_loop")
def while_loop(cond,
body,
loop_vars,
shape_invariants=None,
parallel_iterations=10,
back_prop=True,
swap_memory=False,
name=None,
maximum_iterations=None):
"""Repeat `body` while the condition `cond` is true.
`cond` is a callable returning a boolean scalar tensor. `body` is a callable
returning a (possibly nested) tuple, namedtuple or list of tensors of the same
arity (length and structure) and types as `loop_vars`. `loop_vars` is a
(possibly nested) tuple, namedtuple or list of tensors that is passed to both
`cond` and `body`. `cond` and `body` both take as many arguments as there are
`loop_vars`.
In addition to regular Tensors or IndexedSlices, the body may accept and
return TensorArray objects. The flows of the TensorArray objects will
be appropriately forwarded between loops and during gradient calculations.
Note that `while_loop` calls `cond` and `body` *exactly once* (inside the
call to `while_loop`, and not at all during `Session.run()`). `while_loop`
stitches together the graph fragments created during the `cond` and `body`
calls with some additional graph nodes to create the graph flow that
repeats `body` until `cond` returns false.
For correctness, `tf.while_loop()` strictly enforces shape invariants for
the loop variables. A shape invariant is a (possibly partial) shape that
is unchanged across the iterations of the loop. An error will be raised
if the shape of a loop variable after an iteration is determined to be more
general than or incompatible with its shape invariant. For example, a shape
of [11, None] is more general than a shape of [11, 17], and [11, 21] is not
compatible with [11, 17]. By default (if the argument `shape_invariants` is
not specified), it is assumed that the initial shape of each tensor in
`loop_vars` is the same in every iteration. The `shape_invariants` argument
allows the caller to specify a less specific shape invariant for each loop
variable, which is needed if the shape varies between iterations. The
@{tf.Tensor.set_shape}
function may also be used in the `body` function to indicate that
the output loop variable has a particular shape. The shape invariant for
SparseTensor and IndexedSlices are treated specially as follows:
a) If a loop variable is a SparseTensor, the shape invariant must be
TensorShape([r]) where r is the rank of the dense tensor represented
by the sparse tensor. It means the shapes of the three tensors of the
SparseTensor are ([None], [None, r], [r]). NOTE: The shape invariant here
is the shape of the SparseTensor.dense_shape property. It must be the shape of
a vector.
b) If a loop variable is an IndexedSlices, the shape invariant must be
a shape invariant of the values tensor of the IndexedSlices. It means
the shapes of the three tensors of the IndexedSlices are (shape, [shape[0]],
[shape.ndims]).
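  For example (a minimal sketch), a rank-2 `SparseTensor` loop variable uses the
  invariant `TensorShape([2])`, i.e. the shape of its `dense_shape` vector:
  ```python
  i0 = tf.constant(0)
  st0 = tf.SparseTensor(indices=[[0, 0]], values=[1.0], dense_shape=[1, 4])
  c = lambda i, st: i < 3
  b = lambda i, st: (i + 1,
                     tf.SparseTensor(st.indices, st.values * 2.0,
                                     st.dense_shape))
  _, st_final = tf.while_loop(
      c, b, [i0, st0],
      shape_invariants=[i0.get_shape(), tf.TensorShape([2])])
  ```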
`while_loop` implements non-strict semantics, enabling multiple iterations
to run in parallel. The maximum number of parallel iterations can be
controlled by `parallel_iterations`, which gives users some control over
memory consumption and execution order. For correct programs, `while_loop`
should return the same result for any parallel_iterations > 0.
For training, TensorFlow stores the tensors that are produced in the
forward inference and are needed in back propagation. These tensors are a
main source of memory consumption and often cause OOM errors when training
on GPUs. When the flag swap_memory is true, we swap out these tensors from
GPU to CPU. This for example allows us to train RNN models with very long
sequences and large batches.
Args:
cond: A callable that represents the termination condition of the loop.
body: A callable that represents the loop body.
loop_vars: A (possibly nested) tuple, namedtuple or list of numpy array,
`Tensor`, and `TensorArray` objects.
shape_invariants: The shape invariants for the loop variables.
parallel_iterations: The number of iterations allowed to run in parallel.
It must be a positive integer.
back_prop: Whether backprop is enabled for this while loop.
swap_memory: Whether GPU-CPU memory swap is enabled for this loop.
name: Optional name prefix for the returned tensors.
maximum_iterations: Optional maximum number of iterations of the while loop
to run. If provided, the `cond` output is AND-ed with an additional
condition ensuring the number of iterations executed is no greater than
`maximum_iterations`.
Returns:
The output tensors for the loop variables after the loop. When the length
of `loop_vars` is 1 this is a Tensor, TensorArray or IndexedSlice and when
the length of `loop_vars` is greater than 1 it returns a list.
Raises:
TypeError: if `cond` or `body` is not callable.
ValueError: if `loop_vars` is empty.
Example:
```python
i = tf.constant(0)
c = lambda i: tf.less(i, 10)
b = lambda i: tf.add(i, 1)
r = tf.while_loop(c, b, [i])
```
Example with nesting and a namedtuple:
```python
import collections
Pair = collections.namedtuple('Pair', 'j, k')
ijk_0 = (tf.constant(0), Pair(tf.constant(1), tf.constant(2)))
c = lambda i, p: i < 10
b = lambda i, p: (i + 1, Pair((p.j + p.k), (p.j - p.k)))
ijk_final = tf.while_loop(c, b, ijk_0)
```
Example using shape_invariants:
```python
i0 = tf.constant(0)
m0 = tf.ones([2, 2])
c = lambda i, m: i < 10
b = lambda i, m: [i+1, tf.concat([m, m], axis=0)]
tf.while_loop(
c, b, loop_vars=[i0, m0],
shape_invariants=[i0.get_shape(), tf.TensorShape([None, 2])])
```
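  Example using `maximum_iterations` (an illustrative sketch; the loop stops
  after 5 iterations even though the condition alone would allow 10):
  ```python
  i0 = tf.constant(0)
  c = lambda i: tf.less(i, 10)
  b = lambda i: tf.add(i, 1)
  r = tf.while_loop(c, b, [i0], maximum_iterations=5)
  # r evaluates to 5.
  ```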
Example which demonstrates non-strict semantics: In the following
example, the final value of the counter `i` does not depend on `x`. So
the `while_loop` can increment the counter parallel to updates of `x`.
However, because the loop counter at one loop iteration depends
on the value at the previous iteration, the loop counter itself cannot
be incremented in parallel. Hence if we just want the final value of the
counter (which we print on the line `print(sess.run(i))`), then
`x` will never be incremented, but the counter will be updated on a
single thread. Conversely, if we want the value of the output (which we
print on the line `print(sess.run(out).shape)`), then the counter may be
incremented on its own thread, while `x` can be incremented in
parallel on a separate thread. In the extreme case, it is conceivable
that the thread incrementing the counter runs until completion before
`x` is incremented even a single time. The only thing that can never
happen is that the thread updating `x` can never get ahead of the
counter thread because the thread incrementing `x` depends on the value
of the counter.
```python
import tensorflow as tf
n = 10000
x = tf.constant(list(range(n)))
c = lambda i, x: i < n
b = lambda i, x: (tf.Print(i + 1, [i]), tf.Print(x + 1, [i], "x:"))
i, out = tf.while_loop(c, b, (0, x))
with tf.Session() as sess:
print(sess.run(i)) # prints [0] ... [9999]
# The following line may increment the counter and x in parallel.
# The counter thread may get ahead of the other thread, but not the
# other way around. So you may see things like
# [9996] x:[9987]
# meaning that the counter thread is on iteration 9996,
# while the other thread is on iteration 9987
print(sess.run(out).shape)
```
"""
with ops.name_scope(name, "while", loop_vars):
if not loop_vars:
raise ValueError("No loop variables provided")
if not callable(cond):
raise TypeError("cond must be callable.")
if not callable(body):
raise TypeError("body must be callable.")
if parallel_iterations < 1:
raise TypeError("parallel_iterations must be a positive integer.")
if maximum_iterations is not None:
maximum_iterations = ops.convert_to_tensor(
maximum_iterations, name="maximum_iterations")
if maximum_iterations.shape.ndims != 0:
raise ValueError("maximum_iterations must be a scalar, saw shape: %s" %
maximum_iterations.shape)
counter = constant_op.constant(
0, dtype=maximum_iterations.dtype, name="iteration_counter")
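      # Prepend an iteration counter to loop_vars and AND the user condition
      # with `i < maximum_iterations` so the loop also stops at the bound.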
orig_cond = cond
orig_body = body
if len(loop_vars) == 1:
loop_vars = (counter, loop_vars[0])
cond = lambda i, lv: ( # pylint: disable=g-long-lambda
math_ops.logical_and(i < maximum_iterations, orig_cond(lv)))
body = lambda i, lv: (i + 1, orig_body(lv))
else:
loop_vars = (counter, loop_vars)
cond = lambda i, lv: ( # pylint: disable=g-long-lambda
math_ops.logical_and(i < maximum_iterations, orig_cond(*lv)))
body = lambda i, lv: (i + 1, orig_body(*lv))
if context.executing_eagerly():
try_to_pack = len(loop_vars) == 1
packed = False # whether the body result was packed into a 1-item tuple
while cond(*loop_vars):
loop_vars = body(*loop_vars)
if try_to_pack and not isinstance(loop_vars, (list, _basetuple)):
packed = True
loop_vars = (loop_vars,)
if maximum_iterations is not None:
return loop_vars[1]
else:
return loop_vars[0] if packed else loop_vars
if shape_invariants is not None:
if maximum_iterations is not None:
shape_invariants = (tensor_shape.TensorShape([]), shape_invariants)
nest.assert_same_structure(loop_vars, shape_invariants)
loop_context = WhileContext(
maximum_iterations=maximum_iterations,
parallel_iterations=parallel_iterations,
back_prop=back_prop,
swap_memory=swap_memory)
# Only add non-nested loops to the collection. Any nested control flow will
# be encapsulated in the root context.
if loop_context.outer_context is None:
ops.add_to_collection(ops.GraphKeys.WHILE_CONTEXT, loop_context)
result = loop_context.BuildLoop(cond, body, loop_vars, shape_invariants)
if maximum_iterations is not None:
return result[1]
else:
return result
# pylint: enable=redefined-outer-name
def _AsTensorList(x, p):
"""Return x as a list of Tensors or IndexedSlices.
For entries of `x` that are Operations, this returns an Identity of `p`
with a dependency on the operation.
Args:
x: A Tensor/IndexedSlices/Operation or a list or tuple of them.
p: A Tensor to return for entries in `x` that are Operations.
Returns:
A list of Tensors or IndexedSlices.
"""
if not isinstance(x, (list, _basetuple)):
x = [x]
l = []
for v in x:
if isinstance(v, ops.Operation):
v = with_dependencies([v], p)
v = ops.convert_to_tensor_or_indexed_slices(v)
if isinstance(v, ops.Tensor):
l.append(array_ops.identity(v))
else:
l.append(
ops.IndexedSlices(
array_ops.identity(v.values), array_ops.identity(v.indices)))
return l
def _CheckResults(a, b):
assert len(a) == len(b), (
"Values returned by a() and b() must have the same length.")
for x, y in zip(a, b):
assert x.dtype == y.dtype, (
"Values returned by a() [%s] and b() [%s] must have "
"the same type: %s, %s." % (x.name, y.name, x.dtype.name, y.dtype.name))
def with_dependencies(dependencies, output_tensor, name=None):
"""Produces the content of `output_tensor` only after `dependencies`.
In some cases, a user may want the output of an operation to be
consumed externally only after some other dependencies have run
  first. This function returns the value of `output_tensor`, but only after all
  operations in `dependencies` have run. Note that this does not guarantee that
  `output_tensor` itself is evaluated after the `dependencies` have run; only
  the tensor returned by this function carries that ordering.
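  For example (a minimal sketch; `v` is a hypothetical variable):
  ```python
  v = tf.Variable(0.0)
  increment = tf.assign_add(v, 1.0)
  # `value` is an identity of `v.read_value()` whose op waits for `increment`;
  # the read itself is not guaranteed to happen after the increment.
  value = with_dependencies([increment], v.read_value())
  ```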
See also @{tf.tuple$tuple} and @{tf.group$group}.
Args:
dependencies: Iterable of operations to run before this op finishes.
output_tensor: A `Tensor` or `IndexedSlices` that will be returned.
name: (Optional) A name for this operation.
Returns:
Same as `output_tensor`.
Raises:
TypeError: if `output_tensor` is not a `Tensor` or `IndexedSlices`.
"""
if context.executing_eagerly():
return output_tensor
with ops.name_scope(name, "control_dependency",
list(dependencies) + [output_tensor]) as name:
with ops.colocate_with(output_tensor):
with ops.control_dependencies(dependencies):
output_tensor = ops.convert_to_tensor_or_indexed_slices(output_tensor)
if isinstance(output_tensor, ops.Tensor):
return _Identity(output_tensor, name=name)
else:
return ops.IndexedSlices(
_Identity(output_tensor.values, name=name), output_tensor.indices,
output_tensor.dense_shape)
def _GroupControlDeps(dev, deps, name=None):
with ops.control_dependencies(deps):
if dev is None:
return no_op(name=name)
else:
with ops.device(dev):
return no_op(name=name)
# TODO(touts): Accept "inputs" as a list.
@tf_export("group")
def group(*inputs, **kwargs):
"""Create an op that groups multiple operations.
When this op finishes, all ops in `inputs` have finished. This op has no
output.
See also @{tf.tuple$tuple} and
@{tf.control_dependencies$control_dependencies}.
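  For example (an illustrative sketch; `v1` and `v2` are hypothetical
  variables):
  ```python
  a = tf.assign(v1, 1.0)
  b = tf.assign(v2, 2.0)
  update_all = tf.group(a, b)
  # Running update_all runs both assignments; the op itself produces no value.
  ```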
Args:
*inputs: Zero or more tensors to group.
name: A name for this operation (optional).
Returns:
An Operation that executes all its inputs.
Raises:
ValueError: If an unknown keyword argument is provided.
"""
if context.executing_eagerly():
return None
name = kwargs.pop("name", None)
if kwargs:
raise ValueError("Unknown keyword arguments: " + ", ".join(kwargs.keys()))
with ops.name_scope(name, "group_deps", inputs) as name:
# Grouping no inputs means do nothing
if not inputs:
return no_op(name=name)
# Sorts *inputs according to their devices.
ops_on_device = {} # device -> operations specified on the device.
for inp in nest.flatten(inputs):
if not hasattr(inp, "device"):
raise TypeError("Expected tf.group() expected Tensor arguments not "
"'%s' with type '%s'" % (inp, type(inp)))
if not hasattr(inp, "device"):
if isinstance(inp, list):
raise TypeError("To call tf.group() with a list, use "
"tf.group(*[...]) not tf.group([...]).")
raise TypeError("Expected tf.group() expected Tensor arguments not "
"'%s' with type '%s'" % (inp, type(inp)))
dev = inp.device
if dev in ops_on_device:
ops_on_device[dev].append(inp)
else:
ops_on_device[dev] = [inp]
if len(ops_on_device) == 1:
# 1-level tree. The root node is the returned NoOp node.
(dev, deps), = ops_on_device.items()
return _GroupControlDeps(dev, deps, name=name)
# 2-level tree. The root node is the returned NoOp node.
# deps contains 1 NoOp node for each device.
deps = []
def device_key(dev):
"""A sort key that allows None to be compared to strings."""
return "" if dev is None else dev
for dev in sorted(six.iterkeys(ops_on_device), key=device_key):
deps.append(_GroupControlDeps(dev, ops_on_device[dev]))
with ops.control_dependencies(deps):
return no_op(name=name)
@tf_export("tuple")
def tuple(tensors, name=None, control_inputs=None): # pylint: disable=redefined-builtin
"""Group tensors together.
This creates a tuple of tensors with the same values as the `tensors`
argument, except that the value of each tensor is only returned after the
values of all tensors have been computed.
`control_inputs` contains additional ops that have to finish before this op
finishes, but whose outputs are not returned.
This can be used as a "join" mechanism for parallel computations: all the
argument tensors can be computed in parallel, but the values of any tensor
returned by `tuple` are only available after all the parallel computations
are done.
See also @{tf.group$group} and
@{tf.control_dependencies$control_dependencies}.
Args:
tensors: A list of `Tensor`s or `IndexedSlices`, some entries can be `None`.
name: (optional) A name to use as a `name_scope` for the operation.
control_inputs: List of additional ops to finish before returning.
Returns:
Same as `tensors`.
Raises:
ValueError: If `tensors` does not contain any `Tensor` or `IndexedSlices`.
TypeError: If `control_inputs` is not a list of `Operation` or `Tensor`
objects.
"""
if context.executing_eagerly():
return tensors
with ops.name_scope(name, "tuple", tensors) as name:
tensors = [t if (isinstance(t, ops.Operation)
or tensor_util.is_tensor(t)
or t is None)
else ops.convert_to_tensor(t) for t in tensors]
gating_ops = [t if isinstance(t, ops.Operation) else t.op for t in tensors
if t is not None]
if control_inputs:
for c in control_inputs:
if isinstance(c, ops.Tensor):
c = c.op
elif not isinstance(c, ops.Operation):
raise TypeError("Control input must be Operation or Tensor: %s" % c)
gating_ops.append(c)
# Note that in order to ensure ordering in the pbtxt, we must take care to
# ensure the order here.
gating_ops = sorted(set(gating_ops), key=lambda op: op._id) # Uniquify ops.
if not gating_ops:
raise ValueError("Must have at least one Tensor: %s" % tensors)
gate = group(*gating_ops)
tpl = []
for t in tensors:
if tensor_util.is_tensor(t):
tpl.append(with_dependencies([gate], t))
elif isinstance(t, ops.Operation):
with ops.control_dependencies([gate]):
tpl.append(group(t))
else:
tpl.append(None)
return tpl
def _assert_at_most_n_true(predicates, n, msg):
"""Returns an Assert op that checks that at most n predicates are True.
Args:
predicates: list of bool scalar tensors.
n: maximum number of true predicates allowed.
msg: Error message.
"""
preds_c = array_ops.stack(predicates, name="preds_c")
num_true_conditions = math_ops.reduce_sum(
math_ops.cast(preds_c, dtypes.int32), name="num_true_conds")
condition = math_ops.less_equal(num_true_conditions,
constant_op.constant(n, name="n_true_conds"))
preds_names = ", ".join(getattr(p, "name", "?") for p in predicates)
error_msg = [
"%s: more than %d conditions (%s) evaluated as True:" %
(msg, n, preds_names), preds_c
]
return Assert(condition, data=error_msg, summarize=len(predicates))
def _case_create_default_action(predicates, actions):
"""Creates default action for a list of actions and their predicates.
It uses the input actions to select an arbitrary action as the default and makes
sure that the corresponding predicates have valid values.
Args:
predicates: a list of bool scalar tensors
actions: a list of callable objects which return tensors.
Returns:
a callable
"""
k = len(predicates) - 1 # could pick any
predicate, action = predicates[k], actions[k]
other_predicates, other_actions = predicates[:k], actions[:k]
def default_action():
others_msg = ("Implementation error: "
"selected default action #%d was called, but some of other "
"predicates are True: " % k)
default_msg = ("Input error: "
"None of conditions evaluated as True:",
array_ops.stack(predicates, name="preds_c"))
with ops.control_dependencies([
_assert_at_most_n_true(other_predicates, n=0, msg=others_msg),
Assert(predicate, data=default_msg)
]):
return action()
return default_action, other_predicates, other_actions
def _case_verify_and_canonicalize_args(pred_fn_pairs, exclusive, name,
allow_python_preds):
"""Verifies input arguments for the case function.
Args:
pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor,
and a callable which returns a list of tensors.
exclusive: True iff at most one predicate is allowed to evaluate to `True`.
name: A name for the case operation.
allow_python_preds: if true, pred_fn_pairs may contain Python bools in
addition to boolean Tensors
Raises:
TypeError: If `pred_fn_pairs` is not a list/dictionary.
TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.
TypeError: If `fns[i]` is not callable for any i, or `default` is not
callable.
Returns:
a tuple <list of scalar bool tensors, list of callables>.
"""
if not isinstance(pred_fn_pairs, (list, _basetuple, dict)):
raise TypeError("fns must be a list, tuple, or dict")
if isinstance(pred_fn_pairs, collections.OrderedDict):
pred_fn_pairs = pred_fn_pairs.items()
elif isinstance(pred_fn_pairs, dict):
pred_fn_pairs = sorted(pred_fn_pairs.items(), key=lambda item: item[0].name)
if not exclusive:
logging.warn("%s: An unordered dictionary of predicate/fn pairs was "
"provided, but exclusive=False. The order of conditional "
"tests is deterministic but not guaranteed.", name)
for pred_fn_pair in pred_fn_pairs:
if not isinstance(pred_fn_pair, _basetuple) or len(pred_fn_pair) != 2:
raise TypeError("Each entry in pred_fn_pairs must be a 2-tuple")
pred, fn = pred_fn_pair
if isinstance(pred, ops.Tensor):
if pred.dtype != dtypes.bool:
raise TypeError("pred must be Tensor of type bool: %s" % pred.name)
elif not allow_python_preds:
raise TypeError("pred must be a Tensor, got: %s" % pred)
elif not isinstance(pred, bool):
raise TypeError("pred must be a Tensor or bool, got: %s" % pred)
if not callable(fn):
raise TypeError("fn for pred %s must be callable." % pred.name)
predicates, actions = zip(*pred_fn_pairs)
return predicates, actions
def _case_helper(cond_fn, pred_fn_pairs, default,
exclusive, name, allow_python_preds=False, **cond_kwargs):
"""Implementation of case that allows for different cond functions.
Args:
cond_fn: method that has signature and semantics of `cond` above.
pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor, and a
callable which returns a list of tensors.
default: Optional callable that returns a list of tensors.
exclusive: True iff at most one predicate is allowed to evaluate to `True`.
name: A name for this operation (optional).
allow_python_preds: if true, pred_fn_pairs may contain Python bools in
addition to boolean Tensors
**cond_kwargs: keyword arguments that will be passed to `cond_fn`.
Returns:
The tensors returned by the first pair whose predicate evaluated to True, or
those returned by `default` if none does.
Raises:
TypeError: If `pred_fn_pairs` is not a list/dictionary.
TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.
TypeError: If `fns[i]` is not callable for any i, or `default` is not
callable.
"""
predicates, actions = _case_verify_and_canonicalize_args(
pred_fn_pairs, exclusive, name, allow_python_preds)
with ops.name_scope(name, "case", [predicates]):
if default is None:
default, predicates, actions = _case_create_default_action(
predicates, actions)
fn = default
# To eval conditions in direct order we create nested conditions in reverse:
# cond_fn(c[0], true_fn=.., false_fn=cond_fn(c[1], ...))
for predicate, action in reversed(list(zip(predicates, actions))):
fn = functools.partial(
cond_fn, predicate, true_fn=action, false_fn=fn, **cond_kwargs)
if exclusive:
with ops.control_dependencies([
_assert_at_most_n_true(
predicates, n=1, msg="Input error: exclusive=True")
]):
return fn()
else:
return fn()
@tf_export("case")
def case(pred_fn_pairs,
default=None,
exclusive=False,
strict=False,
name="case"):
"""Create a case operation.
The `pred_fn_pairs` parameter is a dict or list of pairs of size N.
Each pair contains a boolean scalar tensor and a python callable that
creates the tensors to be returned if the boolean evaluates to True.
`default` is a callable generating a list of tensors. All the callables
in `pred_fn_pairs` as well as `default` (if provided) should return the same
number and types of tensors.
If `exclusive==True`, all predicates are evaluated, and an exception is
thrown if more than one of the predicates evaluates to `True`.
If `exclusive==False`, execution stops at the first predicate which
evaluates to True, and the tensors generated by the corresponding function
are returned immediately. If none of the predicates evaluate to True, this
operation returns the tensors generated by `default`.
`tf.case` supports nested structures as implemented in
`tensorflow.python.util.nest`. All of the callables must return the same
(possibly nested) value structure of lists, tuples, and/or named tuples.
Singleton lists and tuples form the only exceptions to this: when returned by
a callable, they are implicitly unpacked to single values. This
behavior is disabled by passing `strict=True`.
If an unordered dictionary is used for `pred_fn_pairs`, the order of the
conditional tests is not guaranteed. However, the order is guaranteed to be
deterministic, so that variables created in conditional branches are created
in fixed order across runs.
**Example 1:**
Pseudocode:
```
if (x < y) return 17;
else return 23;
```
Expressions:
```python
f1 = lambda: tf.constant(17)
f2 = lambda: tf.constant(23)
r = case([(tf.less(x, y), f1)], default=f2)
```
**Example 2:**
Pseudocode:
```
if (x < y && x > z) raise OpError("Only one predicate may evaluate true");
if (x < y) return 17;
else if (x > z) return 23;
else return -1;
```
Expressions:
```python
def f1(): return tf.constant(17)
def f2(): return tf.constant(23)
def f3(): return tf.constant(-1)
r = case({tf.less(x, y): f1, tf.greater(x, z): f2},
default=f3, exclusive=True)
```
Args:
pred_fn_pairs: Dict or list of pairs of a boolean scalar tensor and a
callable which returns a list of tensors.
default: Optional callable that returns a list of tensors.
exclusive: True iff at most one predicate is allowed to evaluate to `True`.
strict: A boolean that enables/disables 'strict' mode; see above.
name: A name for this operation (optional).
Returns:
The tensors returned by the first pair whose predicate evaluated to True, or
those returned by `default` if none does.
Raises:
TypeError: If `pred_fn_pairs` is not a list/dictionary.
TypeError: If `pred_fn_pairs` is a list but does not contain 2-tuples.
TypeError: If `fns[i]` is not callable for any i, or `default` is not
callable.
"""
return _case_helper(cond, pred_fn_pairs, default, exclusive, name,
allow_python_preds=False, strict=strict)
class XLAControlFlowContext(ControlFlowContext):
"""Base class for XLA and TPU control flow contexts."""
def __init__(self):
super(XLAControlFlowContext, self).__init__()
self._name = "XLAControlFlowContext"
def IsXLAContext(self):
return True
def AddOp(self, _):
pass
def AddValue(self, x):
return x
def from_control_flow_context_def(context_def, import_scope=None):
"""Deserializes `context_def` into the appropriate ControlFlowContext.
Args:
context_def: ControlFlowContextDef proto
import_scope: Optional `string`. Name scope to add.
Returns:
A ControlFlowContext subclass
"""
if context_def.HasField("cond_ctxt"):
return CondContext.from_proto(context_def.cond_ctxt,
import_scope=import_scope)
if context_def.HasField("while_ctxt"):
return WhileContext.from_proto(context_def.while_ctxt,
import_scope=import_scope)
raise NotImplementedError("Unknown ControlFlowContextDef field: %s"
% context_def.WhichOneof("ctxt"))
ops.register_proto_function(
ops.GraphKeys.COND_CONTEXT,
proto_type=control_flow_pb2.CondContextDef,
to_proto=CondContext.to_proto,
from_proto=CondContext.from_proto)
ops.register_proto_function(
ops.GraphKeys.WHILE_CONTEXT,
proto_type=control_flow_pb2.WhileContextDef,
to_proto=WhileContext.to_proto,
from_proto=WhileContext.from_proto)
|
{
"content_hash": "c48a5120b93984f407f9d1934d19ff20",
"timestamp": "",
"source": "github",
"line_count": 3719,
"max_line_length": 113,
"avg_line_length": 38.93143318096262,
"alnum_prop": 0.6640766372439324,
"repo_name": "nburn42/tensorflow",
"id": "ee024ce64a79de3aa326ce710b3f9daba25fb260",
"size": "145475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/control_flow_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "341132"
},
{
"name": "C++",
"bytes": "39824558"
},
{
"name": "CMake",
"bytes": "194702"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "590137"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33704964"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "426212"
}
],
"symlink_target": ""
}
|
"""
Compile YOLO-V2 and YOLO-V3 in DarkNet Models
=============================================
**Author**: `Siju Samuel <https://siju-samuel.github.io/>`_
This article is an introductory tutorial to deploy darknet models with TVM.
All the required models and libraries will be downloaded from the internet by the script.
This script runs the YOLO-V2 and YOLO-V3 models and draws the detected bounding boxes.
Darknet parsing has a dependency on the CFFI and OpenCV (cv2) libraries.
Please install CFFI and OpenCV before executing this script.
.. code-block:: bash
pip install cffi
pip install opencv-python
"""
# numpy and matplotlib
import numpy as np
import matplotlib.pyplot as plt
import sys
# tvm, relay
import tvm
from tvm import te
from tvm import relay
from ctypes import *
from tvm.contrib.download import download_testdata
from tvm.relay.testing.darknet import __darknetffi__
import tvm.relay.testing.yolo_detection
import tvm.relay.testing.darknet
######################################################################
# Choose the model
# -----------------------
# Models are: 'yolov2', 'yolov3' or 'yolov3-tiny'
# Model name
MODEL_NAME = "yolov3"
######################################################################
# Download required files
# -----------------------
# Download the cfg and weights files if running for the first time.
CFG_NAME = MODEL_NAME + ".cfg"
WEIGHTS_NAME = MODEL_NAME + ".weights"
REPO_URL = "https://github.com/dmlc/web-data/blob/main/darknet/"
CFG_URL = REPO_URL + "cfg/" + CFG_NAME + "?raw=true"
WEIGHTS_URL = "https://pjreddie.com/media/files/" + WEIGHTS_NAME
cfg_path = download_testdata(CFG_URL, CFG_NAME, module="darknet")
weights_path = download_testdata(WEIGHTS_URL, WEIGHTS_NAME, module="darknet")
# Download and Load darknet library
if sys.platform in ["linux", "linux2"]:
DARKNET_LIB = "libdarknet2.0.so"
DARKNET_URL = REPO_URL + "lib/" + DARKNET_LIB + "?raw=true"
elif sys.platform == "darwin":
DARKNET_LIB = "libdarknet_mac2.0.so"
DARKNET_URL = REPO_URL + "lib_osx/" + DARKNET_LIB + "?raw=true"
else:
err = "Darknet lib is not supported on {} platform".format(sys.platform)
raise NotImplementedError(err)
lib_path = download_testdata(DARKNET_URL, DARKNET_LIB, module="darknet")
DARKNET_LIB = __darknetffi__.dlopen(lib_path)
net = DARKNET_LIB.load_network(cfg_path.encode("utf-8"), weights_path.encode("utf-8"), 0)
dtype = "float32"
batch_size = 1
data = np.empty([batch_size, net.c, net.h, net.w], dtype)
shape_dict = {"data": data.shape}
print("Converting darknet to relay functions...")
mod, params = relay.frontend.from_darknet(net, dtype=dtype, shape=data.shape)
######################################################################
# Import the graph to Relay
# -------------------------
# compile the model
target = tvm.target.Target("llvm", host="llvm")
dev = tvm.cpu(0)
data = np.empty([batch_size, net.c, net.h, net.w], dtype)
shape = {"data": data.shape}
print("Compiling the model...")
with tvm.transform.PassContext(opt_level=3):
lib = relay.build(mod, target=target, params=params)
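# Optional (illustrative, not required by this tutorial): the compiled module can be
# saved to disk for later reuse; the file name here is an assumption.
#   lib.export_library("yolov3_deploy_lib.so")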
[neth, netw] = shape["data"][2:] # Current image shape is 608x608
######################################################################
# Load a test image
# -----------------
test_image = "dog.jpg"
print("Loading the test image...")
img_url = REPO_URL + "data/" + test_image + "?raw=true"
img_path = download_testdata(img_url, test_image, "data")
data = tvm.relay.testing.darknet.load_image(img_path, netw, neth)
######################################################################
# Execute on TVM Runtime
# ----------------------
# The process is no different from other examples.
from tvm.contrib import graph_executor
m = graph_executor.GraphModule(lib["default"](dev))
# set inputs
m.set_input("data", tvm.nd.array(data.astype(dtype)))
# execute
print("Running the test image...")
# detection
# thresholds
thresh = 0.5
nms_thresh = 0.45
m.run()
# get outputs
tvm_out = []
if MODEL_NAME == "yolov2":
layer_out = {}
layer_out["type"] = "Region"
# Get the region layer attributes (n, out_c, out_h, out_w, classes, coords, background)
layer_attr = m.get_output(2).numpy()
layer_out["biases"] = m.get_output(1).numpy()
out_shape = (layer_attr[0], layer_attr[1] // layer_attr[0], layer_attr[2], layer_attr[3])
layer_out["output"] = m.get_output(0).numpy().reshape(out_shape)
layer_out["classes"] = layer_attr[4]
layer_out["coords"] = layer_attr[5]
layer_out["background"] = layer_attr[6]
tvm_out.append(layer_out)
elif MODEL_NAME == "yolov3":
for i in range(3):
layer_out = {}
layer_out["type"] = "Yolo"
# Get the yolo layer attributes (n, out_c, out_h, out_w, classes, total)
layer_attr = m.get_output(i * 4 + 3).numpy()
layer_out["biases"] = m.get_output(i * 4 + 2).numpy()
layer_out["mask"] = m.get_output(i * 4 + 1).numpy()
out_shape = (layer_attr[0], layer_attr[1] // layer_attr[0], layer_attr[2], layer_attr[3])
layer_out["output"] = m.get_output(i * 4).numpy().reshape(out_shape)
layer_out["classes"] = layer_attr[4]
tvm_out.append(layer_out)
elif MODEL_NAME == "yolov3-tiny":
for i in range(2):
layer_out = {}
layer_out["type"] = "Yolo"
# Get the yolo layer attributes (n, out_c, out_h, out_w, classes, total)
layer_attr = m.get_output(i * 4 + 3).numpy()
layer_out["biases"] = m.get_output(i * 4 + 2).numpy()
layer_out["mask"] = m.get_output(i * 4 + 1).numpy()
out_shape = (layer_attr[0], layer_attr[1] // layer_attr[0], layer_attr[2], layer_attr[3])
layer_out["output"] = m.get_output(i * 4).numpy().reshape(out_shape)
layer_out["classes"] = layer_attr[4]
tvm_out.append(layer_out)
thresh = 0.560
# do the detection and bring up the bounding boxes
img = tvm.relay.testing.darknet.load_image_color(img_path)
_, im_h, im_w = img.shape
dets = tvm.relay.testing.yolo_detection.fill_network_boxes(
(netw, neth), (im_w, im_h), thresh, 1, tvm_out
)
last_layer = net.layers[net.n - 1]
tvm.relay.testing.yolo_detection.do_nms_sort(dets, last_layer.classes, nms_thresh)
coco_name = "coco.names"
coco_url = REPO_URL + "data/" + coco_name + "?raw=true"
font_name = "arial.ttf"
font_url = REPO_URL + "data/" + font_name + "?raw=true"
coco_path = download_testdata(coco_url, coco_name, module="data")
font_path = download_testdata(font_url, font_name, module="data")
with open(coco_path) as f:
content = f.readlines()
names = [x.strip() for x in content]
tvm.relay.testing.yolo_detection.show_detections(img, dets, thresh, names, last_layer.classes)
tvm.relay.testing.yolo_detection.draw_detections(
font_path, img, dets, thresh, names, last_layer.classes
)
plt.imshow(img.transpose(1, 2, 0))
plt.show()
|
{
"content_hash": "bcd0b9598016c8b25146908714bb3bd0",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 97,
"avg_line_length": 36.494623655913976,
"alnum_prop": 0.6218326458456099,
"repo_name": "Laurawly/tvm-1",
"id": "232058641ab008d2c9a2f666ff8a3efa28f9cba5",
"size": "7573",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gallery/how_to/compile_models/from_darknet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "4093"
},
{
"name": "C",
"bytes": "351611"
},
{
"name": "C++",
"bytes": "11660999"
},
{
"name": "CMake",
"bytes": "228510"
},
{
"name": "Cuda",
"bytes": "16902"
},
{
"name": "Cython",
"bytes": "28979"
},
{
"name": "Go",
"bytes": "111527"
},
{
"name": "HTML",
"bytes": "2664"
},
{
"name": "Java",
"bytes": "199950"
},
{
"name": "JavaScript",
"bytes": "15305"
},
{
"name": "Makefile",
"bytes": "67149"
},
{
"name": "Objective-C",
"bytes": "24259"
},
{
"name": "Objective-C++",
"bytes": "87655"
},
{
"name": "Python",
"bytes": "16256580"
},
{
"name": "RenderScript",
"bytes": "1895"
},
{
"name": "Rust",
"bytes": "391076"
},
{
"name": "Shell",
"bytes": "228674"
},
{
"name": "TypeScript",
"bytes": "94385"
}
],
"symlink_target": ""
}
|
"""
This file was automatically generated.
"""
import six
class ajaxSpider(object):
def __init__(self, zap):
self.zap = zap
@property
def status(self):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/status/')))
def results(self, start=None, count=None):
"""
This component is optional and therefore the API will only work if it is installed
"""
params = {}
if start is not None:
params['start'] = start
if count is not None:
params['count'] = count
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/results/', params)))
@property
def number_of_results(self):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/numberOfResults/')))
@property
def full_results(self):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/fullResults/')))
@property
def option_browser_id(self):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionBrowserId/')))
@property
def option_event_wait(self):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionEventWait/')))
@property
def option_max_crawl_depth(self):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionMaxCrawlDepth/')))
@property
def option_max_crawl_states(self):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionMaxCrawlStates/')))
@property
def option_max_duration(self):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionMaxDuration/')))
@property
def option_number_of_browsers(self):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionNumberOfBrowsers/')))
@property
def option_reload_wait(self):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionReloadWait/')))
@property
def option_click_default_elems(self):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionClickDefaultElems/')))
@property
def option_click_elems_once(self):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionClickElemsOnce/')))
@property
def option_random_inputs(self):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/view/optionRandomInputs/')))
def scan(self, url=None, inscope=None, contextname=None, subtreeonly=None, apikey=''):
"""
This component is optional and therefore the API will only work if it is installed
"""
params = {'apikey': apikey}
if url is not None:
params['url'] = url
if inscope is not None:
params['inScope'] = inscope
if contextname is not None:
params['contextName'] = contextname
if subtreeonly is not None:
params['subtreeOnly'] = subtreeonly
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/scan/', params)))
def scan_as_user(self, contextname, username, url=None, subtreeonly=None, apikey=''):
"""
This component is optional and therefore the API will only work if it is installed
"""
params = {'contextName': contextname, 'userName': username, 'apikey': apikey}
if url is not None:
params['url'] = url
if subtreeonly is not None:
params['subtreeOnly'] = subtreeonly
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/scanAsUser/', params)))
def stop(self, apikey=''):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/stop/', {'apikey': apikey})))
def set_option_browser_id(self, string, apikey=''):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionBrowserId/', {'String': string, 'apikey': apikey})))
def set_option_click_default_elems(self, boolean, apikey=''):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionClickDefaultElems/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_click_elems_once(self, boolean, apikey=''):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionClickElemsOnce/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_event_wait(self, integer, apikey=''):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionEventWait/', {'Integer': integer, 'apikey': apikey})))
def set_option_max_crawl_depth(self, integer, apikey=''):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionMaxCrawlDepth/', {'Integer': integer, 'apikey': apikey})))
def set_option_max_crawl_states(self, integer, apikey=''):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionMaxCrawlStates/', {'Integer': integer, 'apikey': apikey})))
def set_option_max_duration(self, integer, apikey=''):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionMaxDuration/', {'Integer': integer, 'apikey': apikey})))
def set_option_number_of_browsers(self, integer, apikey=''):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionNumberOfBrowsers/', {'Integer': integer, 'apikey': apikey})))
def set_option_random_inputs(self, boolean, apikey=''):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionRandomInputs/', {'Boolean': boolean, 'apikey': apikey})))
def set_option_reload_wait(self, integer, apikey=''):
"""
This component is optional and therefore the API will only work if it is installed
"""
return six.next(six.itervalues(self.zap._request(self.zap.base + 'ajaxSpider/action/setOptionReloadWait/', {'Integer': integer, 'apikey': apikey})))
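# Illustrative usage sketch (assumes a ZAPv2 client instance `zap` created elsewhere,
# e.g. zap = ZAPv2(apikey='changeme'), plus `import time`; the 'running' status string
# is an assumption about the value reported by the add-on):
#   zap.ajaxSpider.scan('http://target.example.com', apikey='changeme')
#   while zap.ajaxSpider.status == 'running':
#       time.sleep(2)
#   print(zap.ajaxSpider.number_of_results)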
|
{
"content_hash": "8c21e670d31de28e6df4e120057166a5",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 163,
"avg_line_length": 43.951219512195124,
"alnum_prop": 0.6478357380688125,
"repo_name": "Woolworths/zap-api-python",
"id": "3a6885cc3c0c558519f1e28dd43923fdbbe4ad7f",
"size": "9723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/zapv2/ajaxSpider.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "152889"
}
],
"symlink_target": ""
}
|
"""This example removes the user from all its teams.
To determine which users exist, run get_all_users.py.
Tags: UserTeamAssociationService.performUserTeamAssociationAction
"""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.dfp import DfpUtils
# Initialize client object.
client = DfpClient(path=os.path.join('..', '..', '..', '..'))
# Initialize appropriate service.
user_team_association_service = client.GetService(
'UserTeamAssociationService', version='v201302')
user_id = 'INSERT_USER_ID_HERE'
# Create filter text to select user team associations by the user ID.
values = [{
'key': 'userId',
'value': {
'xsi_type': 'NumberValue',
'value': user_id
}
}]
query = 'WHERE userId = :userId'
# Get user team associations by statement.
user_team_associations = DfpUtils.GetAllEntitiesByStatementWithService(
user_team_association_service, query=query, bind_vars=values)
for user_team_association in user_team_associations:
print ('User team association between user with ID \'%s\' and team with '
'ID \'%s\' will be deleted.' % (user_team_association['userId'],
user_team_association['teamId']))
print ('Number of teams that the user will be removed from: %s' %
len(user_team_associations))
# Perform action.
result = user_team_association_service.PerformUserTeamAssociationAction(
{'type': 'DeleteUserTeamAssociations'},
{'query': query, 'values': values})[0]
# Display results.
if result and int(result['numChanges']) > 0:
print ('Number of teams that the user was removed from: %s'
% result['numChanges'])
else:
print 'No user team associations were deleted.'
|
{
"content_hash": "8c5182322149bcb30dd09db1727350e6",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 80,
"avg_line_length": 32.81967213114754,
"alnum_prop": 0.6918081918081919,
"repo_name": "donspaulding/adspygoogle",
"id": "aa5520e01cb922d308c92fa60ce4813a23845297",
"size": "2620",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/adspygoogle/dfp/v201302/delete_user_team_associations.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3734067"
},
{
"name": "Shell",
"bytes": "603"
}
],
"symlink_target": ""
}
|
from random import randint
def binary_search(array, element):
"""
Perform Binary Search by Iterative Method.
:param array: Iterable of elements
:param element: element to search
:return: returns value of index of element (if found) else return None
"""
left = 0
right = len(array) - 1
while left <= right:
mid = (right + left) // 2
# indices of a list must be integer
if array[mid] == element:
return mid
elif array[mid] > element:
right = mid - 1
else:
left = mid + 1
return None
def binary_search_recursive(array, element, left=0, right=None):
"""
Perform Binary Search by Recursive Method.
:param array: Iterable of elements
:param element: element to be searched
:param left: start limit of array
:param right: end limit of array
:return: returns value of index of element (if found) else return None
"""
right = len(array) - 1 if right is None else right
if right >= left:
mid = (right + left) // 2
if array[mid] == element:
return mid
elif array[mid] > element:
return binary_search_recursive(array, element, left, mid - 1)
else:
return binary_search_recursive(array, element, mid + 1, right)
else:
return None
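# Worked example: with array = [1, 3, 5, 7, 9], both binary_search(array, 7) and
# binary_search_recursive(array, 7) return index 3, while searching for 4 returns
# None because the element is absent.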
def main():
size = 100 # user can change it
domain = 100 # user can change it
array = [1, 9, 11, 13, 5, 7, 8, 5, 17, 1156, 114]
array.sort()
element = 13
result = binary_search_recursive(array, element)
if result is None:
print('Recursive Binary Search : Element not present in array')
else:
print('Recursive Binary Search : Element is present at index', result)
result = binary_search(array, element)
if result is None:
print('Iterative Binary Search : Element not present in array')
else:
print('Iterative Binary Search : Element is present at index', result)
if __name__ == '__main__':
main()
|
{
"content_hash": "ef09f8ee576a90a8f979795a237f6b3a",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 78,
"avg_line_length": 30.014705882352942,
"alnum_prop": 0.6065654091131798,
"repo_name": "iiitv/algos",
"id": "71be43524ef0a87c0021d79e1a69104abe2fdcff",
"size": "2041",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "binary_search/binary_search.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "46001"
},
{
"name": "C#",
"bytes": "23392"
},
{
"name": "C++",
"bytes": "19868"
},
{
"name": "Go",
"bytes": "27067"
},
{
"name": "Java",
"bytes": "82941"
},
{
"name": "JavaScript",
"bytes": "26815"
},
{
"name": "Python",
"bytes": "57228"
},
{
"name": "Shell",
"bytes": "2325"
}
],
"symlink_target": ""
}
|
"""
Support for Wink binary sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.wink/
"""
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.sensor.wink import WinkDevice
from homeassistant.helpers.entity import Entity
DEPENDENCIES = ['wink']
# These are the available sensors mapped to binary_sensor class
SENSOR_TYPES = {
"opened": "opening",
"brightness": "light",
"vibration": "vibration",
"loudness": "sound",
"liquid_detected": "moisture",
"motion": "motion",
"presence": "occupancy",
"co_detected": "gas",
"smoke_detected": "smoke"
}
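# For example, a Wink device whose capability() is "opened" is exposed to Home
# Assistant as a binary sensor with the "opening" sensor class.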
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Wink binary sensor platform."""
import pywink
for sensor in pywink.get_sensors():
if sensor.capability() in SENSOR_TYPES:
add_devices([WinkBinarySensorDevice(sensor, hass)])
for key in pywink.get_keys():
add_devices([WinkBinarySensorDevice(key, hass)])
for sensor in pywink.get_smoke_and_co_detectors():
add_devices([WinkBinarySensorDevice(sensor, hass)])
for hub in pywink.get_hubs():
add_devices([WinkHub(hub, hass)])
for remote in pywink.get_remotes():
add_devices([WinkRemote(remote, hass)])
for button in pywink.get_buttons():
add_devices([WinkButton(button, hass)])
for gang in pywink.get_gangs():
add_devices([WinkGang(gang, hass)])
class WinkBinarySensorDevice(WinkDevice, BinarySensorDevice, Entity):
"""Representation of a Wink binary sensor."""
def __init__(self, wink, hass):
"""Initialize the Wink binary sensor."""
super().__init__(wink, hass)
self._unit_of_measurement = self.wink.unit()
self.capability = self.wink.capability()
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self.wink.state()
@property
def sensor_class(self):
"""Return the class of this sensor, from SENSOR_CLASSES."""
return SENSOR_TYPES.get(self.capability)
class WinkHub(WinkDevice, BinarySensorDevice, Entity):
"""Representation of a Wink Hub."""
def __init__(self, wink, hass):
"""Initialize the hub sensor."""
WinkDevice.__init__(self, wink, hass)
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self.wink.state()
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
'update needed': self.wink.update_needed(),
'firmware version': self.wink.firmware_version()
}
class WinkRemote(WinkDevice, BinarySensorDevice, Entity):
"""Representation of a Wink Lutron Connected bulb remote."""
def __init__(self, wink, hass):
"""Initialize the hub sensor."""
WinkDevice.__init__(self, wink, hass)
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self.wink.state()
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
'button_on_pressed': self.wink.button_on_pressed(),
'button_off_pressed': self.wink.button_off_pressed(),
'button_up_pressed': self.wink.button_up_pressed(),
'button_down_pressed': self.wink.button_down_pressed()
}
class WinkButton(WinkDevice, BinarySensorDevice, Entity):
"""Representation of a Wink Relay button."""
def __init__(self, wink, hass):
"""Initialize the hub sensor."""
WinkDevice.__init__(self, wink, hass)
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self.wink.state()
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
'pressed': self.wink.pressed(),
'long_pressed': self.wink.long_pressed()
}
class WinkGang(WinkDevice, BinarySensorDevice, Entity):
"""Representation of a Wink Relay gang."""
def __init__(self, wink, hass):
"""Initialize the gang sensor."""
WinkDevice.__init__(self, wink, hass)
@property
def is_on(self):
"""Return true if the gang is connected."""
return self.wink.state()
|
{
"content_hash": "55cefc0cfdc8ef7b29af1a369f97a785",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 74,
"avg_line_length": 29.613333333333333,
"alnum_prop": 0.6307969383160739,
"repo_name": "eagleamon/home-assistant",
"id": "19ecb853536586f0b7fb24bd866f017b2f119608",
"size": "4442",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/binary_sensor/wink.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1510047"
},
{
"name": "Python",
"bytes": "5066084"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "14079"
}
],
"symlink_target": ""
}
|
from authentication.nodes import UserNode
from django.shortcuts import render
from authentication.models import UserProfile
from django.http import HttpResponse,HttpResponseRedirect
from .forms import UserProfileForm
# Create your views here.
def home(request):
user = UserNode.nodes.get(uid=UserProfile.objects.get(user=request.user).id)
form=UserProfileForm()
return render(request,'profile.html',{'form':form,'profile':user.get_profile()})
def edit(request):
profile = UserProfile.objects.get(user=request.user)
form = UserProfileForm(request.POST, request.FILES, instance=profile)
if form.is_valid():
form.save()
return HttpResponseRedirect('/profile/')
|
{
"content_hash": "c503fb3f1abdabf697f4af357d09340a",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 84,
"avg_line_length": 44.6,
"alnum_prop": 0.7832585949177877,
"repo_name": "programmernoob1/MeeT",
"id": "18081e1772bbe6d8cd99f1d9179a5e5b8fb1520d",
"size": "669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "userprofile/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "4199"
},
{
"name": "HTML",
"bytes": "11568"
},
{
"name": "JavaScript",
"bytes": "1749"
},
{
"name": "Python",
"bytes": "22572"
}
],
"symlink_target": ""
}
|
'''Handles all the data preparation including: feature engineering, dimensionality reduction, and clustering
Inspiration for the feature engineering had several sources:
http://trevorstephens.com/post/73461351896/titanic-getting-started-with-r-part-4-feature
http://triangleinequality.wordpress.com/2013/09/08/basic-feature-engineering-with-the-titanic-data/
http://www.sgzhaohang.com/blog/tag/kaggle/
'''
import re
import numpy as np
import pandas as pd
import random as rd
from sklearn import preprocessing
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
# Print options
np.set_printoptions(precision=4, threshold=10000, linewidth=160, edgeitems=999, suppress=True)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', 160)
pd.set_option('expand_frame_repr', False)
pd.set_option('precision', 4)
def processCabin():
""" Generate features from the Cabin variable
Cabin values, when present, contain a single cabin number (or a space-delimited list of them),
each composed of a letter and a number with no separator between them. This is a sparse variable: < 30% is populated
"""
global df
# Replace missing values with "U0"
df['Cabin'][df.Cabin.isnull()] = 'U0'
# create feature for the alphabetical part of the cabin number
df['CabinLetter'] = df['Cabin'].map( lambda x : getCabinLetter(x))
df['CabinLetter'] = pd.factorize(df['CabinLetter'])[0]
# create binary features for each cabin letters
if keep_binary:
cletters = pd.get_dummies(df['CabinLetter']).rename(columns=lambda x: 'CabinLetter_' + str(x))
df = pd.concat([df, cletters], axis=1)
# create feature for the numerical part of the cabin number
df['CabinNumber'] = df['Cabin'].map( lambda x : getCabinNumber(x)).astype(int) + 1
# scale the number to process as a continuous feature
if keep_scaled:
scaler = preprocessing.StandardScaler()
df['CabinNumber_scaled'] = scaler.fit_transform(df['CabinNumber'])
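# Worked example of the parsing above (cabin strings assumed for illustration):
#   "C85"     -> letter "C", number 85 (stored as 86 after the +1 offset)
#   "B57 B59" -> letter "B", number 57 (the first regex match wins; stored as 58)
#   missing   -> replaced by "U0" -> letter "U", number 0 (stored as 1)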
def getCabinLetter(cabin):
"""
Find the letter component of the Cabin variable
"""
match = re.compile("([a-zA-Z]+)").search(cabin)
if match:
return match.group()
else:
return 'U'
def getCabinNumber(cabin):
"""
Find the number component of the Cabin variable
"""
match = re.compile("([0-9]+)").search(cabin)
if match:
return match.group()
else:
return 0
def processTicket():
"""
Generate features from the Ticket variable
"""
global df
df['TicketPrefix'] = df['Ticket'].map( lambda x : getTicketPrefix(x.upper()))
df['TicketPrefix'] = df['TicketPrefix'].map( lambda x: re.sub('[\.?\/?]', '', x) )
df['TicketPrefix'] = df['TicketPrefix'].map( lambda x: re.sub('STON', 'SOTON', x) )
#print len(df['TicketPrefix'].unique()), "ticket codes:", np.sort(df['TicketPrefix'].unique())
df['TicketPrefixId'] = pd.factorize(df['TicketPrefix'])[0]
# create binary features for each cabin letters
if keep_binary:
prefixes = pd.get_dummies(df['TicketPrefix']).rename(columns=lambda x: 'TicketPrefix_' + str(x))
df = pd.concat([df, prefixes], axis=1)
df.drop(['TicketPrefix'], axis=1, inplace=True)
df['TicketNumber'] = df['Ticket'].map( lambda x: getTicketNumber(x) )
df['TicketNumberDigits'] = df['TicketNumber'].map( lambda x: len(x) ).astype(np.int)
df['TicketNumberStart'] = df['TicketNumber'].map( lambda x: x[0:1] ).astype(np.int)
#print np.sort(df.TicketNumberStart.unique())
df['TicketNumber'] = df.TicketNumber.astype(np.int)
#print np.sort(df['TicketNumber'])
if keep_scaled:
scaler = preprocessing.StandardScaler()
df['TicketNumber_scaled'] = scaler.fit_transform(df['TicketNumber'])
def getTicketPrefix(ticket):
"""
Find the letter component of the Ticket variable
"""
match = re.compile("([a-zA-Z\.\/]+)").search(ticket)
if match:
return match.group()
else:
return 'U'
### Find the numerical component of the ticket variable
def getTicketNumber(ticket):
match = re.compile("([\d]+$)").search(ticket)
if match:
return match.group()
else:
return '0'
### Generate features from the ticket price
def processFare():
global df
# replace missing values with the median fare. Currently the datasets only contain one missing Fare value
df['Fare'][ np.isnan(df['Fare']) ] = df['Fare'].median()
# zero values cause problems with our division interaction variables so set to 1/10th of the lowest fare
df['Fare'][ np.where(df['Fare']==0)[0] ] = df['Fare'][ df['Fare'].nonzero()[0] ].min() / 10
# bin into quintiles for binary features
df['Fare_bin'] = pd.qcut(df['Fare'], 4)
if keep_binary:
df = pd.concat([df, pd.get_dummies(df['Fare_bin']).rename(columns=lambda x: 'Fare_' + str(x))], axis=1)
if keep_bins:
df['Fare_bin_id'] = pd.factorize(df['Fare_bin'])[0]+1
# center and scale the fare to use as a continuous variable
if keep_scaled:
scaler = preprocessing.StandardScaler()
df['Fare_scaled'] = scaler.fit_transform(df['Fare'])
if keep_bins and keep_scaled:
scaler = preprocessing.StandardScaler()
df['Fare_bin_id_scaled'] = scaler.fit_transform(df['Fare_bin_id'])
if not keep_strings:
df.drop('Fare_bin', axis=1, inplace=True)
### Build binary features from 3-valued categorical feature
def processEmbarked():
global df
# Replace missing values with most common port, and create binary features
df.Embarked[ df.Embarked.isnull() ] = df.Embarked.dropna().mode().values
# Lets turn this into a number so it conforms to decision tree feature requirements
df['Embarked'] = pd.factorize(df['Embarked'])[0]
# Create binary features for each port
if keep_binary:
df = pd.concat([df, pd.get_dummies(df['Embarked']).rename(columns=lambda x: 'Embarked_' + str(x))], axis=1)
### Generate features based on the passenger class
def processPClass():
global df
# Replace missing values with mode
df.Pclass[ df.Pclass.isnull() ] = df.Pclass.dropna().mode().values
# create binary features
if keep_binary:
df = pd.concat([df, pd.get_dummies(df['Pclass']).rename(columns=lambda x: 'Pclass_' + str(x))], axis=1)
if keep_scaled:
scaler = preprocessing.StandardScaler()
df['Pclass_scaled'] = scaler.fit_transform(df['Pclass'])
### Generate features from the SibSp and Parch variables
def processFamily():
global df
# interaction variables require no zeros, so let's just bump everything
df['SibSp'] = df['SibSp'] + 1
df['Parch'] = df['Parch'] + 1
# First process scaling
if keep_scaled:
scaler = preprocessing.StandardScaler()
df['SibSp_scaled'] = scaler.fit_transform(df['SibSp'])
df['Parch_scaled'] = scaler.fit_transform(df['Parch'])
# Then build binary features
if keep_binary:
sibsps = pd.get_dummies(df['SibSp']).rename(columns=lambda x: 'SibSp_' + str(x))
parchs = pd.get_dummies(df['Parch']).rename(columns=lambda x: 'Parch_' + str(x))
df = pd.concat([df, sibsps, parchs], axis=1)
### Convert the Sex variable from a string to binary
def processSex():
global df
df['Gender'] = np.where(df['Sex'] == 'male', 1, 0)
### Generate features from the Name variable
def processName():
global df
# how many different names do they have?
df['Names'] = df['Name'].map(lambda x: len(re.split(' ', x)))
# what is each person's title?
df['Title'] = df['Name'].map(lambda x: re.compile(", (.*?)\.").findall(x)[0])
# group low-occurring, related titles together
df['Title'][df.Title == 'Jonkheer'] = 'Master'
df['Title'][df.Title.isin(['Ms','Mlle'])] = 'Miss'
df['Title'][df.Title == 'Mme'] = 'Mrs'
df['Title'][df.Title.isin(['Capt', 'Don', 'Major', 'Col', 'Sir'])] = 'Sir'
df['Title'][df.Title.isin(['Dona', 'Lady', 'the Countess'])] = 'Lady'
# Build binary features
if keep_binary:
df = pd.concat([df, pd.get_dummies(df['Title']).rename(columns=lambda x: 'Title_' + str(x))], axis=1)
# process scaling
if keep_scaled:
scaler = preprocessing.StandardScaler()
df['Names_scaled'] = scaler.fit_transform(df['Names'])
if keep_bins:
df['Title_id'] = pd.factorize(df['Title'])[0]+1
if keep_bins and keep_scaled:
scaler = preprocessing.StandardScaler()
df['Title_id_scaled'] = scaler.fit_transform(df['Title_id'])
### Generate features from the Age variable
def processAge():
global df
setMissingAges()
# center the mean and scale to unit variance
if keep_scaled:
scaler = preprocessing.StandardScaler()
df['Age_scaled'] = scaler.fit_transform(df['Age'])
# have a feature for children
df['isChild'] = np.where(df.Age < 13, 1, 0)
# bin into quartiles and create binary features
df['Age_bin'] = pd.qcut(df['Age'], 4)
if keep_binary:
df = pd.concat([df, pd.get_dummies(df['Age_bin']).rename(columns=lambda x: 'Age_' + str(x))], axis=1)
if keep_bins:
df['Age_bin_id'] = pd.factorize(df['Age_bin'])[0]+1
if keep_bins and keep_scaled:
scaler = preprocessing.StandardScaler()
df['Age_bin_id_scaled'] = scaler.fit_transform(df['Age_bin_id'])
if not keep_strings:
df.drop('Age_bin', axis=1, inplace=True)
### Populate missing ages using a RandomForestClassifier
def setMissingAges():
global df
age_df = df[['Age','Embarked','Fare', 'Parch', 'SibSp', 'Title_id','Pclass','Names','CabinLetter']]
X = age_df.loc[ (df.Age.notnull()) ].values[:, 1::]
y = age_df.loc[ (df.Age.notnull()) ].values[:, 0]
rtr = RandomForestRegressor(n_estimators=2000, n_jobs=-1)
rtr.fit(X, y)
predictedAges = rtr.predict(age_df.loc[ (df.Age.isnull()) ].values[:, 1::])
df.loc[ (df.Age.isnull()), 'Age' ] = predictedAges
### Keep the raw list until the very end even if raw values are not retained so that interaction
### parameters can be created
def processDrops():
global df
rawDropList = ['Name', 'Names', 'Title', 'Sex', 'SibSp', 'Parch', 'Pclass', 'Embarked', \
'Cabin', 'CabinLetter', 'CabinNumber', 'Age', 'Fare', 'Ticket', 'TicketNumber']
stringsDropList = ['Title', 'Name', 'Cabin', 'Ticket', 'Sex', 'Ticket', 'TicketNumber']
if not keep_raw:
df.drop(rawDropList, axis=1, inplace=True)
elif not keep_strings:
df.drop(stringsDropList, axis=1, inplace=True)
def getDataSets(binary=False, bins=False, scaled=False, strings=False, raw=True, pca=False, balanced=False):
"""
Performs all feature engineering tasks including populating missing values, generating binary categorical
features, scaling, and other transformations. The boolean parameters of this function will allow fine-grained
control of what types of features to return, so that it can be used by multiple ML algorithms
Parameters
==========
binary - boolean
whether or not to include binary features in the data set
bins - boolean
whether or not to include binned features in the data set
scaled - boolean
whether or not to include scaled features in the data set
strings - boolean
whether or not to include features that are strings in the data set
raw - boolean
whether or not to include raw features in the data set
pca - boolean
whether or not to perform PCA on the data set
balanced - boolean
whether or not to undersample the perished examples to balance the class distributions
Returns
=======
input_df - array-like
The labeled training data
submit_df - array-like
The unlabled test data to predict and submit
"""
global keep_binary, keep_bins, keep_scaled, keep_raw, keep_strings, df
keep_binary = binary
keep_bins = bins
keep_scaled = scaled
keep_raw = raw
keep_strings = strings
# read in the training and testing data into Pandas.DataFrame objects
input_df = pd.read_csv('../../data/train.csv', header=0)
submit_df = pd.read_csv('../../data/test.csv', header=0)
# merge the two DataFrames into one
df = pd.concat([input_df, submit_df])
# re-number the combined data set so there aren't duplicate indexes
df.reset_index(inplace=True)
# reset_index() generates a new column that we don't want, so let's get rid of it
df.drop('index', axis=1, inplace=True)
# the remaining columns need to be reindexed so we can access the first column at '0' instead of '1'
df = df.reindex_axis(input_df.columns, axis=1)
# process the individual variables present in the raw data
processCabin()
processTicket()
processName()
processFare()
processEmbarked()
processFamily()
processSex()
processPClass()
processAge()
processDrops()
# Move the survived column back to the first position
columns_list = list(df.columns.values)
columns_list.remove('Survived')
new_col_list = list(['Survived'])
new_col_list.extend(columns_list)
df = df.reindex(columns=new_col_list)
print "Starting with", df.columns.size, "manually generated features...\n", df.columns.values
#*********************************************************************************************************
# Automated feature generation based on basic math on scaled features
numerics = df.loc[:, ['Age_scaled', 'Fare_scaled', 'Pclass_scaled', 'Parch_scaled', 'SibSp_scaled',
'Names_scaled', 'CabinNumber_scaled', 'Age_bin_id_scaled', 'Fare_bin_id_scaled']]
print "\nFeatures used for automated feature generation:\n", numerics.head(10)
new_fields_count = 0
for i in range(0, numerics.columns.size-1):
for j in range(0, numerics.columns.size-1):
if i <= j:
name = str(numerics.columns.values[i]) + "*" + str(numerics.columns.values[j])
df = pd.concat([df, pd.Series(numerics.iloc[:,i] * numerics.iloc[:,j], name=name)], axis=1)
new_fields_count += 1
if i < j:
name = str(numerics.columns.values[i]) + "+" + str(numerics.columns.values[j])
df = pd.concat([df, pd.Series(numerics.iloc[:,i] + numerics.iloc[:,j], name=name)], axis=1)
new_fields_count += 1
if not i == j:
name = str(numerics.columns.values[i]) + "/" + str(numerics.columns.values[j])
df = pd.concat([df, pd.Series(numerics.iloc[:,i] / numerics.iloc[:,j], name=name)], axis=1)
name = str(numerics.columns.values[i]) + "-" + str(numerics.columns.values[j])
df = pd.concat([df, pd.Series(numerics.iloc[:,i] - numerics.iloc[:,j], name=name)], axis=1)
new_fields_count += 2
print "\n", new_fields_count, "new features generated"
#*********************************************************************************************************
# Use Spearman correlation to remove highly correlated features
# calculate the correlation matrix
df_corr = df.drop(['Survived', 'PassengerId'],axis=1).corr(method='spearman')
# create a mask to ignore self-correlations (zero out the diagonal)
mask = np.ones(df_corr.columns.size) - np.eye(df_corr.columns.size)
df_corr = mask * df_corr
drops = []
# loop through each variable
for col in df_corr.columns.values:
# if we've already determined to drop the current variable, continue
if np.in1d([col],drops):
continue
# find all the variables that are highly correlated with the current variable
# and add them to the drop list
corr = df_corr[abs(df_corr[col]) > 0.98].index
#print col, "highly correlated with:", corr
drops = np.union1d(drops, corr)
print "\nDropping", drops.shape[0], "highly correlated features...\n" #, drops
df.drop(drops, axis=1, inplace=True)
#*********************************************************************************************************
# Split the data sets apart again, perform PCA/clustering/class balancing if necessary
#
input_df = df[:input_df.shape[0]]
submit_df = df[input_df.shape[0]:]
if pca:
print "reducing and clustering now..."
input_df, submit_df = reduceAndCluster(input_df, submit_df)
else:
# drop the empty 'Survived' column for the test set that was created during set concatenation
submit_df.drop('Survived', axis=1, inplace=True)
print "\n", input_df.columns.size, "initial features generated...\n" #, input_df.columns.values
if balanced:
# Undersample training examples of passengers who did not survive
print 'Perished data shape:', input_df[input_df.Survived==0].shape
print 'Survived data shape:', input_df[input_df.Survived==1].shape
perished_sample = rd.sample(input_df[input_df.Survived==0].index, input_df[input_df.Survived==1].shape[0])
input_df = pd.concat([input_df.ix[perished_sample], input_df[input_df.Survived==1]])
input_df.sort(inplace=True)
print 'New even class training shape:', input_df.shape
return input_df, submit_df
def reduceAndCluster(input_df, submit_df, clusters=3):
"""
Takes the train and test data frames and performs dimensionality reduction with PCA and clustering
This was part of some experimentation and wasn't used for top scoring submissions. Leaving it in for reference
"""
# join the full data together
df = pd.concat([input_df, submit_df])
df.reset_index(inplace=True)
df.drop('index', axis=1, inplace=True)
df = df.reindex_axis(input_df.columns, axis=1)
# Series of labels
survivedSeries = pd.Series(df['Survived'], name='Survived')
print df.head()
# Split into feature and label arrays
X = df.values[:, 1::]
y = df.values[:, 0]
print X[0:5]
# Minimum percentage of variance we want to be described by the resulting transformed components
variance_pct = .99
# Create PCA object
pca = PCA(n_components=variance_pct)
# Transform the initial features
X_transformed = pca.fit_transform(X,y)
# Create a data frame from the PCA'd data
pcaDataFrame = pd.DataFrame(X_transformed)
print pcaDataFrame.shape[1], " components describe ", str(variance_pct)[1:], "% of the variance"
# use basic clustering to group similar examples and save the cluster ID for each example in train and test
kmeans = KMeans(n_clusters=clusters, random_state=np.random.RandomState(4), init='random')
#==============================================================================================================
# # Perform clustering on labeled AND unlabeled data
# clusterIds = kmeans.fit_predict(X_pca)
#==============================================================================================================
# Perform clustering on labeled data and then predict clusters for unlabeled data
trainClusterIds = kmeans.fit_predict(X_transformed[:input_df.shape[0]])
print "clusterIds shape for training data: ", trainClusterIds.shape
#print "trainClusterIds: ", trainClusterIds
testClusterIds = kmeans.predict(X_transformed[input_df.shape[0]:])
print "clusterIds shape for test data: ", testClusterIds.shape
#print "testClusterIds: ", testClusterIds
clusterIds = np.concatenate([trainClusterIds, testClusterIds])
print "all clusterIds shape: ", clusterIds.shape
#print "clusterIds: ", clusterIds
# construct the new DataFrame comprised of "Survived", "ClusterID", and the PCA features
clusterIdSeries = pd.Series(clusterIds, name='ClusterId')
df = pd.concat([survivedSeries, clusterIdSeries, pcaDataFrame], axis=1)
# split into separate input and test sets again
input_df = df[:input_df.shape[0]]
submit_df = df[input_df.shape[0]:]
submit_df.reset_index(inplace=True)
submit_df.drop('index', axis=1, inplace=True)
submit_df.drop('Survived', axis=1, inplace=True)
return input_df, submit_df
if __name__ == '__main__':
"""
Test script to make sure everything is running about right
I did some experiments with clustering and trying to build separate models for each cluster, but I couldn't
get even sized clusters even with significant tweaking
"""
train, test = getDataSets(bins=True, scaled=True, binary=True)
drop_list = ['PassengerId']
train.drop(drop_list, axis=1, inplace=True)
test.drop(drop_list, axis=1, inplace=True)
train, test = reduceAndCluster(train, test)
print "Labeled survived counts :\n", pd.value_counts(train['Survived'])/train.shape[0]
print "Labeled cluster counts :\n", pd.value_counts(train['ClusterId'])/train.shape[0]
print "Unlabeled cluster counts:\n", pd.value_counts(test['ClusterId'])/test.shape[0]
print train.columns.values
|
{
"content_hash": "4889559ef573b75490d54095d579b3a5",
"timestamp": "",
"source": "github",
"line_count": 567,
"max_line_length": 115,
"avg_line_length": 39.22398589065256,
"alnum_prop": 0.6066546762589928,
"repo_name": "davenovelli/SDPAKaggle",
"id": "f1cc20dd7ee68c4b1b7edd79bca9a0073701ffcf",
"size": "22240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/davenovelli/loaddata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "31426"
},
{
"name": "Python",
"bytes": "22240"
}
],
"symlink_target": ""
}
|
import time
from inspect import isclass
from flask import Blueprint, current_app, request
from flask_login import current_user, login_required
from flask_restful import Resource, abort
from redash import settings
from redash.authentication import current_org
from redash.models import db
from redash.tasks import record_event as record_event_task
from redash.utils import json_dumps
from sqlalchemy.orm.exc import NoResultFound
from sqlalchemy import cast
from sqlalchemy.dialects import postgresql
from sqlalchemy_utils import sort_query
routes = Blueprint(
"redash", __name__, template_folder=settings.fix_assets_path("templates")
)
class BaseResource(Resource):
decorators = [login_required]
def __init__(self, *args, **kwargs):
super(BaseResource, self).__init__(*args, **kwargs)
self._user = None
def dispatch_request(self, *args, **kwargs):
kwargs.pop("org_slug", None)
return super(BaseResource, self).dispatch_request(*args, **kwargs)
@property
def current_user(self):
return current_user._get_current_object()
@property
def current_org(self):
return current_org._get_current_object()
def record_event(self, options):
record_event(self.current_org, self.current_user, options)
# TODO: this should probably be somewhere else
def update_model(self, model, updates):
for k, v in updates.items():
setattr(model, k, v)
def record_event(org, user, options):
if user.is_api_user():
options.update({"api_key": user.name, "org_id": org.id})
else:
options.update({"user_id": user.id, "user_name": user.name, "org_id": org.id})
options.update({"user_agent": request.user_agent.string, "ip": request.remote_addr})
if "timestamp" not in options:
options["timestamp"] = int(time.time())
record_event_task.delay(options)
def require_fields(req, fields):
for f in fields:
if f not in req:
abort(400)
def get_object_or_404(fn, *args, **kwargs):
try:
rv = fn(*args, **kwargs)
if rv is None:
abort(404)
except NoResultFound:
abort(404)
return rv
def paginate(query_set, page, page_size, serializer, **kwargs):
count = query_set.count()
if page < 1:
abort(400, message="Page must be positive integer.")
if (page - 1) * page_size + 1 > count > 0:
abort(400, message="Page is out of range.")
if page_size > 250 or page_size < 1:
abort(400, message="Page size is out of range (1-250).")
results = query_set.paginate(page, page_size)
# support for old function based serializers
if isclass(serializer):
items = serializer(results.items, **kwargs).serialize()
else:
items = [serializer(result) for result in results.items]
return {"count": count, "page": page, "page_size": page_size, "results": items}
def org_scoped_rule(rule):
if settings.MULTI_ORG:
return "/<org_slug>{}".format(rule)
return rule
def json_response(response):
return current_app.response_class(json_dumps(response), mimetype="application/json")
def filter_by_tags(result_set, column):
if request.args.getlist("tags"):
tags = request.args.getlist("tags")
result_set = result_set.filter(
cast(column, postgresql.ARRAY(db.Text)).contains(tags)
)
return result_set
def order_results(results, default_order, allowed_orders, fallback=True):
"""
Orders the given results with the sort order as requested in the
"order" request query parameter or the given default order.
"""
# See if a particular order has been requested
requested_order = request.args.get("order", "").strip()
# and if not (and no fallback is wanted) return results as is
if not requested_order and not fallback:
return results
# and if it matches a long-form for related fields, falling
# back to the default order
selected_order = allowed_orders.get(requested_order, None)
if selected_order is None and fallback:
selected_order = default_order
# The query may already have an ORDER BY statement attached
# so we clear it here and apply the selected order
return sort_query(results.order_by(None), selected_order)
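# Example (illustrative sketch; "SomeModel" is a placeholder for any SQLAlchemy
# model exposed by a list endpoint): a handler can allow "?order=name" by passing
# the orders it is willing to accept, falling back to the default otherwise:
#
#   allowed = {"name": "name", "-name": "-name", "created_at": "created_at"}
#   results = order_results(SomeModel.query, default_order="-created_at",
#                           allowed_orders=allowed)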
|
{
"content_hash": "c061a6ded08b7fcc1ab3fc915493f908",
"timestamp": "",
"source": "github",
"line_count": 144,
"max_line_length": 88,
"avg_line_length": 30.006944444444443,
"alnum_prop": 0.666512381393196,
"repo_name": "denisov-vlad/redash",
"id": "26db7130035e205fbe3e813db5bccd1c18d69d61",
"size": "4321",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "redash/handlers/base.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "2135"
},
{
"name": "Dockerfile",
"bytes": "3500"
},
{
"name": "HTML",
"bytes": "32865"
},
{
"name": "JavaScript",
"bytes": "990852"
},
{
"name": "Less",
"bytes": "196598"
},
{
"name": "Makefile",
"bytes": "1381"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1229816"
},
{
"name": "Shell",
"bytes": "4734"
},
{
"name": "TypeScript",
"bytes": "521588"
}
],
"symlink_target": ""
}
|
__author__ = "Cedric Bonhomme"
__version__ = "$Revision: 0.1 $"
__date__ = "$Date: 2016/06/01$"
__revision__ = "$Date: 2016/06/10 $"
__copyright__ = "Copyright (c) Luxembourg Institute of Science and Technology"
__license__ = ""
import sys
import subprocess
try:
from weasyprint import HTML
except Exception as e:
print("Problem with weasyprint: {}".format(e))
import conf
def launch_background_process(parameters=[]):
"""
    Fetch the feeds in a new process.
The "asyncio" crawler is launched with the manager.
"""
cmd = [sys.executable, conf.BASE_DIR + "/manager.py"]
cmd.extend(parameters)
return subprocess.Popen(cmd, stdout=subprocess.PIPE)
def create_pdf(html_code):
""""""
pdf_file = HTML(
string=html_code, base_url=conf.SHELTERS_PICTURES_SERVER_PATH
).write_pdf()
return pdf_file
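# Example usage (illustrative only; `html_code` stands for any complete HTML
# document string rendered elsewhere in the web app):
#
#   pdf_bytes = create_pdf(html_code)
#   with open("shelter.pdf", "wb") as fd:
#       fd.write(pdf_bytes)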
|
{
"content_hash": "7d7991b30a41ef507aeaa27ca2af2705",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 25.147058823529413,
"alnum_prop": 0.6549707602339181,
"repo_name": "cedricbonhomme/shelter-database",
"id": "1b31f940dc73fb84c486b1c81768a72a9e86af04",
"size": "1109",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "shelter/web/lib/misc_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "215790"
},
{
"name": "HTML",
"bytes": "340201"
},
{
"name": "JavaScript",
"bytes": "502358"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "167944"
},
{
"name": "Shell",
"bytes": "1952"
}
],
"symlink_target": ""
}
|
import numpy as np
from numpy import linalg
class Quaternion (object):
"""
Quaternion class :
------------------
A quaternion has a scalar part and a vector part.
In this class the quaternion is represented as an array of 4 elements :
- the first element is the scalar part
- the next 3 elements represents the vector part
    One can access the array directly with the attribute "array"
    e.g. q1=Quaternion(1,0,0,0) --> q1.array
    A quaternion can be instantiated with 1, 2 or 4 elements
(see : __init__() for more information).
It can also return a rotation vector, a rotation matrix, or a SO3
(see the methods : to...() for more information).
"""
def __init__(self,*args):
"""
        Instantiation of the quaternion with 1, 2 or 4 arguments :
-----------------------------------------------------------
This creates a 4-sized array (self.array) representing the quaternion
with the first element representing the scalar part
and the 3 others the vector part.
With 4 arguments :
------------------
- the first one is used as the scalar part,
the other three as the vector part.
With 2 arguments :
------------------
- the 1-sized argument is used as the scalar part,
the 3-sized argument is used as the vector part.
With 1 argument :
-----------------
- if it is a quaternion it will create a copy of this quaternion.
- if it is a scalar, the scalar will be used as the scalar part
and the vector part will be set at (0,0,0).
- if it is an array, matrix, tuple or list of 4 elements,
the first element is used as the scalar part
and the rest as the vector part.
- if it is an array, matrix, tuple or list of 3 elements,
the 3 elements are interpreted as a rotation vector,
this creates a quaternion representing the same rotation.
        - if it is an array, matrix, tuple or list convertible to a
          2-dimensional array with at least (3*3) elements,
the upper left (3*3) elements are interpreted as a rotation matrix,
this creates a quaternion representing the same rotation.
- if it is an instance of SO3, quaternion is built from rotation
matrix.
With 0 arguments :
------------------
        If no argument is given, then the quaternion will be set by default
        with the scalar part set to 1 and the vector part to (0,0,0).
(this is the neutral element for multiplication in quaternion space)
To create a quaternion from Roll, Pitch, Yaw angles :
-----------------------------------------------------
        first instantiate a quaternion and then use the method fromRPY()
        to change its values to the desired ones.
        e.g. : Quaternion().fromRPY(R,P,Y)
"""
error=False
if len(args)==0: # By default, if no argument is given
self.array=np.array([1.,0.,0.,0.])
elif len (args) == 4: # From 4 elements
if np.array(args).size==4:
self.array = np.double(np.array (args))
else:
error=True
elif len (args) == 1:
if type(args[0])==Quaternion: # From a Quaternion
self.array=args[0].array.copy()
elif np.array(args[0]).size==1: # From one sized element, this element will be the scalar part, the vector part will be set at (0,0,0)
self.array=np.double(np.hstack([np.array(args[0]),np.array([0,0,0])]))
elif np.array(args[0]).size==4 and max(np.array(args[0]).shape)==4: # From an array, matrix, tuple or list of 4 elements
self.array = np.double(np.array(args[0])).reshape(4,)
elif np.array(args[0]).size==3 and max(np.array(args[0]).shape)==3: # From an array, matrix, tuple or list of 3 elements interpreted as a rotation vector
rV=np.double(np.array(args[0])).reshape(3,)
alpha=np.double(linalg.norm(rV))
if alpha !=0:
e=rV/alpha
else:
e=rV
self.array=np.hstack([np.cos(alpha/2.),np.sin(alpha/2.)*e])
elif len(np.array(args[0]).shape)==2 and np.array(args[0]).shape[0]>=3 and np.array(args[0]).shape[1]>=3: # From a to 2 dimension array convertible array, matrix, tuple or list with at least (3*3) elements interpreted as a rotation matrix
rM=np.double(np.array(args[0])[:3,:3])
selec=np.zeros(4)
selec[0]=1+rM[0,0]+rM[1,1]+rM[2,2]
selec[1]=1+rM[0,0]-rM[1,1]-rM[2,2]
selec[2]=1-rM[0,0]+rM[1,1]-rM[2,2]
selec[3]=1-rM[0,0]-rM[1,1]+rM[2,2]
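                # For a unit quaternion these four values equal
                # 4*q0^2, 4*q1^2, 4*q2^2 and 4*q3^2; the largest one is picked
                # below so the divisions that follow never use a near-zero
                # component (the usual Shepperd-style extraction).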
param=selec.argmax()
if selec[param]>0:
q=np.zeros(4)
if param==0:
q[0]=np.sqrt(selec[param])
q[1]=(rM[2,1]-rM[1,2])/q[0]
q[2]=(rM[0,2]-rM[2,0])/q[0]
q[3]=(rM[1,0]-rM[0,1])/q[0]
self.array=q*0.5
# print '--1--V3'
elif param==1:
q[1]=np.sqrt(selec[param])
q[0]=(rM[2,1]-rM[1,2])/q[1]
q[2]=(rM[1,0]+rM[0,1])/q[1]
q[3]=(rM[0,2]+rM[2,0])/q[1]
self.array=q*0.5
# print '--2--V3'
elif param==2:
q[2]=np.sqrt(selec[param])
q[0]=(rM[0,2]-rM[2,0])/q[2]
q[1]=(rM[1,0]+rM[0,1])/q[2]
q[3]=(rM[2,1]+rM[1,2])/q[2]
self.array=q*0.5
# print '--3--V3'
elif param==3:
q[3]=np.sqrt(selec[param])
q[0]=(rM[1,0]-rM[0,1])/q[3]
q[1]=(rM[0,2]+rM[2,0])/q[3]
q[2]=(rM[2,1]+rM[1,2])/q[3]
self.array=q*0.5
# print '--4--V3'
else:
error=True
else:
error=True
elif len(args)==2: # From a scalar part (1 element) and a vector part (3 elements)
arg0=np.double(np.array(args[0]))
arg1=np.double(np.array(args[1]))
if arg0.size==1 and arg1.size==3:
self.array=np.zeros(4)
self.array[0]=arg0
self.array[1:4]=arg1[:]
elif arg0.size==3 and arg1.size==1:
self.array=np.zeros(4)
self.array[0]=arg1
self.array[1:4]=arg0[:]
else:
error=True
else:
error=True
if error==False and self.array.shape!=(4,):
del self.array
error=True
if error:
            raise TypeError ("Impossible to instantiate the Quaternion object with the given arguments")
def __str__(self):
"""
String representation of the quaternion.
"""
aff='[ '
aff+=str(self.array [0])+' + '
aff+=str(self.array [1])+' i + '
aff+=str(self.array [2])+' j + '
aff+=str(self.array [3])+' k ]'
return aff
def __neg__(self):
"""
Returns a quaternion which elements are the opposite of the original,
(this opposite quaternion represents the same rotation).
"""
return Quaternion(-self.array)
def __add__(self,other):
"""
        If other is not a quaternion it is cast to a quaternion,
the elements are added one to one.
"""
if type(other)!=Quaternion:
q2=Quaternion(other)
else:
q2=other
return Quaternion(self.array+q2.array)
def __sub__(self,other):
"""
        If other is not a quaternion it is cast to a quaternion,
        the elements are subtracted one to one.
"""
if type(other)!=Quaternion:
q2=Quaternion(other)
else:
q2=other
return Quaternion(self.array-q2.array)
def __mul__(self,other):
"""
        If other is not a quaternion it is cast to a quaternion,
the result of the quaternion multiplication is returned.
"""
if type(other)!=Quaternion:
q2=Quaternion(other)
else:
q2=other
qr=np.zeros(4)
qr[0]=self.array[0]*q2.array[0]-np.vdot(self.array[1:],q2.array[1:])
qr[1:4]=np.cross(self.array[1:4],q2.array[1:4])+self.array[0]*q2.array[1:4]+q2.array[0]*self.array[1:4]
return Quaternion(qr)
def __rmul__(self,other):
"""
        other is cast to a quaternion,
the result of the quaternion multiplication is returned.
"""
return Quaternion(other)*self
def __abs__(self):
"""
Returns the norm of the quaternion.
"""
return np.double(linalg.norm(self.array))
def conjugate(self):
"""
Returns the conjugate of the quaternion.
"""
return Quaternion(self.array[0],-self.array[1:4])
def inv(self):
"""
Returns the inverse of the quaternion.
"""
return Quaternion(self.conjugate().array/(abs(self)**2))
def __div__(self,other):
"""
        If other is not a quaternion it is cast to a quaternion,
the result of the quaternion multiplication with the inverse of other
is returned.
"""
if type(other)!=Quaternion:
q2=Quaternion(other)
else:
q2=other
return self*q2.inv()
def __pow__(self,n):
"""
Returns quaternion**n with quaternion**0 = Quaternion(1,0,0,0).
"""
r=Quaternion()
for i in range(n):
r=r*self
return r
def normalize (self):
"""
Changes the values of the quaternion to make it a unit quaternion
representing the same rotation as the original one
and returns the updated version.
"""
self.array /= abs(self);
return self
def normalized (self):
"""
Returns the unit quaternion representation of the quaternion
without changing the original.
"""
qr=Quaternion(self)
qr.normalize()
return qr
def toRotationMatrix(self):
"""
Returns a (3*3) array (rotation matrix)
representing the same rotation as the (normalized) quaternion.
"""
q=self.normalized().array
rm=np.zeros((3,3))
rm[0,0]=1-2*(q[2]**2+q[3]**2)
rm[0,1]=2*q[1]*q[2]-2*q[0]*q[3]
rm[0,2]=2*q[1]*q[3]+2*q[0]*q[2]
rm[1,0]=2*q[1]*q[2]+2*q[0]*q[3]
rm[1,1]=1-2*(q[1]**2+q[3]**2)
rm[1,2]=2*q[2]*q[3]-2*q[0]*q[1]
rm[2,0]=2*q[1]*q[3]-2*q[0]*q[2]
rm[2,1]=2*q[2]*q[3]+2*q[0]*q[1]
rm[2,2]=1-2*(q[1]**2+q[2]**2)
return rm
def toRotationVector(self):
"""
Returns a 3-sized array (rotation vector)
representing the same rotation as the (normalized) quaternion.
"""
q=self.normalized().array
rV=np.zeros(3)
alpha=2*np.arccos(q[0])
if linalg.norm(q[1:4])!=0:
rV=alpha*q[1:4]/linalg.norm(q[1:4])
return rV
def copy(self):
"""
Returns a copy of the quaternion.
"""
return Quaternion(self)
def toRPY(self):
"""
        Returns a 3-sized array representing the same rotation
        as the (normalized) quaternion, with:
- the first element representing the Roll,
- the second the Pitch
- the third the Yaw
Where Roll Pitch and Yaw are the angles so that the rotation
with the quaternion represents the same rotation as :
- A rotation of R (Roll) about the original x-axis,
followed by a rotation of P (Pitch) about the original y-axis,
followed by a rotation of Y (Yaw) about the original z-axis.
- Or otherwise a rotation of Y about the original z-axis,
followed by a rotation of P about the new y-axis,
followed by a rotation of R about the new x-axis.
"""
q=self.normalized().array
r=np.arctan2(2*(q[0]*q[1]+q[2]*q[3]),1-2*(q[1]**2+q[2]**2))
        p=np.arctan2(2*(q[0]*q[2]-q[3]*q[1]),np.sqrt((2*(q[0]*q[1]+q[2]*q[3]))**2+(1-2*(q[1]**2+q[2]**2))**2)) # We could use arcsin but arctan2 is more robust
y=np.arctan2(2*(q[0]*q[3]+q[1]*q[2]),1-2*(q[2]**2+q[3]**2))
return np.array([r,p,y])
def fromRPY(self,R,P,Y):
"""
Set the values of the quaternion to the values of a unit quaternion
representing the same rotation as the one performed by Roll Pitch Yaw :
- A rotation of R (Roll) about the original x-axis,
followed by a rotation of P (Pitch) about the original y-axis,
followed by a rotation of Y (Yaw) about the original z-axis.
- Or otherwise a rotation of Y about the original z-axis,
followed by a rotation of P about the new y-axis,
followed by a rotation of R about the new x-axis.
"""
r=R/2.
p=P/2.
y=Y/2.
self.array[0]=np.cos(r)*np.cos(p)*np.cos(y)+np.sin(r)*np.sin(p)*np.sin(y)
self.array[1]=np.sin(r)*np.cos(p)*np.cos(y)-np.cos(r)*np.sin(p)*np.sin(y)
self.array[2]=np.cos(r)*np.sin(p)*np.cos(y)+np.sin(r)*np.cos(p)*np.sin(y)
self.array[3]=np.cos(r)*np.cos(p)*np.sin(y)-np.sin(r)*np.sin(p)*np.cos(y)
return self.normalize()
def toTuple (self):
"""
        Return the quaternion as a tuple of floats starting with the real part.
"""
return tuple (self.array)
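if __name__ == '__main__':
    # Minimal illustrative self-check (a sketch, not a test suite): round-trip
    # a 90 degree rotation about z through the supported representations.
    q = Quaternion([0., 0., np.pi / 2.])   # built from a rotation vector
    print(q)                                # unit quaternion, cos(45 deg) + sin(45 deg) k
    print(q.toRotationMatrix())             # equivalent 3x3 rotation matrix
    print(q.toRPY())                        # roll/pitch/yaw, expect [0, 0, pi/2]
    assert abs(q * q.inv() - Quaternion()) < 1e-9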
|
{
"content_hash": "083447cca4772c7a708891d17c42f30d",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 251,
"avg_line_length": 39,
"alnum_prop": 0.5167980680446055,
"repo_name": "Mathieu-Geisert/SceneViewer-corba",
"id": "1c71e49b2f2e9fc428165be1094d742afd96f1c1",
"size": "14838",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "src/gepetto/quaternion.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "110009"
},
{
"name": "CMake",
"bytes": "6921"
},
{
"name": "Python",
"bytes": "17679"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
from past.builtins import basestring
import collections
import logging
from jsonschema.compat import str_types
from flexget import plugin
from flexget.config_schema import one_or_more
from flexget.event import event
from flexget.entry import Entry
from flexget.utils.cached_input import cached
log = logging.getLogger('from_imdb')
class FromIMDB(object):
"""
This plugin enables generating entries based on an entity, an entity being a person, character or company.
    It's based on IMDBpy which is required (pip install imdbpy). The basic config requires just an IMDB ID of the
required entity.
For example:
from_imdb: ch0001354
Schema description:
Other than ID, all other properties are meant to filter the full list that the entity generates.
id: string that relates to a supported entity type. For example: 'nm0000375'. Required.
job_types: a string or list with job types from job_types. Default is 'actor'.
content_types: A string or list with content types from content_types. Default is 'movie'.
    max_entries: The maximum number of entries that can be returned. This value's purpose is basically flood protection
against unruly configurations that will return too many results. Default is 200.
Advanced config example:
dynamic_movie_queue:
from_imdb:
id: co0051941
job_types:
- actor
- director
content_types: tv series
accept_all: yes
movie_queue: add
"""
job_types = [
'actor',
'actress',
'director',
'producer',
'writer',
'self',
'editor',
'miscellaneous',
'editorial department',
'cinematographer',
'visual effects',
'thanks',
'music department',
'in development',
'archive footage',
'soundtrack',
]
content_types = [
'movie',
'tv series',
'tv mini series',
'video game',
'video movie',
'tv movie',
'episode',
]
content_type_conversion = {
'movie': 'movie',
'tv series': 'tv',
'tv mini series': 'tv',
'tv movie': 'tv',
'episode': 'tv',
'video movie': 'video',
'video game': 'video game',
}
character_content_type_conversion = {
'movie': 'feature',
'tv series': 'tv',
'tv mini series': 'tv',
'tv movie': 'tv',
'episode': 'tv',
'video movie': 'video',
'video game': 'video-game',
}
jobs_without_content_type = ['actor', 'actress', 'self', 'in development', 'archive footage']
imdb_pattern = one_or_more(
{
'type': 'string',
'pattern': r'(nm|co|ch)\d{7,8}',
'error_pattern': 'Get the id from the url of the person/company you want to use,'
' e.g. http://imdb.com/text/<id here>/blah',
},
unique_items=True,
)
schema = {
'oneOf': [
imdb_pattern,
{
'type': 'object',
'properties': {
'id': imdb_pattern,
'job_types': one_or_more(
{'type': 'string', 'enum': job_types}, unique_items=True
),
'content_types': one_or_more(
{'type': 'string', 'enum': content_types}, unique_items=True
),
'max_entries': {'type': 'integer'},
'match_type': {'type': 'string', 'enum': ['strict', 'loose']},
},
'required': ['id'],
'additionalProperties': False,
},
]
}
def prepare_config(self, config):
"""
Converts config to dict form and sets defaults if needed
"""
config = config
if isinstance(config, basestring):
config = {'id': [config]}
elif isinstance(config, list):
config = {'id': config}
if isinstance(config, dict) and not isinstance(config['id'], list):
config['id'] = [config['id']]
config.setdefault('content_types', [self.content_types[0]])
config.setdefault('job_types', [self.job_types[0]])
config.setdefault('max_entries', 200)
config.setdefault('match_type', 'strict')
if isinstance(config.get('content_types'), str_types):
log.debug('Converted content type from string to list.')
config['content_types'] = [config['content_types']]
if isinstance(config['job_types'], str_types):
log.debug('Converted job type from string to list.')
config['job_types'] = [config['job_types']]
# Special case in case user meant to add actress instead of actor (different job types in IMDB)
if 'actor' in config['job_types'] and 'actress' not in config['job_types']:
config['job_types'].append('actress')
return config
def get_items(self, config):
items = []
for id in config['id']:
try:
entity_type, entity_object = self.get_entity_type_and_object(id)
except Exception as e:
log.error(
'Could not resolve entity via ID: {}. '
'Either error in config or unsupported entity. Error:{}'.format(id, e)
)
continue
items += self.get_items_by_entity(
entity_type,
entity_object,
config.get('content_types'),
config.get('job_types'),
config.get('match_type'),
)
return set(items)
def get_entity_type_and_object(self, imdb_id):
"""
Return a tuple of entity type and entity object
:param imdb_id: string which contains IMDB id
:return: entity type, entity object (person, company, etc.)
"""
if imdb_id.startswith('nm'):
person = self.ia.get_person(imdb_id[2:])
log.info('Starting to retrieve items for person: %s' % person)
return 'Person', person
elif imdb_id.startswith('co'):
company = self.ia.get_company(imdb_id[2:])
log.info('Starting to retrieve items for company: %s' % company)
return 'Company', company
elif imdb_id.startswith('ch'):
character = self.ia.get_character(imdb_id[2:])
log.info('Starting to retrieve items for Character: %s' % character)
return 'Character', character
def get_items_by_entity(
self, entity_type, entity_object, content_types, job_types, match_type
):
"""
Gets entity object and return movie list using relevant method
"""
if entity_type == 'Company':
return self.items_by_company(entity_object)
if entity_type == 'Character':
return self.items_by_character(entity_object, content_types, match_type)
elif entity_type == 'Person':
return self.items_by_person(entity_object, job_types, content_types, match_type)
def flatten_list(self, _list):
"""
Gets a list of lists and returns a flat list
"""
for el in _list:
if isinstance(el, collections.Iterable) and not isinstance(el, basestring):
for sub in self.flatten_list(el):
yield sub
else:
yield el
def flat_list(self, non_flat_list, remove_none=False):
flat_list = self.flatten_list(non_flat_list)
if remove_none:
flat_list = [_f for _f in flat_list if _f]
return flat_list
def filtered_items(self, unfiltered_items, content_types, match_type):
items = []
unfiltered_items = set(unfiltered_items)
for item in sorted(unfiltered_items):
if match_type == 'strict':
log.debug('Match type is strict, verifying item type to requested content types')
self.ia.update(item)
if item['kind'] in content_types:
log.verbose(
'Adding item "{}" to list. Item kind is "{}"'.format(item, item['kind'])
)
items.append(item)
else:
                    log.verbose('Rejecting item "{}". Item kind is "{}"'.format(item, item['kind']))
else:
log.debug('Match type is loose, all items are being added')
items.append(item)
return items
def items_by_person(self, person, job_types, content_types, match_type):
"""
Return item list for a person object
"""
unfiltered_items = self.flat_list(
[self.items_by_job_type(person, job_type, content_types) for job_type in job_types],
remove_none=True,
)
return self.filtered_items(unfiltered_items, content_types, match_type)
def items_by_content_type(self, person, job_type, content_type):
return [
_f
for _f in (person.get(job_type + ' ' + self.content_type_conversion[content_type], []))
if _f
]
def items_by_job_type(self, person, job_type, content_types):
items = (
person.get(job_type, [])
if job_type in self.jobs_without_content_type
else [
person.get(job_type + ' ' + 'documentary', [])
and person.get(job_type + ' ' + 'short', [])
and self.items_by_content_type(person, job_type, content_type)
if content_type == 'movie'
else self.items_by_content_type(person, job_type, content_type)
for content_type in content_types
]
)
return [_f for _f in items if _f]
def items_by_character(self, character, content_types, match_type):
"""
Return items list for a character object
:param character: character object
:param content_types: content types as defined in config
:return:
"""
unfiltered_items = self.flat_list(
[
character.get(self.character_content_type_conversion[content_type])
for content_type in content_types
],
remove_none=True,
)
return self.filtered_items(unfiltered_items, content_types, match_type)
def items_by_company(self, company):
"""
Return items list for a company object
:param company: company object
:return: company items list
"""
return company.get('production companies')
@cached('from_imdb', persist='2 hours')
def on_task_input(self, task, config):
try:
from imdb import IMDb
self.ia = IMDb()
except ImportError:
log.error(
'IMDBPY is required for this plugin. Please install using "pip install imdbpy"'
)
return
entries = []
config = self.prepare_config(config)
items = self.get_items(config)
if not items:
log.error('Could not get IMDB item list, check your configuration.')
return
for item in items:
entry = Entry(
title=item['title'],
imdb_id='tt' + self.ia.get_imdbID(item),
url='',
imdb_url=self.ia.get_imdbURL(item),
)
if entry.isvalid():
if entry not in entries:
entries.append(entry)
if entry and task.options.test:
log.info("Test mode. Entry includes:")
for key, value in list(entry.items()):
log.info(' {}: {}'.format(key.capitalize(), value))
else:
log.error('Invalid entry created? %s' % entry)
if len(entries) <= config.get('max_entries'):
return entries
else:
log.warning(
'Number of entries (%s) exceeds maximum allowed value %s. '
'Edit your filters or raise the maximum value by entering a higher "max_entries"'
% (len(entries), config.get('max_entries'))
)
return
@event('plugin.register')
def register_plugin():
plugin.register(FromIMDB, 'from_imdb', api_ver=2)
|
{
"content_hash": "4c06f80e7fe6d0a980c73027199fac2d",
"timestamp": "",
"source": "github",
"line_count": 360,
"max_line_length": 114,
"avg_line_length": 35.28055555555556,
"alnum_prop": 0.54263443823321,
"repo_name": "tobinjt/Flexget",
"id": "ad44ecaea3f114f8b4805f1e38995e5e5f6bab08",
"size": "12701",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "flexget/components/imdb/from_imdb.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "Dockerfile",
"bytes": "2338"
},
{
"name": "HTML",
"bytes": "79800"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3492888"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1576"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
def create_project_locale_permission(apps, schema_editor):
"""Create permissions for project/locale translators."""
Permission = apps.get_model('auth', 'Permission')
ContentType = apps.get_model('contenttypes', 'ContentType')
project_locale_content_type, _ = ContentType.objects.get_or_create(app_label='base', model='projectlocale')
Permission.objects.get_or_create(
codename='can_translate_project_locale',
content_type=project_locale_content_type,
name="Can translate a locale of project"
)
class Migration(migrations.Migration):
dependencies = [
('base', '0072_auto_20161209_2330'),
('auth', '0006_require_contenttypes_0002')
]
operations = [
migrations.RunPython(create_project_locale_permission, migrations.RunPython.noop)
]
|
{
"content_hash": "8ddb910c31b3d3448d543e74139e8321",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 111,
"avg_line_length": 31.06896551724138,
"alnum_prop": 0.6936736958934517,
"repo_name": "mastizada/pontoon",
"id": "bbbabff2e749e225d6d1b1be382b0f725422aabc",
"size": "974",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pontoon/base/migrations/0073_add_project_locale_permission.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "116831"
},
{
"name": "HTML",
"bytes": "131060"
},
{
"name": "JavaScript",
"bytes": "472460"
},
{
"name": "Makefile",
"bytes": "1085"
},
{
"name": "Python",
"bytes": "841704"
},
{
"name": "Shell",
"bytes": "4616"
}
],
"symlink_target": ""
}
|
"""Tests for letsenecrypt.plugins.selection"""
import sys
import unittest
import mock
import zope.component
from certbot.display import util as display_util
from certbot import interfaces
class ConveniencePickPluginTest(unittest.TestCase):
"""Tests for certbot.plugins.selection.pick_*."""
def _test(self, fun, ifaces):
config = mock.Mock()
default = mock.Mock()
plugins = mock.Mock()
with mock.patch("certbot.plugins.selection.pick_plugin") as mock_p:
mock_p.return_value = "foo"
self.assertEqual("foo", fun(config, default, plugins, "Question?"))
mock_p.assert_called_once_with(
config, default, plugins, "Question?", ifaces)
def test_authenticator(self):
from certbot.plugins.selection import pick_authenticator
self._test(pick_authenticator, (interfaces.IAuthenticator,))
def test_installer(self):
from certbot.plugins.selection import pick_installer
self._test(pick_installer, (interfaces.IInstaller,))
def test_configurator(self):
from certbot.plugins.selection import pick_configurator
self._test(pick_configurator,
(interfaces.IAuthenticator, interfaces.IInstaller))
class PickPluginTest(unittest.TestCase):
"""Tests for certbot.plugins.selection.pick_plugin."""
def setUp(self):
self.config = mock.Mock(noninteractive_mode=False)
self.default = None
self.reg = mock.MagicMock()
self.question = "Question?"
self.ifaces = []
def _call(self):
from certbot.plugins.selection import pick_plugin
return pick_plugin(self.config, self.default, self.reg,
self.question, self.ifaces)
def test_default_provided(self):
self.default = "foo"
self._call()
self.assertEqual(1, self.reg.filter.call_count)
def test_no_default(self):
self._call()
self.assertEqual(1, self.reg.visible().ifaces.call_count)
def test_no_candidate(self):
self.assertTrue(self._call() is None)
def test_single(self):
plugin_ep = mock.MagicMock()
plugin_ep.init.return_value = "foo"
plugin_ep.misconfigured = False
self.reg.visible().ifaces().verify().available.return_value = {
"bar": plugin_ep}
self.assertEqual("foo", self._call())
def test_single_misconfigured(self):
plugin_ep = mock.MagicMock()
plugin_ep.init.return_value = "foo"
plugin_ep.misconfigured = True
self.reg.visible().ifaces().verify().available.return_value = {
"bar": plugin_ep}
self.assertTrue(self._call() is None)
def test_multiple(self):
plugin_ep = mock.MagicMock()
plugin_ep.init.return_value = "foo"
self.reg.visible().ifaces().verify().available.return_value = {
"bar": plugin_ep,
"baz": plugin_ep,
}
with mock.patch("certbot.plugins.selection.choose_plugin") as mock_choose:
mock_choose.return_value = plugin_ep
self.assertEqual("foo", self._call())
mock_choose.assert_called_once_with(
[plugin_ep, plugin_ep], self.question)
def test_choose_plugin_none(self):
self.reg.visible().ifaces().verify().available.return_value = {
"bar": None,
"baz": None,
}
with mock.patch("certbot.plugins.selection.choose_plugin") as mock_choose:
mock_choose.return_value = None
self.assertTrue(self._call() is None)
class ChoosePluginTest(unittest.TestCase):
"""Tests for certbot.plugins.selection.choose_plugin."""
def setUp(self):
zope.component.provideUtility(display_util.FileDisplay(sys.stdout))
self.mock_apache = mock.Mock(
description_with_name="a", misconfigured=True)
self.mock_stand = mock.Mock(
description_with_name="s", misconfigured=False)
self.mock_stand.init().more_info.return_value = "standalone"
self.plugins = [
self.mock_apache,
self.mock_stand,
]
def _call(self):
from certbot.plugins.selection import choose_plugin
return choose_plugin(self.plugins, "Question?")
@mock.patch("certbot.plugins.selection.z_util")
def test_selection(self, mock_util):
mock_util().menu.side_effect = [(display_util.OK, 0),
(display_util.OK, 1)]
self.assertEqual(self.mock_stand, self._call())
self.assertEqual(mock_util().notification.call_count, 1)
@mock.patch("certbot.plugins.selection.z_util")
def test_more_info(self, mock_util):
mock_util().menu.side_effect = [
(display_util.HELP, 0),
(display_util.HELP, 1),
(display_util.OK, 1),
]
self.assertEqual(self.mock_stand, self._call())
self.assertEqual(mock_util().notification.call_count, 2)
@mock.patch("certbot.plugins.selection.z_util")
def test_no_choice(self, mock_util):
mock_util().menu.return_value = (display_util.CANCEL, 0)
self.assertTrue(self._call() is None)
|
{
"content_hash": "d5768eb7939a3318eebe1d77dc1c5a69",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 82,
"avg_line_length": 34.87248322147651,
"alnum_prop": 0.6212471131639723,
"repo_name": "bsmr-misc-forks/letsencrypt",
"id": "001ca5cff734097d8f7b651cf0e05ed619ece7c9",
"size": "5196",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "certbot/plugins/selection_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "62302"
},
{
"name": "Augeas",
"bytes": "5245"
},
{
"name": "Batchfile",
"bytes": "35005"
},
{
"name": "DIGITAL Command Language",
"bytes": "516"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37245"
},
{
"name": "Nginx",
"bytes": "118585"
},
{
"name": "Python",
"bytes": "1503621"
},
{
"name": "Shell",
"bytes": "177504"
}
],
"symlink_target": ""
}
|
from test_framework.test_framework import DankcoinTestFramework
from test_framework.util import str_to_b64str, assert_equal
import os
import http.client
import urllib.parse
class HTTPBasicsTest (DankcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = False
self.num_nodes = 1
def setup_chain(self):
super().setup_chain()
#Append rpcauth to dankcoin.conf before initialization
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
with open(os.path.join(self.options.tmpdir+"/node0", "dankcoin.conf"), 'a') as f:
f.write(rpcauth+"\n")
f.write(rpcauth2+"\n")
def setup_network(self):
self.nodes = self.setup_nodes()
def run_test(self):
##################################################
# Check correctness of the rpcauth config option #
##################################################
url = urllib.parse.urlparse(self.nodes[0].url)
#Old authpair
authpair = url.username + ':' + url.password
#New authpair generated via share/rpcuser tool
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
password = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
#Second authpair with different username
rpcauth2 = "rpcauth=rt2:f8607b1a88861fac29dfccf9b52ff9f$ff36a0c23c8c62b4846112e50fa888416e94c17bfd4c42f88fd8f55ec6a3137e"
password2 = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
authpairnew = "rt:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpair)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Use new authpair to confirm both work
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong login name with rt's password
authpairnew = "rtwrong:"+password
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Wrong password for rt
authpairnew = "rt:"+password+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
#Correct for rt2
authpairnew = "rt2:"+password2
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, False)
conn.close()
#Wrong password for rt2
authpairnew = "rt2:"+password2+"wrong"
headers = {"Authorization": "Basic " + str_to_b64str(authpairnew)}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
assert_equal(resp.status==401, True)
conn.close()
if __name__ == '__main__':
HTTPBasicsTest ().main ()
|
{
"content_hash": "9feeda52a5c665f69883bcb52bac7603",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 129,
"avg_line_length": 38.693693693693696,
"alnum_prop": 0.6265424912689174,
"repo_name": "dankcoin/dankcoin",
"id": "f5162519fb2849e9e2045a4641673232c3a0eec0",
"size": "4562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/multi_rpc.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "679373"
},
{
"name": "C++",
"bytes": "4691594"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "3831"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2101"
},
{
"name": "M4",
"bytes": "173500"
},
{
"name": "Makefile",
"bytes": "102963"
},
{
"name": "Objective-C",
"bytes": "3777"
},
{
"name": "Objective-C++",
"bytes": "7242"
},
{
"name": "Protocol Buffer",
"bytes": "2312"
},
{
"name": "Python",
"bytes": "908583"
},
{
"name": "QMake",
"bytes": "2021"
},
{
"name": "Shell",
"bytes": "34197"
}
],
"symlink_target": ""
}
|
__author__ = 'Tom Schaul, tom@idsia.ch'
class Experiment(object):
""" An experiment matches up a task with an agent and handles their interactions.
"""
def __init__(self, task, agent):
self.task = task
self.agent = agent
self.stepid = 0
def doInteractions(self, number = 1):
""" The default implementation directly maps the methods of the agent and the task.
        Returns the reward of the last interaction.
"""
for dummy in range(number):
reward = self._oneInteraction()
return reward
def _oneInteraction(self):
self.stepid += 1
self.agent.integrateObservation(self.task.getObservation())
self.task.performAction(self.agent.getAction())
reward = self.task.getReward()
self.agent.giveReward(reward)
return reward
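# Typical usage (illustrative sketch; `task` and `agent` stand for any objects
# providing the getObservation/performAction/getReward and
# integrateObservation/getAction/giveReward methods used above):
#
#   experiment = Experiment(task, agent)
#   for _ in range(100):
#       experiment.doInteractions(1)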
|
{
"content_hash": "1ab63c184863fbd4f0e0f1c3e9d92c2d",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 91,
"avg_line_length": 31.77777777777778,
"alnum_prop": 0.6223776223776224,
"repo_name": "daanwierstra/pybrain",
"id": "6a9edefc2cf8081e5e317c22bef73b109e15690e",
"size": "858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pybrain/rl/experiments/experiment.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "380415"
},
{
"name": "Python",
"bytes": "1279804"
},
{
"name": "Shell",
"bytes": "121"
}
],
"symlink_target": ""
}
|
import time
import xbahn.path
from xbahn.mixins import EventMixin, LogMixin
from xbahn.connection import receiver, sender
from xbahn.message import Message
LINK_NAME_COUNTER = 0
def get_link_name():
global LINK_NAME_COUNTER
LINK_NAME_COUNTER += 1
return "lnk-%d" % LINK_NAME_COUNTER
class Wire(LogMixin, EventMixin):
def __init__(self, receive=None, send=None, respond=None):
EventMixin.__init__(self)
self.connection_receive = receive
self.connection_respond = respond
self.connection_send = send
self.meta = {}
self.name = ""
if receive:
receive.on("receive", self.on_receive)
def __repr__(self):
return "WIRE-%s" % self.name
def disconnect(self):
"""
Close all connections that are set on this wire
"""
if self.connection_receive:
self.connection_receive.close()
if self.connection_respond:
self.connection_respond.close()
if self.connection_send:
self.connection_send.close()
def send(self, path, message):
if message.path:
message.meta.update(path=xbahn.path.append(message.path, path))
else:
message.meta.update(path=path)
if self.meta:
message.meta.update(**self.meta)
if message.has_callbacks("response"):
#message is expecting response
self.on("response_%s" % message.id, message.attach_response, once=True)
self.on("response_%s" % message.id, message.response, once=True)
self.connection_send.send(message)
def send_and_wait(self, path, message, timeout=0, responder=None):
"""
Send a message and block until a response is received. Return response message
"""
message.on("response", lambda x,event_origin,source:None, once=True)
if timeout > 0:
ts = time.time()
else:
ts = 0
sent = False
while not message.response_received:
if not sent:
self.send(path, message)
sent = True
if ts:
if time.time() - ts > timeout:
raise exceptions.TimeoutError("send_and_wait(%s)"%path, timeout)
return message.response_message
def respond(self, to_message, message, path=None):
if not path:
path = "xbahn.response.%s" % to_message.id
message.meta.update(response_id=to_message.id, path=path)
if self.meta:
message.meta.update(**self.meta)
self.connection_respond.respond(to_message, message)
def on_receive(self, message=None, data=None, event_origin=None):
# trigger events for message received
self.trigger("receive", message=message)
for p in xbahn.path.walk(message.path):
self.trigger("receive_%s" % p, message=message)
if message.response_id:
# trigger events for response received
self.trigger("response", message=message)
self.trigger("response_%s" % message.response_id, message=message)
class Link(LogMixin, EventMixin):
"""
Manages receiving and sending to one or more connection
Attributes:
- name (str): the links name
- main (Wire): the main wire, is set automatically during the
first wire() call or by passing "main" as the name during wire()
"""
def __init__(self, name=None, **kwargs):
"""
Keyword Arguments:
- name (str): name for this link, if not provided a unique
default value will be used
- receive (Connection): if supplied a "main" wire will be
established using this connection as a receiver
- send (Connection): if supplied a "main" wire will be
established using this connection as a sender
- respond (Connection): if supplied a "main" wire will be
established using this connection as a responder
"""
EventMixin.__init__(self)
self.name = name or get_link_name()
self.main = None
if "receive" in kwargs or "send" in kwargs or "respond" in kwargs:
self.wire(
"main",
receive=kwargs.get("receive"),
send=kwargs.get("send"),
respond=kwargs.get("respond")
)
def __repr__(self):
return "LINK: %s" % self.name
def wire(self, name, receive=None, send=None, respond=None, **kwargs):
"""
Wires the link to a connection. Can be called multiple
times to set up wires to different connections
After creation wire will be accessible on the link via its name
as an attribute.
You can undo this action with the cut() method
Arguments:
- name (str): unique name for the wire
Keyword Arguments:
- receive (Connection): wire receiver to this connection
- respond (Connection): wire responder to this connection
- send (Connection): wire sender to this connection
- meta (dict): attach these meta variables to any message
sent from this wire
Returns:
- Wire: the created wire instance
"""
if hasattr(self, name) and name != "main":
            raise AttributeError("cannot use '%s' as name for wire, attribute already exists" % name)
if send:
self.log_debug("Wiring '%s'.send: %s" % (name, send))
if respond:
self.log_debug("Wiring '%s'.respond: %s" % (name, respond))
if receive:
self.log_debug("Wiring '%s'.receive: %s" % (name, receive))
wire = Wire(receive=receive, send=send, respond=respond)
wire.name = "%s.%s" % (self.name, name)
wire.meta = kwargs.get("meta",{})
wire.on("receive", self.on_receive)
setattr(self, name, wire)
if not self.main:
self.main = wire
return wire
def cut(self, name, disconnect=False):
"""
Cut a wire (undo a wire() call)
Arguments:
- name (str): name of the wire
Keyword Arguments:
- disconnect (bool): if True also disconnect all connections on the
specified wire
"""
wire = getattr(self, name, None)
if wire and isinstance(wire, Wire):
if name != "main":
delattr(self, name)
if disconnect:
wire.disconnect()
wire.off("receive", self.on_receive)
if self.main == wire:
self.main = None
self.set_main_wire()
def set_main_wire(self, wire=None):
"""
Sets the specified wire as the link's main wire
This is done automatically during the first wire() call
Keyword Arguments:
- wire (Wire): if None, use the first wire instance found
Returns:
- Wire: the new main wire instance
"""
if not wire:
for k in dir(self):
if isinstance(getattr(self, k), Wire):
wire = getattr(self, k)
break
elif not isinstance(wire, Wire):
raise ValueError("wire needs to be a Wire instance")
if not isinstance(wire, Wire):
wire = None
self.main = wire
return wire
def wires(self):
"""
Yields name (str), wire (Wire) for all wires on
this link
"""
for k in dir(self):
if isinstance(getattr(self, k), Wire):
yield k, getattr(self, k)
def disconnect(self):
"""
Cut all wires and disconnect all connections established on this link
"""
for name, wire in self.wires():
self.cut(name, disconnect=True)
def on_receive(self, message=None, event_origin=None):
# trigger events for message received
self.trigger("receive", message=message, wire=event_origin)
for p in xbahn.path.walk(message.path):
self.trigger("receive_%s" % p, message=message, wire=event_origin)
if message.response_id:
# trigger events for response received
self.trigger("response", message=message, wire=event_origin)
self.trigger("response_%s" % message.response_id, message=message, wire=event_origin)
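# Example wiring (illustrative sketch; `conn_in` and `conn_out` stand for xbahn
# connection objects created elsewhere, `msg` for an xbahn.message.Message
# instance, and `handle_status` for a placeholder event handler):
#
#   link = Link(name="api", receive=conn_in, send=conn_out)
#   link.on("receive_status", handle_status)   # react to messages addressed to "status"
#   link.main.send("status", msg)              # send over the main wire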
|
{
"content_hash": "53bfa4bee592713e6ceb256249f54b67",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 97,
"avg_line_length": 29.943859649122807,
"alnum_prop": 0.5699554722287321,
"repo_name": "20c/xbahn",
"id": "108212786cdf3dee486e1c87feb16f5b5169a332",
"size": "8534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xbahn/connection/link.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "107754"
}
],
"symlink_target": ""
}
|
import random
def stoke_fire(player_values):
wood = player_values['wood']
warmth = player_values['warmth']
hunger = player_values['hunger']
friends = player_values['friends']
anxiety = player_values['anxiety']
pet = player_values['pet']
if int(wood) < 1:
status = "No wood!"
else:
status = "Warm from Stoking Fire"
if int(friends) < 1:
warmth = (int(warmth) + int(pet)) + random.randint(1,4)
hunger = int(hunger) - random.randint(1,4)
anxiety = (int(anxiety) + int(pet)) - random.randint(1,2)
wood = int(wood) - random.randint(1,4)
elif int(friends) > 0:
if int(friends) < 5:
warmth = (int(warmth) + int(pet)) + (random.randint(1,4) * int(friends))
hunger = int(hunger) - random.randint(1,4)
anxiety = (int(anxiety) + int(pet)) - random.randint(1,2)
wood = int(wood) - (random.randint(1,4)/int(friends))
elif int(friends) > 5:
if int(friends) < 10:
warmth = (int(warmth) + int(pet)) + (random.randint(1,4) * (int(friends)/2))
hunger = int(hunger) - random.randint(1,4)
anxiety = (int(anxiety) + int(pet)) - (random.randint(1,2)/0.5)
wood = int(wood) - (random.randint(1,4)/int(friends))
elif int(friends) > 10:
warmth = (int(warmth) + int(pet)) + (random.randint(1,4) * (int(friends)/4))
hunger = int(hunger) - random.randint(1,4)
anxiety = (int(anxiety) + int(pet)) - (random.randint(1,2)/2)
wood = int(wood) - (random.randint(1,4)/int(friends))
player_values['wood'] = wood
player_values['warmth'] = warmth
player_values['hunger'] = hunger
player_values['friends'] = friends
player_values['anxiety'] = anxiety
player_values['pet'] = pet
return player_values
def gather_wood(player_values):
wood = player_values['wood']
warmth = player_values['warmth']
hunger = player_values['hunger']
friends = player_values['friends']
anxiety = player_values['anxiety']
pet = player_values['pet']
if int(anxiety) < 5:
status = "Too Scared to Gather Wood"
elif int(warmth) < 1:
status = "Too Cold to Gather Wood"
else:
status = "Cold from Gathering Wood"
if int(friends) < 1:
warmth = (int(warmth) + int(pet)) - random.randint(1,4)
hunger = int(hunger) - random.randint(1,4)
anxiety = (int(anxiety) + int(pet)) - random.randint(1,2)
wood = (int(wood) + random.randint(1,4)) * int(anxiety)
if int(friends) > 0:
warmth = (int(warmth) + int(pet)) - random.randint(1,4)
hunger = int(hunger) - random.randint(1,4)
anxiety = (int(anxiety) + int(pet)) - random.randint(1,2)
wood = (int(wood) + (random.randint(1,4) * int(friends))) * int(anxiety)
player_values['wood'] = wood
player_values['warmth'] = warmth
player_values['hunger'] = hunger
player_values['friends'] = friends
player_values['anxiety'] = anxiety
player_values['pet'] = pet
return player_values
def gather_food(player_values):
wood = player_values['wood']
warmth = player_values['warmth']
hunger = player_values['hunger']
friends = player_values['friends']
anxiety = player_values['anxiety']
food = player_values['food']
pet = player_values['pet']
if int(anxiety) < 5:
status = "Too Scared to Hunt"
elif int(warmth) < 1:
status = "Too Cold to Hunt"
else:
status = "Cold from Hunting"
if int(friends) < 1:
warmth = (int(warmth) + int(pet)) - random.randint(1,4)
hunger = int(hunger) - random.randint(1,4)
anxiety = (int(anxiety) + int(pet)) - random.randint(1,2)
food = (int(food) + random.randint(1,4)) * int(anxiety)
if int(friends) > 0:
warmth = (int(warmth) + int(pet)) - random.randint(1,4)
hunger = int(hunger) - random.randint(1,4)
anxiety = (int(anxiety) + int(pet)) - random.randint(1,2)
food = (int(food) + (random.randint(1,4) * int(friends))) * int(anxiety)
player_values['wood'] = wood
player_values['warmth'] = warmth
player_values['hunger'] = hunger
player_values['friends'] = friends
player_values['anxiety'] = anxiety
player_values['food'] = food
player_values['pet'] = pet
return player_values
def eat_food(player_values):
wood = player_values['wood']
warmth = player_values['warmth']
hunger = player_values['hunger']
friends = player_values['friends']
health = player_values['health']
anxiety = player_values['anxiety']
food = player_values['food']
pet = player_values['pet']
if int(warmth) < 1:
status = "Too Cold to Eat"
elif int(food) < 1:
status = "No Food!"
else:
status = "Less Hungry from Eating"
if int(friends) < 1:
warmth = (int(warmth) + int(pet)) - random.randint(1,4)
hunger = int(hunger) + random.randint(1,4)
anxiety = (int(anxiety) + int(pet)) - random.randint(1,2)
food = int(food) - random.randint(1,4)
health = int(health) + random.randint(1,4)
if int(friends) > 0:
warmth = (int(warmth) + int(pet)) - random.randint(1,4)
hunger = int(hunger) + (random.randint(1,4) * int(friends))
anxiety = (int(anxiety) + int(pet)) - random.randint(1,2)
food = int(food) - (random.randint(1,4) * int(friends))
health = int(health) + (random.randint(1,4) * int(friends))
player_values['wood'] = wood
player_values['warmth'] = warmth
player_values['hunger'] = hunger
player_values['friends'] = friends
player_values['health'] = health
player_values['anxiety'] = anxiety
player_values['food'] = food
player_values['pet'] = pet
return player_values
def do_nothing(player_values):
wood = player_values['wood']
warmth = player_values['warmth']
hunger = player_values['hunger']
friends = player_values['friends']
anxiety = player_values['anxiety']
food = player_values['food']
status = player_values['status']
    pet = player_values['pet']
    # 'stranger' is referenced below but never defined in the original; assume an
    # optional key in player_values that defaults to 0 when absent.
    stranger = player_values.get('stranger', 0)
if int(pet) > 0:
status = "Cuddling..."
warmth = ((int(warmth) + int(pet)) + (random.randint(1,4) * int(friends))) * int(wood)
hunger = int(hunger) - (random.randint(1,4) * int(friends))
anxiety = (int(anxiety) + int(pet)) + (random.randint(4,8) * int(friends))
wood = int(wood) - random.randint(1,4)
elif int(friends) < 1:
status = "Doing nothing..."
warmth = ((int(warmth) + int(pet)) + random.randint(1,4)) * int(wood)
hunger = int(hunger) - random.randint(1,4)
anxiety = (int(anxiety) + int(pet)) + random.randint(4,8)
wood = int(wood) - random.randint(1,4)
elif int(friends) > 0:
status = "Doing nothing with friends..."
warmth = ((int(warmth) + int(pet)) + (random.randint(1,4) * int(friends))) * int(wood)
hunger = int(hunger) - (random.randint(1,4) * int(friends))
anxiety = (int(anxiety) + int(pet)) + (random.randint(4,8) * int(friends))
wood = int(wood) - random.randint(1,4)
elif int(stranger) > 1:
status = "Doing nothing... With a freak in the house..."
warmth = ((int(warmth) + int(pet)) + random.randint(1,4)) * int(wood)
hunger = int(hunger) - random.randint(1,4)
anxiety = (int(anxiety) + int(pet)) - (stranger * (random.randint(1,2) / 10))
wood = int(wood) - random.randint(1,4)
player_values['wood'] = wood
player_values['warmth'] = warmth
player_values['hunger'] = hunger
player_values['friends'] = friends
player_values['anxiety'] = anxiety
player_values['food'] = food
player_values['status'] = status
player_values['pet'] = pet
return player_values
if __name__ == '__main__':
print("This is the game actions library, designed for use with AColdWalk, licensed under the MIT License, (c) James Milne 2015")
|
{
"content_hash": "f3996805af82f2e2ca16549ab273ade1",
"timestamp": "",
"source": "github",
"line_count": 189,
"max_line_length": 132,
"avg_line_length": 43.407407407407405,
"alnum_prop": 0.5748415407118479,
"repo_name": "shakna-israel/AColdWalk",
"id": "9a1988a8c4b46e79783b8a007748d61f50ff85c7",
"size": "8204",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "actions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27376"
}
],
"symlink_target": ""
}
|
import cStringIO
import logging
import os
import sys
import textwrap
import unittest
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, ROOT_DIR)
try:
from collections import OrderedDict # pylint: disable=E0611
except ImportError:
SIMPLEJSON_PATH = os.path.join(ROOT_DIR, os.pardir, os.pardir, 'third_party')
sys.path.insert(0, SIMPLEJSON_PATH)
from simplejson import OrderedDict
import dmprof
from find_runtime_symbols import FUNCTION_SYMBOLS
from find_runtime_symbols import SOURCEFILE_SYMBOLS
from find_runtime_symbols import TYPEINFO_SYMBOLS
class SymbolMappingCacheTest(unittest.TestCase):
class MockBucketSet(object):
def __init__(self, addresses):
self._addresses = addresses
def iter_addresses(self, symbol_type): # pylint: disable=W0613
for address in self._addresses:
yield address
class MockSymbolFinder(object):
def __init__(self, mapping):
self._mapping = mapping
def find(self, address_list):
result = OrderedDict()
for address in address_list:
result[address] = self._mapping[address]
return result
_TEST_FUNCTION_CACHE = textwrap.dedent("""\
1 0x0000000000000001
7fc33eebcaa4 __gnu_cxx::new_allocator::allocate
7fc33ef69242 void DispatchToMethod
""")
_EXPECTED_TEST_FUNCTION_CACHE = textwrap.dedent("""\
1 0x0000000000000001
7fc33eebcaa4 __gnu_cxx::new_allocator::allocate
7fc33ef69242 void DispatchToMethod
2 0x0000000000000002
7fc33ef7bc3e std::map::operator[]
7fc34411f9d5 WTF::RefCounted::operator new
""")
_TEST_FUNCTION_ADDRESS_LIST1 = [
0x1, 0x7fc33eebcaa4, 0x7fc33ef69242]
_TEST_FUNCTION_ADDRESS_LIST2 = [
0x1, 0x2, 0x7fc33eebcaa4, 0x7fc33ef69242, 0x7fc33ef7bc3e, 0x7fc34411f9d5]
_TEST_FUNCTION_DICT = {
0x1: '0x0000000000000001',
0x2: '0x0000000000000002',
0x7fc33eebcaa4: '__gnu_cxx::new_allocator::allocate',
0x7fc33ef69242: 'void DispatchToMethod',
0x7fc33ef7bc3e: 'std::map::operator[]',
0x7fc34411f9d5: 'WTF::RefCounted::operator new',
}
def test_update(self):
symbol_mapping_cache = dmprof.SymbolMappingCache()
cache_f = cStringIO.StringIO()
cache_f.write(self._TEST_FUNCTION_CACHE)
# No update from self._TEST_FUNCTION_CACHE
symbol_mapping_cache.update(
FUNCTION_SYMBOLS,
self.MockBucketSet(self._TEST_FUNCTION_ADDRESS_LIST1),
self.MockSymbolFinder(self._TEST_FUNCTION_DICT), cache_f)
for address in self._TEST_FUNCTION_ADDRESS_LIST1:
self.assertEqual(self._TEST_FUNCTION_DICT[address],
symbol_mapping_cache.lookup(FUNCTION_SYMBOLS, address))
self.assertEqual(self._TEST_FUNCTION_CACHE, cache_f.getvalue())
# Update to self._TEST_FUNCTION_ADDRESS_LIST2
symbol_mapping_cache.update(
FUNCTION_SYMBOLS,
self.MockBucketSet(self._TEST_FUNCTION_ADDRESS_LIST2),
self.MockSymbolFinder(self._TEST_FUNCTION_DICT), cache_f)
for address in self._TEST_FUNCTION_ADDRESS_LIST2:
self.assertEqual(self._TEST_FUNCTION_DICT[address],
symbol_mapping_cache.lookup(FUNCTION_SYMBOLS, address))
self.assertEqual(self._EXPECTED_TEST_FUNCTION_CACHE, cache_f.getvalue())
class PolicyTest(unittest.TestCase):
class MockSymbolMappingCache(object):
def __init__(self):
self._symbol_caches = {
FUNCTION_SYMBOLS: {},
SOURCEFILE_SYMBOLS: {},
TYPEINFO_SYMBOLS: {},
}
def add(self, symbol_type, address, symbol):
self._symbol_caches[symbol_type][address] = symbol
def lookup(self, symbol_type, address):
symbol = self._symbol_caches[symbol_type].get(address)
return symbol if symbol else '0x%016x' % address
_TEST_POLICY = textwrap.dedent("""\
{
"components": [
"second",
"mmap-v8",
"malloc-v8",
"malloc-WebKit",
"mmap-catch-all",
"malloc-catch-all"
],
"rules": [
{
"name": "second",
"stacktrace": "optional",
"allocator": "optional"
},
{
"name": "mmap-v8",
"stacktrace": ".*v8::.*",
"allocator": "mmap"
},
{
"name": "malloc-v8",
"stacktrace": ".*v8::.*",
"allocator": "malloc"
},
{
"name": "malloc-WebKit",
"stacktrace": ".*WebKit::.*",
"allocator": "malloc"
},
{
"name": "mmap-catch-all",
"stacktrace": ".*",
"allocator": "mmap"
},
{
"name": "malloc-catch-all",
"stacktrace": ".*",
"allocator": "malloc"
}
],
"version": "POLICY_DEEP_3"
}
""")
def test_load(self):
policy = dmprof.Policy.parse(cStringIO.StringIO(self._TEST_POLICY), 'json')
self.assertTrue(policy)
self.assertEqual('POLICY_DEEP_3', policy.version)
def test_find(self):
policy = dmprof.Policy.parse(cStringIO.StringIO(self._TEST_POLICY), 'json')
self.assertTrue(policy)
symbol_mapping_cache = self.MockSymbolMappingCache()
symbol_mapping_cache.add(FUNCTION_SYMBOLS, 0x1212, 'v8::create')
symbol_mapping_cache.add(FUNCTION_SYMBOLS, 0x1381, 'WebKit::create')
bucket1 = dmprof.Bucket([0x1212, 0x013], False, 0x29492, '_Z')
bucket1.symbolize(symbol_mapping_cache)
bucket2 = dmprof.Bucket([0x18242, 0x1381], False, 0x9492, '_Z')
bucket2.symbolize(symbol_mapping_cache)
bucket3 = dmprof.Bucket([0x18242, 0x181], False, 0x949, '_Z')
bucket3.symbolize(symbol_mapping_cache)
self.assertEqual('malloc-v8', policy.find(bucket1))
self.assertEqual('malloc-WebKit', policy.find(bucket2))
self.assertEqual('malloc-catch-all', policy.find(bucket3))
class UploadCommandTest(unittest.TestCase):
def test(self):
command = dmprof.UploadCommand()
returncode = command.do([
'upload',
'--gsutil',
os.path.join(ROOT_DIR, 'tests', 'mock_gsutil.py'),
os.path.join(ROOT_DIR, 'tests', 'data', 'heap.01234.0001.heap'),
'gs://test-storage/'])
self.assertEqual(0, returncode)
if __name__ == '__main__':
logging.basicConfig(
level=logging.DEBUG if '-v' in sys.argv else logging.ERROR,
format='%(levelname)5s %(filename)15s(%(lineno)3d): %(message)s')
unittest.main()
|
{
"content_hash": "5a0654541e37edb534a1648e21223197",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 79,
"avg_line_length": 32.2029702970297,
"alnum_prop": 0.6267486548808608,
"repo_name": "codenote/chromium-test",
"id": "7229c3bb473c98e6d1b4bdd41511eb0c49e41020",
"size": "6694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/deep_memory_profiler/tests/dmprof_test.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import annotations
import random
from concurrent.futures import (
FIRST_COMPLETED,
FIRST_EXCEPTION,
Future,
TimeoutError,
as_completed,
wait,
)
from time import sleep
import pytest
from tlz import take
from distributed.event import Event
from distributed.metrics import time
from distributed.utils import CancelledError
from distributed.utils_test import inc, slowadd, slowinc, throws, varying
def number_of_processing_tasks(client):
return sum(len(v) for k, v in client.processing().items())
def test_submit(client):
with client.get_executor() as e:
f1 = e.submit(slowadd, 1, 2)
assert isinstance(f1, Future)
f2 = e.submit(slowadd, 3, y=4)
f3 = e.submit(throws, "foo")
f4 = e.submit(slowadd, x=5, y=6)
assert f1.result() == 3
assert f2.result() == 7
with pytest.raises(RuntimeError):
f3.result()
assert f4.result() == 11
def test_as_completed(client):
with client.get_executor() as e:
N = 10
fs = [e.submit(slowinc, i, delay=0.02) for i in range(N)]
expected = set(range(1, N + 1))
for f in as_completed(fs):
res = f.result()
assert res in expected
expected.remove(res)
assert not expected
def test_wait(client):
def block_inc(x, ev):
ev.wait()
return x + 1
with client.get_executor(pure=False) as e:
ev = Event()
N = 10
fs = [e.submit(block_inc, i, ev, pure=False) for i in range(N)]
res = wait(fs, timeout=0.01)
assert len(res.not_done) > 0
ev.set()
res = wait(fs)
assert len(res.not_done) == 0
assert res.done == set(fs)
ev.clear()
nthreads = sum(client.nthreads().values())
fs = [e.submit(block_inc, i, ev, pure=False) for i in range(nthreads - 1)]
fs.append(e.submit(inc, 0))
fs.extend([e.submit(block_inc, i, ev, pure=False) for i in range(nthreads, N)])
res = wait(fs, return_when=FIRST_COMPLETED)
assert len(res.not_done) > 0
assert len(res.done) >= 1
ev.set()
res = wait(fs)
assert len(res.not_done) == 0
assert res.done == set(fs)
ev.clear()
fs = [e.submit(inc, i) for i in range(N)]
fs += [e.submit(throws, None)]
fs += [e.submit(block_inc, i, ev, pure=False) for i in range(N)]
res = wait(fs, return_when=FIRST_EXCEPTION)
assert any(f.exception() for f in res.done)
assert res.not_done
errors = []
for fs in res.done:
try:
fs.result()
except RuntimeError as e:
errors.append(e)
assert len(errors) == 1
assert "hello" in str(errors[0])
ev.set()
def test_cancellation(client):
with client.get_executor(pure=False) as e:
fut = e.submit(sleep, 2.0)
start = time()
while number_of_processing_tasks(client) == 0:
assert time() < start + 30
sleep(0.01)
assert not fut.done()
fut.cancel()
assert fut.cancelled()
start = time()
while number_of_processing_tasks(client) != 0:
assert time() < start + 30
sleep(0.01)
with pytest.raises(CancelledError):
fut.result()
def test_cancellation_wait(client):
with client.get_executor(pure=False) as e:
fs = [e.submit(slowinc, i, delay=0.2) for i in range(10)]
fs[3].cancel()
res = wait(fs, return_when=FIRST_COMPLETED, timeout=30)
assert len(res.not_done) > 0
assert len(res.done) >= 1
assert fs[3] in res.done
assert fs[3].cancelled()
def test_cancellation_as_completed(client):
with client.get_executor(pure=False) as e:
fs = [e.submit(slowinc, i, delay=0.2) for i in range(10)]
fs[3].cancel()
fs[8].cancel()
n_cancelled = sum(f.cancelled() for f in as_completed(fs, timeout=30))
assert n_cancelled == 2
@pytest.mark.slow()
def test_map(client):
with client.get_executor() as e:
N = 10
it = e.map(inc, range(N))
expected = set(range(1, N + 1))
for x in it:
expected.remove(x)
assert not expected
with client.get_executor(pure=False) as e:
N = 10
it = e.map(slowinc, range(N), [0.3] * N, timeout=1.2)
results = []
with pytest.raises(TimeoutError):
for x in it:
results.append(x)
assert 2 <= len(results) < 7
with client.get_executor(pure=False) as e:
N = 10
# Not consuming the iterator will cancel remaining tasks
it = e.map(slowinc, range(N), [0.3] * N)
for _ in take(2, it):
pass
# Some tasks still processing
assert number_of_processing_tasks(client) > 0
# Garbage collect the iterator => remaining tasks are cancelled
del it
sleep(0.5)
assert number_of_processing_tasks(client) == 0
def get_random():
return random.random()
def test_pure(client):
N = 10
with client.get_executor() as e:
fs = [e.submit(get_random) for i in range(N)]
res = [fut.result() for fut in as_completed(fs)]
assert len(set(res)) < len(res)
with client.get_executor(pure=False) as e:
fs = [e.submit(get_random) for i in range(N)]
res = [fut.result() for fut in as_completed(fs)]
assert len(set(res)) == len(res)
def test_workers(client, s, a, b):
N = 10
with client.get_executor(workers=[b["address"]]) as e:
fs = [e.submit(slowinc, i) for i in range(N)]
wait(fs)
has_what = client.has_what()
assert not has_what.get(a["address"])
assert len(has_what[b["address"]]) == N
def test_unsupported_arguments(client, s, a, b):
with pytest.raises(TypeError) as excinfo:
client.get_executor(workers=[b["address"]], foo=1, bar=2)
assert "unsupported arguments to ClientExecutor: ['bar', 'foo']" in str(
excinfo.value
)
def test_retries(client):
args = [ZeroDivisionError("one"), ZeroDivisionError("two"), 42]
with client.get_executor(retries=5, pure=False) as e:
future = e.submit(varying(args))
assert future.result() == 42
with client.get_executor(retries=4) as e:
future = e.submit(varying(args))
result = future.result()
assert result == 42
with client.get_executor(retries=2) as e:
future = e.submit(varying(args))
with pytest.raises(ZeroDivisionError, match="two"):
res = future.result()
with client.get_executor(retries=0) as e:
future = e.submit(varying(args))
with pytest.raises(ZeroDivisionError, match="one"):
res = future.result()
def test_shutdown_wait(client):
# shutdown(wait=True) waits for pending tasks to finish
e = client.get_executor()
start = time()
fut = e.submit(sleep, 1.0)
e.shutdown()
assert time() >= start + 1.0
sleep(0.1) # wait for future outcome to propagate
assert fut.done()
fut.result() # doesn't raise
with pytest.raises(RuntimeError):
e.submit(sleep, 1.0)
def test_shutdown_nowait(client):
# shutdown(wait=False) cancels pending tasks
e = client.get_executor()
start = time()
fut = e.submit(sleep, 5.0)
e.shutdown(wait=False)
assert time() < start + 2.0
sleep(0.1) # wait for future outcome to propagate
assert fut.cancelled()
with pytest.raises(RuntimeError):
e.submit(sleep, 1.0)
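# ---------------------------------------------------------------------------
# Minimal usage sketch (not a test): the fixtures above hide the setup, so
# this shows how Client.get_executor() would typically be used on its own.
# It assumes an in-process local cluster can be started on this machine and
# is illustrative only.
# ---------------------------------------------------------------------------
def _example_executor_usage():  # pragma: no cover - illustrative sketch
    from distributed import Client

    client = Client(processes=False)  # small in-process cluster (assumption)
    try:
        with client.get_executor() as executor:
            # The executor follows the concurrent.futures interface.
            future = executor.submit(inc, 41)
            assert future.result() == 42
            assert list(executor.map(inc, range(3))) == [1, 2, 3]
    finally:
        client.close()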
|
{
"content_hash": "736b18972511b0a63359508c7375ada1",
"timestamp": "",
"source": "github",
"line_count": 262,
"max_line_length": 87,
"avg_line_length": 29.217557251908396,
"alnum_prop": 0.5811887655127368,
"repo_name": "dask/distributed",
"id": "1019c2d59f8e1e3c202f24ae3668f22a4772c7c4",
"size": "7655",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "distributed/tests/test_client_executor.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4220"
},
{
"name": "HTML",
"bytes": "16583"
},
{
"name": "JavaScript",
"bytes": "9337"
},
{
"name": "Jinja",
"bytes": "17081"
},
{
"name": "Python",
"bytes": "3746516"
},
{
"name": "Shell",
"bytes": "2030"
}
],
"symlink_target": ""
}
|
from juriscraper.opinions.united_states.state import kan
class Site(kan.Site):
def __init__(self):
super(Site, self).__init__()
self.court_id = self.__module__
self.court_index = 2
|
{
"content_hash": "a607307bd99dd9cd7601ebabf0aca3eb",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 56,
"avg_line_length": 22.3,
"alnum_prop": 0.57847533632287,
"repo_name": "brianwc/juriscraper",
"id": "50bd2b47b33d95db9df7befb6ddfa126f63d9071",
"size": "384",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opinions/united_states/state/kanctapp.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "24111143"
},
{
"name": "Python",
"bytes": "661024"
}
],
"symlink_target": ""
}
|
from zoo.pipeline.api.onnx.mapper.operator_mapper import OperatorMapper
from zoo.pipeline.api.onnx.onnx_helper import OnnxHelper
import zoo.pipeline.api.keras.layers as zlayers
class EluMapper(OperatorMapper):
def __init__(self, node, _params, _all_tensors):
super(EluMapper, self).__init__(node, _params, _all_tensors)
def _to_tensor(self):
if "alpha" in self.onnx_attr:
alpha = float(self.onnx_attr['alpha'])
else:
alpha = 1.0
elu = zlayers.ELU(alpha=alpha)
return elu(self.model_inputs[0].zvalue)
|
{
"content_hash": "91f696ddcc6ab11b30ca075281fc2bf7",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 71,
"avg_line_length": 35.9375,
"alnum_prop": 0.6591304347826087,
"repo_name": "intel-analytics/analytics-zoo",
"id": "cadac61ff8a6ff402712c13fa44da7c38b51d8d5",
"size": "1165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyzoo/zoo/pipeline/api/onnx/mapper/elu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "73165"
},
{
"name": "Groovy",
"bytes": "1613"
},
{
"name": "Java",
"bytes": "209136"
},
{
"name": "Jupyter Notebook",
"bytes": "24437284"
},
{
"name": "Makefile",
"bytes": "11724"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "4085490"
},
{
"name": "RobotFramework",
"bytes": "17467"
},
{
"name": "Scala",
"bytes": "3562801"
},
{
"name": "Shell",
"bytes": "413512"
}
],
"symlink_target": ""
}
|
#######################################################################################
# Python implementation of LinkedIn OAuth Authorization, Profile and Connection APIs. #
# #
# Author: Ozgur Vatansever #
# Email : ozgurvt@gmail.com #
# LinkedIn Account: http://www.linkedin.com/in/ozgurvt #
#######################################################################################
__version__ = "1.8.1"
"""
Provides a Pure Python LinkedIn API Interface.
"""
try:
import sha
except DeprecationWarning, derr:
import hashlib
sha = hashlib.sha1
import urllib, time, random, httplib, hmac, binascii, cgi, string
from HTMLParser import HTMLParser
from model import *
class OAuthError(Exception):
"""
General OAuth exception, nothing special.
"""
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
class Stripper(HTMLParser):
"""
Stripper class that strips HTML entity.
"""
def __init__(self):
HTMLParser.__init__(self)
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def getAlteredData(self):
return ''.join(self.fed)
class XMLBuilder(object):
def __init__(self, rootTagName):
self.document = minidom.Document()
self.root = self.document.createElement(rootTagName)
self.document.appendChild(self.root)
def xml(self):
return self.document.toxml()
def __unicode__(self):
return self.document.toprettyxml()
def append_element_to_root(self, element):
self.root.appendChild(element)
def append_list_of_elements_to_element(self, element, elements):
map(lambda x:element.appendChild(x),elements)
return element
def create_element(self, tag_name):
return self.document.createElement(str(tag_name))
def create_element_with_text_node(self, tag_name, text_node):
text_node = self.document.createTextNode(str(text_node))
element = self.document.createElement(str(tag_name))
element.appendChild(text_node)
return element
def create_elements(self, **elements):
return [self.create_element_with_text_node(tag_name, text_node) for tag_name, text_node in elements.items()]
class ConnectionError(Exception):
pass
class LinkedIn(object):
def __init__(self, api_key, api_secret, callback_url, gae = False):
"""
LinkedIn Base class that simply implements LinkedIn OAuth Authorization and LinkedIn APIs such as Profile, Connection, etc.
@ LinkedIn OAuth Authorization
------------------------------
In OAuth terminology, there are 2 tokens that we need in order to have permission to perform an API request.
Those are request_token and access_token. Thus, this class basically intends to wrap the methods of the OAuth spec which
are related to getting the request_token and access_token strings.
@ Important Note:
-----------------
HMAC-SHA1 hashing algorithm will be used while encrypting a request body of an HTTP request. Other alternatives
such as 'SHA-1' or 'PLAINTEXT' are ignored.
@Reference for OAuth
--------------------
Please take a look at the link below if you have a basic knowledge of HTTP protocol
- http://developer.linkedin.com/docs/DOC-1008
Please create an application from the link below if you do not have an API key and secret key yet.
- https://www.linkedin.com/secure/developer
@api_key: Your API key
@api_secret: Your API secret key
@callback_url: the return url when the user grants permission to Consumer.
"""
# Credientials
self.API_ENDPOINT = "api.linkedin.com"
self.BASE_URL = "https://%s" % self.API_ENDPOINT
self.VERSION = "1.0"
self._api_key = api_key
self._api_secret = api_secret
self._callback_url = callback_url
self._gae = gae # Is it google app engine
self._request_token = None # that comes later
self._access_token = None # that comes later and later
self._request_token_secret = None
self._access_token_secret = None
self._verifier = None
self._error = None
def request_token(self):
"""
Performs the corresponding API which returns the request token in a query string
The POST Querydict must include the following:
* oauth_callback
* oauth_consumer_key
* oauth_nonce
* oauth_signature_method
* oauth_timestamp
* oauth_version
"""
self.clear()
method = "GET"
relative_url = "/uas/oauth/requestToken"
query_dict = self._query_dict({"oauth_callback" : self._callback_url})
self._calc_signature(self._get_url(relative_url), query_dict, self._request_token_secret, method)
try:
response = self._https_connection(method, relative_url, query_dict)
except ConnectionError:
return False
oauth_problem = self._get_value_from_raw_qs("oauth_problem", response)
if oauth_problem:
self._error = oauth_problem
return False
self._request_token = self._get_value_from_raw_qs("oauth_token", response)
self._request_token_secret = self._get_value_from_raw_qs("oauth_token_secret", response)
return True
def access_token(self, request_token = None, request_token_secret = None, verifier = None):
"""
Performs the corresponding API which returns the access token in a query string
According to the link (http://developer.linkedin.com/docs/DOC-1008), the POST Querydict must include the following:
* oauth_consumer_key
* oauth_nonce
* oauth_signature_method
* oauth_timestamp
* oauth_token (request token)
* oauth_version
"""
self._request_token = request_token and request_token or self._request_token
self._request_token_secret = request_token_secret and request_token_secret or self._request_token_secret
self._verifier = verifier and verifier or self._verifier
# if there is no request token, fail immediately
if self._request_token is None:
raise OAuthError("There is no Request Token. Please perform 'request_token' method and obtain that token first.")
if self._request_token_secret is None:
raise OAuthError("There is no Request Token Secret. Please perform 'request_token' method and obtain that token first.")
if self._verifier is None:
raise OAuthError("There is no Verifier Key. Please perform 'request_token' method, redirect user to API authorize page and get the _verifier.")
method = "GET"
relative_url = "/uas/oauth/accessToken"
query_dict = self._query_dict({
"oauth_token" : self._request_token,
"oauth_verifier" : self._verifier
})
self._calc_signature(self._get_url(relative_url), query_dict, self._request_token_secret, method)
try:
response = self._https_connection(method, relative_url, query_dict)
except ConnectionError:
return False
oauth_problem = self._get_value_from_raw_qs("oauth_problem", response)
if oauth_problem:
self._error = oauth_problem
return False
self._access_token = self._get_value_from_raw_qs("oauth_token", response)
self._access_token_secret = self._get_value_from_raw_qs("oauth_token_secret", response)
return True
def get_profile(self, member_id = None, url = None, fields=[]):
"""
Gets the public profile for a specific user. It is determined by his/her member id or public url.
If neither of them is given, the current user's details are fetched.
The argument 'fields' determines how much information will be fetched.
Examples:
client.get_profile(member_id = 123, url = None, fields=['first-name', 'last-name']) : fetches the profile of a user whose id is 123.
client.get_profile(member_id = None, url = None, fields=['first-name', 'last-name']) : fetches the current user's profile
client.get_profile(member_id = None, url = 'http://www.linkedin.com/in/ozgurv') : fetches the profile of a user whose profile url is http://www.linkedin.com/in/ozgurv
@ Returns Profile instance
"""
#################
# BEGIN ROUTINE #
#################
# if there is no access token or secret, fail immediately
self._check_tokens()
# specify the url according to the parameters given
raw_url = "/v1/people/"
if url:
url = self._quote(url)
raw_url = (raw_url + "url=%s:public") % url
elif member_id:
raw_url = (raw_url + "id=%s" % member_id)
else:
raw_url = raw_url + "~"
if url is None:
fields = ":(%s)" % ",".join(fields) if len(fields) > 0 else None
if fields:
raw_url = raw_url + fields
try:
response = self._do_normal_query(raw_url)
return Profile.create(response) # this creates Profile instance or gives you null
except ConnectionError:
return None
def get_connections(self, member_id = None, public_url = None, fields=[]):
"""
Fetches the connections of a user whose id is the given member_id or whose public url is the given public_url.
If none of the parameters is given, the connections of the current user are fetched.
@Returns: a list of Profile instances or an empty list if there is no connection.
Example urls:
* http://api.linkedin.com/v1/people/~/connections (for current user)
* http://api.linkedin.com/v1/people/id=12345/connections (fetch with member_id)
* http://api.linkedin.com/v1/people/url=http%3A%2F%2Fwww.linkedin.com%2Fin%2Flbeebe/connections (fetch with public_url)
"""
self._check_tokens()
raw_url = "/v1/people/%s/connections"
if member_id:
raw_url = raw_url % ("id=" + member_id)
elif public_url:
raw_url = raw_url % ("url=" + self._quote(public_url))
else:
raw_url = raw_url % "~"
fields = ":(%s)" % ",".join(fields) if len(fields) > 0 else None
if fields:
raw_url = raw_url + fields
try:
response = self._do_normal_query(raw_url)
document = minidom.parseString(response)
connections = document.getElementsByTagName("person")
result = []
for connection in connections:
profile = Profile.create(connection.toxml())
if profile is not None:
result.append(profile)
return result
except ConnectionError:
return None
def get_search(self, parameters):
"""
Use the Search API to find LinkedIn profiles using keywords,
company, name, or other methods. This returns search results,
which are an array of matching member profiles. Each matching
profile is similar to a mini-profile popup view of LinkedIn
member profiles.
Request URL Structure:
http://api.linkedin.com/v1/people?keywords=['+' delimited keywords]&name=[first name + last name]&company=[company name]&current-company=[true|false]&title=[title]&current-title=[true|false]&industry-code=[industry code]&search-location-type=[I,Y]&country-code=[country code]&postal-code=[postal code]&network=[in|out]&start=[number]&count=[1-10]&sort-criteria=[ctx|endorsers|distance|relevance]
"""
self._check_tokens()
try:
response = self._do_normal_query("/v1/people", method="GET", params=parameters)
except ConnectionError:
return None
error = self._parse_error(response)
if error:
self._error = error
return None
document = minidom.parseString(response)
connections = document.getElementsByTagName("person")
result = []
for connection in connections:
profile = Profile.create(connection.toxml())
if profile is not None:
result.append(profile)
return result
def send_message(self, subject, message, ids = [], send_yourself = False):
"""
Sends a Non-HTML message and subject to the members whose IDs are given as a parameter 'ids'.
If the user gives more than 10 ids, the IDs after the 10th ID are ignored.
@Input: string x string x list x bool
@Output: bool
Returns True if successfully sends the message otherwise returns False.
Important Note: You can send a message at most 10 connections at one time.
Technical Explanation:
---------------------
Sends a POST request to the URL 'http://api.linkedin.com/v1/people/~/mailbox'.
The XML that will be sent looks like this:
<?xml version='1.0' encoding='UTF-8'?>
<mailbox-item>
<recipients>
<recipient>
<person path='/people/{id}' />
</recipient>
</recipients>
<subject>{subject}</subject>
<body>{message}</body>
</mailbox-item>
The resulting XML would be like this:
If the result is None or '', it is guaranteed that the message was sent. If an error occurs, you get the following:
<?xml version='1.0' encoding='UTF-8' standalone='yes'?>
<error>
<status>...</status>
<timestamp>...</timestamp>
<error-code>...</error-code>
<message>...</message>
</error>
"""
#######################################################################################
# What we do here is first we need to shorten to ID list to 10 elements just in case. #
# Then we need to strip HTML tags using HTMLParser library. #
# Then we are going to build up the XML body and post the request. #
# According to the response parsed, we return True or False. #
#######################################################################################
self._check_tokens()
# Shorten the list.
ids = ids[:10]
if send_yourself:
ids = ids[:9]
ids.append("~")
subjectStripper = Stripper()
subjectStripper.feed(subject)
subject = subjectStripper.getAlteredData()
bodyStripper = Stripper()
bodyStripper.feed(message)
body = bodyStripper.getAlteredData()
# Build up the POST body.
builder = XMLBuilder("mailbox-item")
recipients_element = builder.create_element("recipients")
subject_element = builder.create_element_with_text_node("subject", subject)
body_element = builder.create_element_with_text_node("body", body)
for member_id in ids:
recipient_element = builder.create_element("recipient")
person_element = builder.create_element("person")
person_element.setAttribute("path", "/people/%s" % member_id)
recipient_element.appendChild(person_element)
recipients_element.appendChild(recipient_element)
builder.append_element_to_root(recipients_element)
builder.append_element_to_root(subject_element)
builder.append_element_to_root(body_element)
body = builder.xml()
try:
response = self._do_normal_query("/v1/people/~/mailbox", body=body, method="POST")
# If the API server sends us a response, we know that an error occurred,
# so we have to parse the response to find out what caused the error
# and let the user know by returning False.
if response:
self._error = self._parse_error(response)
return False
except ConnectionError:
return False
return True
def send_invitation(self, subject, message, first_name, last_name, email):
"""
Sends an invitation to the given email address.
This method is very similar to the 'send_message' method.
@input: string x string x string x string x string
@output: bool
"""
#########################################################################################
# What we do here is first, we need to check the access token. #
# Then we need to strip HTML tags using the HTMLParser library. #
# Then we are going to build up the XML body and post the request. #
# According to the response parsed, we return True or False. #
#########################################################################################
self._check_tokens()
subjectStripper = Stripper()
subjectStripper.feed(subject)
subject = subjectStripper.getAlteredData()
bodyStripper = Stripper()
bodyStripper.feed(message)
body = bodyStripper.getAlteredData()
# Build up the POST body.
builder = XMLBuilder("mailbox-item")
recipients_element = builder.create_element("recipients")
subject_element = builder.create_element_with_text_node("subject", subject)
body_element = builder.create_element_with_text_node("body", body)
recipient_element = builder.create_element("recipient")
person_element = builder.create_element("person")
person_element.setAttribute("path", "/people/email=%s" % email)
first_name_element = builder.create_element_with_text_node("first-name", first_name)
last_name_element = builder.create_element_with_text_node("last-name", last_name)
builder.append_list_of_elements_to_element(person_element, [first_name_element, last_name_element])
recipient_element.appendChild(person_element)
recipients_element.appendChild(recipient_element)
item_content_element = builder.create_element("item-content")
invitation_request_element = builder.create_element("invitation-request")
connect_type_element = builder.create_element_with_text_node("connect-type", "friend")
invitation_request_element.appendChild(connect_type_element)
item_content_element.appendChild(invitation_request_element)
builder.append_element_to_root(recipients_element)
builder.append_element_to_root(subject_element)
builder.append_element_to_root(body_element)
builder.append_element_to_root(item_content_element)
body = builder.xml()
try:
response = self._do_normal_query("/v1/people/~/mailbox", body=body, method="POST")
# If the API server sends us a response, we know that an error occurred,
# so we have to parse the response to find out what caused the error
# and let the user know by returning False.
if response:
self._error = self._parse_error(response)
return False
except ConnectionError:
return False
return True
def set_status(self, status_message):
"""
This API is deprecated and should be replaced with the share status of linkedin
Issues a status of the connected user. There is a 140 character limit on status message.
If it is longer than 140 characters, it is shortened.
-----------
Usage Rules
* We must use an access token from the user.
* We can only set status for the user who grants us access.
-----------
@input: string
@output: bool
"""
self._check_tokens()
# Shorten the message just in case.
status_message = str(status_message)
if len(status_message) > 140:
status_message = status_message[:140]
# Build up the XML request
builder = XMLBuilder("current-status")
status_node = builder.document.createTextNode(status_message)
builder.root.appendChild(status_node)
body = builder.xml()
try:
response = self._do_normal_query("/v1/people/~/current-status", body=body, method="PUT")
# If the API server sends us a response, we know that an error occurred,
# so we have to parse the response to find out what caused the error
# and let the user know by returning False.
if response:
self._error = self._parse_error(response)
return False
except ConnectionError:
return False
return True
def clear_status(self):
"""
This API is deprecated and should be replaced with the share status of linkedin
Clears the status of the connected user.
-----------
Usage Rules
* We must use an access token from the user.
* We can only set status for the user who grants us access.
-----------
@input: none
@output: bool
"""
self._check_tokens()
try:
response = self._do_normal_query("/v1/people/~/current-status", method="DELETE")
# If the API server sends us a response, we know that an error occurred,
# so we have to parse the response to find out what caused the error
# and let the user know by returning False.
if response:
self._error = self._parse_error(response)
return False
except ConnectionError:
return False
return True
def share_update(self, comment=None, title=None, submitted_url=None,
submitted_image_url=None, description=None,
visibility="connections-only"):
"""
Use the Share API to have a member share content with their network or with all of LinkedIn
-----------
Usage Rules
* We must use an access token from the user.
* We can only share items for the user who grants us access.
-----------
visibility: anyone or connections-only
@output: bool
SAMPLE
<?xml VERSION="1.0" encoding="UTF-8"?>
<share>
<comment>83% of employers will use social media to hire: 78% LinkedIn, 55% Facebook, 45% Twitter [SF Biz Times] http://bit.ly/cCpeOD</comment>
<content>
<title>Survey: Social networks top hiring tool - San Francisco Business Times</title>
<submitted-url>http://sanfrancisco.bizjournals.com/sanfrancisco/stories/2010/06/28/daily34.html</submitted-url>
<submitted-image-url>http://images.bizjournals.com/travel/cityscapes/thumbs/sm_sanfrancisco.jpg</submitted-image-url>
</content>
<visibility>
<code>anyone</code>
</visibility>
</share>
"""
self._check_tokens()
if comment is not None:
comment = str(comment)
if len(comment) > 700:
comment = comment[:700]
if title is not None:
title = str(title)
if len(title) > 200:
title = title[:200]
if description is not None:
description = str(description)
if len(description) > 400:
description = description[:400]
# Build up the XML request
builder = XMLBuilder("share")
if comment is not None and len(comment) > 0:
comment_element = builder.create_element_with_text_node("comment", comment)
builder.append_element_to_root(comment_element)
if (submitted_url is not None) or (title is not None):
content_element = builder.create_element("content")
if submitted_url is not None:
submitted_url_element = builder.create_element_with_text_node("submitted-url", submitted_url)
content_element.appendChild(submitted_url_element)
# must have url to include image url
if submitted_image_url is not None:
submitted_image_url_element = builder.create_element_with_text_node("submitted-image-url", submitted_image_url)
content_element.appendChild(submitted_image_url_element)
if title is not None:
title_element = builder.create_element_with_text_node("title", title)
content_element.appendChild(title_element)
if description is not None:
description_element = builder.create_element_with_text_node("description", description)
content_element.appendChild(description_element)
builder.append_element_to_root(content_element)
visibility_element = builder.create_element("visibility")
code_element = builder.create_element_with_text_node("code", visibility)
visibility_element.appendChild(code_element)
builder.append_element_to_root(visibility_element)
body = builder.xml()
try:
response = self._do_normal_query("/v1/people/~/shares", body=body, method="POST")
# If the API server sends us a response, we know that an error occurred,
# so we have to parse the response to find out what caused the error
# and let the user know by returning False.
if response:
self._error = self._parse_error(response)
return False
except ConnectionError:
return False
return True
def get_authorize_url(self, request_token = None):
self._request_token = request_token and request_token or self._request_token
if self._request_token is None:
raise OAuthError("OAuth Request Token is NULL. Plase acquire this first.")
return "%s%s?oauth_token=%s" % (self.BASE_URL, "/uas/oauth/authorize", self._request_token)
def get_error(self):
return self._error
def clear(self):
self._request_token = None
self._access_token = None
self._verifier = None
self._request_token_secret = None
self._access_token_secret = None
self._error = None
#################################################
# HELPER FUNCTIONS #
# You do not explicitly use those methods below #
#################################################
def _generate_nonce(self, length = 20):
return ''.join([string.letters[random.randint(0, len(string.letters) - 1)] for i in range(length)])
def _get_url(self, relative_path):
return self.BASE_URL + relative_path
def _generate_timestamp(self):
return str(int(time.time()))
def _quote(self, st):
return urllib.quote(st, safe='~')
def _utf8(self, st):
return isinstance(st, unicode) and st.encode("utf-8") or str(st)
def _urlencode(self, query_dict):
keys_and_values = [(self._quote(self._utf8(k)), self._quote(self._utf8(v))) for k,v in query_dict.items()]
keys_and_values.sort()
return '&'.join(['%s=%s' % (k, v) for k, v in keys_and_values])
def _get_value_from_raw_qs(self, key, qs):
raw_qs = cgi.parse_qs(qs, keep_blank_values = False)
rs = raw_qs.get(key)
if type(rs) == list:
return rs[0]
else:
return rs
def _signature_base_string(self, method, uri, query_dict):
return "&".join([self._quote(method), self._quote(uri), self._quote(self._urlencode(query_dict))])
def _parse_error(self, str_as_xml):
"""
Helper function in order to get error message from an xml string.
In coming xml can be like this:
<?xml version='1.0' encoding='UTF-8' standalone='yes'?>
<error>
<status>404</status>
<timestamp>1262186271064</timestamp>
<error-code>0000</error-code>
<message>[invalid.property.name]. Couldn't find property with name: first_name</message>
</error>
"""
try:
xmlDocument = minidom.parseString(str_as_xml)
if len(xmlDocument.getElementsByTagName("error")) > 0:
error = xmlDocument.getElementsByTagName("message")
if error:
error = error[0]
return error.childNodes[0].nodeValue
return None
except OAuthError, detail:
# raise detail
raise OAuthError("Invalid XML String given: error: %s" % repr(detail))
def _create_oauth_header(self, query_dict):
header = 'OAuth realm="http://api.linkedin.com", '
header += ", ".join(['%s="%s"' % (k, self._quote(query_dict[k]))
for k in sorted(query_dict)])
return header
def _query_dict(self, additional = {}):
query_dict = {"oauth_consumer_key": self._api_key,
"oauth_nonce": self._generate_nonce(),
"oauth_signature_method": "HMAC-SHA1",
"oauth_timestamp": self._generate_timestamp(),
"oauth_version": self.VERSION
}
query_dict.update(additional)
return query_dict
def _do_normal_query(self, relative_url, body=None, method="GET", params=None):
method = method
query_dict = self._query_dict({"oauth_token" : self._access_token})
signature_dict = dict(query_dict)
if (params):
signature_dict.update(params)
query_dict["oauth_signature"] = self._calc_signature(self._get_url(relative_url),
signature_dict, self._access_token_secret, method, update=False)
if (params):
relative_url = "%s?%s" % (relative_url, self._urlencode(params))
response = self._https_connection(method, relative_url, query_dict, body)
if (response):
error = self._parse_error(response)
if error:
self._error = error
raise ConnectionError()
return response
def _check_tokens(self):
if self._access_token is None:
self._error = "There is no Access Token. Please perform 'access_token' method and obtain that token first."
raise OAuthError(self._error)
if self._access_token_secret is None:
self._error = "There is no Access Token Secret. Please perform 'access_token' method and obtain that token first."
raise OAuthError(self._error)
def _calc_key(self, token_secret):
key = self._quote(self._api_secret) + "&"
if (token_secret):
key += self._quote(token_secret)
return key
def _calc_signature(self, url, query_dict, token_secret, method = "GET", update=True):
query_string = self._quote(self._urlencode(query_dict))
signature_base_string = "&".join([self._quote(method), self._quote(url), query_string])
hashed = hmac.new(self._calc_key(token_secret), signature_base_string, sha)
signature = binascii.b2a_base64(hashed.digest())[:-1]
if (update):
query_dict["oauth_signature"] = signature
return signature
def _https_connection(self, method, relative_url, query_dict, body=None):
if (self._gae):
return self._https_connection_gae(method, relative_url, query_dict, body)
else:
return self._https_connection_regular(method, relative_url, query_dict, body)
def _https_connection_regular(self, method, relative_url, query_dict, body = None):
header = self._create_oauth_header(query_dict)
connection = None
try:
connection = httplib.HTTPSConnection(self.API_ENDPOINT)
connection.request(method, relative_url, body = body,
headers={'Authorization':header})
response = connection.getresponse()
if response is None:
self._error = "No HTTP response received."
raise ConnectionError()
return response.read()
finally:
if (connection):
connection.close()
def _https_connection_gae(self, method, relative_url, query_dict, body = None):
from google.appengine.api import urlfetch
if (method == "GET"):
method = urlfetch.GET
elif (method == "POST"):
method = urlfetch.POST
elif (method == "PUT"):
method = urlfetch.PUT
elif (method == "DELETE"):
method = urlfetch.DELETE
header = self._create_oauth_header(query_dict)
headers = {'Authorization':header}
if (body):
headers["Content-Type"] = "text/xml"
url = self._get_url(relative_url)
rpc = urlfetch.create_rpc(deadline=10.0)
urlfetch.make_fetch_call(rpc, url, method=method, headers=headers,
payload=body)
return rpc.get_result().content
########################
# END HELPER FUNCTIONS #
########################
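# -----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): the three-legged OAuth flow that
# the class above implements. The key/secret/callback are whatever the caller
# supplies, and the verifier has to be copied back by the user after visiting
# the authorize URL in a browser; nothing here is hard-coded or guaranteed.
# -----------------------------------------------------------------------------
def example_oauth_flow(api_key, api_secret, callback_url):
    """Walk through request token -> authorize URL -> access token -> profile."""
    api = LinkedIn(api_key, api_secret, callback_url)
    if not api.request_token():
        raise OAuthError(api.get_error())
    print "Authorize the application at:", api.get_authorize_url()
    verifier = raw_input("Paste the oauth_verifier parameter here: ").strip()
    if not api.access_token(verifier=verifier):
        raise OAuthError(api.get_error())
    # Once the access token is in place, the rest of the API can be used,
    # e.g. fetching the current user's mini profile:
    return api.get_profile(fields=["first-name", "last-name"])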
|
{
"content_hash": "587065afb7d0f836724fd90847d8a065",
"timestamp": "",
"source": "github",
"line_count": 838,
"max_line_length": 403,
"avg_line_length": 41.99403341288783,
"alnum_prop": 0.5617061180415447,
"repo_name": "venkatesh22/python-linkedin",
"id": "e2174bc83e5276a2a304132fce8c52b74ca471c8",
"size": "35216",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "linkedin/linkedin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48097"
}
],
"symlink_target": ""
}
|
import numpy as np
from math import log
from sklearn.naive_bayes import MultinomialNB
from sklearn.utils.extmath import safe_sparse_dot
class FeatMultinomialNB(MultinomialNB):
"""A MultinomialNB classifier that can be trained using labeled features.
"""
def fit(self, X, Y, sample_weight=None, features=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
features : array-like, shape = [n_classes, n_features], optional
Boost for the prior probability of a feature given a class. For no
boost use the value alpha given on the initialization.
Returns
-------
self : object
Returns self.
"""
if features is not None:
self.alpha = features
self.instance_num = X.shape[0]
return_value = super(FeatMultinomialNB, self).fit(X, Y, sample_weight)
self._information_gain()
return return_value
def _count(self, X, Y):
super(FeatMultinomialNB, self)._count(X, Y)
# Number of instances with class j and presence of feature k
# shape class, feat.
self.count_feat_and_class = safe_sparse_dot(Y.T, (X > 0))
def _information_gain(self):
"""Calculates the information gain for each feature.
Stores the value in self.feat_information_gain
"""
prob_Ik1_and_class = self.count_feat_and_class / self.instance_num
prob_Ik1 = self.count_feat_and_class.sum(axis=0) / self.instance_num
term1 = (prob_Ik1_and_class *
((np.log(prob_Ik1_and_class) - np.log(prob_Ik1)).T -
self.class_log_prior_).T)
prob_Ik0_and_class = ((self.count_feat_and_class.T -
self.class_count_).T * -1 / self.instance_num)
term2 = (prob_Ik0_and_class *
((np.log(prob_Ik0_and_class) - np.log(1 - prob_Ik1)).T -
self.class_log_prior_).T)
self.feat_information_gain = (np.nan_to_num(term1).sum(axis=0) +
np.nan_to_num(term2).sum(axis=0))
def instance_proba(self, X):
"""Calculates the probability of each instance in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Returns
-------
array-like, shape = [n_samples]
"""
feat_prob = safe_sparse_dot(np.exp(self.class_log_prior_),
np.exp(self.feature_log_prob_)).T
instance_log_prob = safe_sparse_dot(X, np.log(feat_prob))
return np.exp(instance_log_prob)
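# -----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only). The tiny count matrix and boost
# values below are made up; the point is that `features` acts as an alpha
# matrix of shape [n_classes, n_features] that boosts the prior of a feature
# given a class. Whether a 2-D alpha is accepted may depend on the installed
# scikit-learn version.
# -----------------------------------------------------------------------------
if __name__ == '__main__':
    X = np.array([[2, 0, 1],
                  [0, 3, 1],
                  [1, 0, 2],
                  [0, 2, 2]])
    y = np.array([0, 1, 0, 1])
    # Boost feature 0 for class 0 and feature 1 for class 1; every other
    # (class, feature) pair keeps the default smoothing value of 1.
    boosts = np.array([[5.0, 1.0, 1.0],
                       [1.0, 5.0, 1.0]])
    clf = FeatMultinomialNB()
    clf.fit(X, y, features=boosts)
    print(clf.predict(X))
    print(clf.feat_information_gain)
    print(clf.instance_proba(X))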
|
{
"content_hash": "59856f4d987649b1df3013fa4301d913",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 78,
"avg_line_length": 38.4375,
"alnum_prop": 0.5778861788617886,
"repo_name": "mit0110/activepipe",
"id": "5e3629e48370dd88ddf89813cf404e8447523a86",
"size": "3075",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "featmultinomial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "56661"
}
],
"symlink_target": ""
}
|
'''EditorPage - one page in a CustomNotebook'''
import Tkinter as tk
import ttk
from dte_margin import DteMargin
from robotdte import RobotDTE
import codecs
import re
import os
import sys
import tkFont
import platform
from rwb import FONT_SCHEME, COLOR_SCHEME
import urllib2
# I hate to hard-code these, but I don't think there's a robot
# API I can use to get them.
AUTOMATIC_VARS = [ "${TEST NAME}", "${TEST STATUS}", "${TEST MESSAGE}",
"${PREV TEST NAME}", "${PREV TEST STATUS}",
"${PREV TEST MESSAGE}", "${SUITE NAME}",
"${SUITE SOURCE}", "${SUITE STATUS}", "${SUITE MESSAGE}",
"${OUTPUT FILE}", "${LOG FILE}", "${REPORT FILE}",
"${DEBUG FILE}", "${OUTPUT DIR}" ]
KEYWORD_SETTINGS = ["[Documentation]", "[Arguments]", "[Return]",
"[Teardown]", "[Timeout]"]
TESTCASE_SETTINGS = ["[Documentation]", "[Tags]", "[Setup]",
"[Teardown]", "[Template]", "[Timeout]"
"[Force Tags]", "[Default Tags]",
"[Test Setup]", "[Test Teardown]",
"[Test Template]", "[Test Timeout]"]
class EditorPage(tk.Frame):
'''This class represents one "page" in the editor notebook
Since Tkinter text widgets manage their data, this class
will not keep its own copy in a property in order to cut
down on memory usage.
'''
encoding = "ascii"
parent = None
notebook = None
is_uri = False
is_readonly = False
def __init__(self, parent, path=None, name=None, app=None):
self.parent = parent
self.app = app
self.path = path
self.name = name
self._loaded = False
if name is None and path is not None:
self.name = os.path.basename(path)
if path is not None:
if path.split(":", 1)[0].lower() in ("http", "https"):
self.is_uri = True
self.is_readonly = True
else:
self.path = os.path.abspath(self.path)
self._name_var = tk.StringVar()
self._name_var.trace("w", self._on_name_trace)
tk.Frame.__init__(self, parent)
self._create_widgets()
# self.configure(background=self.app.colors.background2)
# keep track of window height; when it changes, don't
# immediately update the line numbers
self._last_winfo_height = self.winfo_height()
# This arranges for the file to be loaded the first time
# the widget becomes visible. No point in doing it any
# sooner. This potentially greatly speeds up startup
# time when opening a lot of files.
self.bind("<Visibility>", self._on_visibility)
def _on_visibility(self, event):
if not self._loaded:
self.after(1, self.after_idle, self.load, self.path)
def _on_linenumber_control_click(self, event):
text_index = self.dte.index("@%s,%s" % (event.x, event.y))
self.dte.mark_set("click", "%s linestart" % text_index)
self.dte.tag_add("sel", "%s linestart" % text_index, "%s lineend+1c" % text_index)
def _on_linenumber_click(self, event):
try:
text_index = self.dte.index("@%s,%s" % (event.x, event.y))
self.dte.mark_set("click", "%s linestart" % text_index)
self.dte.tag_remove("sel", "1.0", "end")
self.dte.tag_add("sel", "%s linestart" % text_index, "%s lineend+1c" % text_index)
self.dte.mark_set("insert", "%s linestart" % text_index)
except Exception, e:
print "drat:", e
import sys; sys.stdout.flush()
def _on_linenumber_move(self, event):
try:
text_index = self.dte.index("@%s,%s" % (event.x, event.y))
self.dte.tag_remove("sel", "1.0", "end")
if self.dte.compare(text_index, ">", "click"):
self.dte.tag_add("sel", "click", "%s lineend+1c" % text_index)
else:
self.dte.tag_add("sel", "%s linestart" % text_index, "click lineend+1c")
self.dte.mark_set("insert", "%s lineend" % text_index)
except Exception, e:
print "drat:", e
def _on_name_trace(self, *args):
self.name = self._name_var.get()
self.event_generate("<<NameChanged>>")
def focus(self):
self.dte.focus_set()
def toggle_linenumbers(self, show=None):
if show is None:
show = not self.linenumbers.winfo_ismapped()
if show:
self.linenumbers.grid()
else:
self.linenumbers.grid_remove()
def get_selected_rows(self):
return self.dte.get_selected_rows()
def get_row(self, linenumber):
'''Return the row for the passed-in line number'''
return self.dte.get_row(linenumber)
def get_text(self):
'''Return all the text in the editor page'''
return self.dte.get(1.0, "end-1c")
def set_path(self, path):
'''Sets the path associated with this page'''
self.path = path
self.name = os.path.basename(path)
self.namepath.configure(text=path)
# I do not know why, but this code sometimes crashes on my box
# if I remove the following statement. WTF?
sys.stdout.flush()
self._name_var.set(self.name)
def _create_widgets(self):
self.nameframe = tk.Frame(self, background='white')
# self.nameentry = tk.Entry(self.nameframe, relief="flat",
self.nameentry = tk.Label(self.nameframe, relief="flat",
font=("Helvetica", 16, "normal"),
anchor="w",
# insertbackground="#ff0000",
# insertwidth=1,
# highlightthickness=0,
# textvariable = self._name_var)
)
self.close_button = tk.Button(self.nameframe, text="[x]",
borderwidth=1, relief="raised",
highlightthickness=0,
foreground="black",
command=lambda: self.dte.event_generate("<<Close>>"))
# it's busted; might as well disable it for now.
self.close_button.configure(state="disabled")
self.nameentry.configure(text=self.name)
# self.nameentry.insert(0, self.name)
# self.nameentry.pack(fill="both", expand="True")
self.namepath = tk.Label(self.nameframe,
text="<unsaved>",
anchor="sw", borderwidth=0, foreground="gray", background="white")
# background=core.colors.background3)
if self.path is not None:
self.namepath.configure(text=self.path)
# hr = tk.Frame(self.nameframe, background=core.colors.background2, borderwidth=0, height=1)
hr = tk.Frame(self.nameframe, borderwidth=0, height=1)
# self.namepath.pack(side="bottom", fill="x")
# hr.pack(side="bottom", fill="x", padx=8)
self.nameentry.grid(row=0, column=0, sticky="nsew")
self.close_button.grid(row=0, column=1, sticky="e", padx=4, ipadx=4)
hr.grid(row=1, column=0, columnspan=2, sticky="ew")
self.namepath.grid(row=2, column=0, columnspan=2, sticky="nswe")
self.nameframe.grid_columnconfigure(0, weight=1)
em = self.app.fonts.fixed.measure("M")
tabwidth = 2 # needs to be a user preference...
# wrapping doesn't play well with auto horizontal
# scrollbars... if we allow word wrapping we need to turn off
# the horizontal scrollbar...
wrap = "none"
self.dte = RobotDTE(self,
borderwidth=0,
insertbackground="#ff0000",
wrap=wrap,
insertwidth=1,
highlightthickness=0,
tabstyle="wordprocessor",
undo=True, autoseparators=True,
tabs = tabwidth*em,
font=self.app.fonts.fixed)
self.linenumbers = DteMargin(self, background="#f2f2f2",
borderwidth=0,
highlightthickness=0, width=4*em)
self.linenumbers.attach(self.dte)
self.configure(background=self.dte.cget("background"))
vsb = ttk.Scrollbar(self,
command=self.dte.yview,
orient="vertical")
hsb = ttk.Scrollbar(self,
command=self.dte.xview,
orient="horizontal")
filler = tk.Frame(self,
borderwidth=0, highlightthickness=0, background=self.nameframe.cget("background"))
self.dte.configure(xscrollcommand=hsb.set, yscrollcommand=self.OnYviewChanged)
self.nameframe.grid(row=0, column=0, sticky="nsew", columnspan=3)
self.dte.grid(row=1, column=1, sticky="nsew", padx=4, pady=4)
vsb.grid(row=1, column=2, sticky="ns", pady=(4,0))
hsb.grid(row=2, column=0, columnspan=2, sticky="ew")
self.grid_rowconfigure(1, weight=1)
self.grid_columnconfigure(1, weight=1)
self.vsb = vsb
self._define_tags()
self.dte.add_post_change_hook(self.OnDTEChange)
self.dte.bind("<<AutoComplete>>", self.on_autocomplete)
self.dte.bind("<*>", self.on_star)
self.dte.bind("<$>", self._on_dollar)
self.popup_menu = tk.Menu(self.dte, tearoff=False)
self.popup_menu.add_command(label="Cut", underline=2,
command=lambda: self.dte.event_generate("<<Cut>>"))
self.popup_menu.add_command(label="Copy", underline=0,
command=lambda: self.dte.event_generate("<<Copy>>"))
self.popup_menu.add_command(label="Paste", underline=0,
command=lambda: self.dte.event_generate("<<Paste>>"))
self.popup_menu.add_separator()
self.popup_menu.add_cascade(label="Tools", menu=self.app.tools_menu)
self.popup_menu.add_separator()
self.popup_menu.add_command(label="Close this file",
command=lambda: self.dte.event_generate("<<Close>>"))
# hmmm; on my mac the right button does a "<2>",
# but on windows it does a "<3>"
self.dte.bind("<<Popup>>", self.on_popup)
# I don't know why, but on the mac there's a ButtonRelease-2
# binding on something that seems to paste the selection.
# this turns that off.
self.dte.bind("<ButtonRelease-2>", lambda event: "break")
# self.notebook.add(self, text=os.path.basename(name))
# self.notebook.select(self)
def _compute_checksum(self):
text = self.dte.get(1.0, "end-1c")
try:
import zlib
encoded_text = text.encode(self.encoding)
self._checksum = zlib.adler32(encoded_text)
except Exception, e:
print "hey, computing the checksum failed:", e
def save(self):
self._compute_checksum()
# For saving we'll strip off all trailing whitespace and
# ensure the text ends with a newline.
text = self.dte.get(1.0, "end-1c").rstrip() + "\n"
if self.encoding == "ascii" and not self._is_ascii(text):
self.app.status_message("unicode detected; switching to utf-8")
self.encoding="utf-8-sig"
encoded_text = text.encode(self.encoding)
with open(self.path, "w") as f:
f.write(encoded_text)
def _is_ascii(self, s):
try:
s.decode('ascii')
return True
except UnicodeEncodeError:
return False
def get_text_widget(self):
'''Return the text widget associated with this page'''
return self.dte
def on_autocomplete(self, event):
line = self.dte.get("insert linestart", "insert lineend")
if line.startswith("*"):
# I'm not crazy about this; I don't like putting the
# "***" in the choices, but for now its necessary
# because the widget doesn't recognize the words
# between the leading "***" and trailing "***" as
# a cell -- meaning, the choice overrides the whole
# line.
self.dte.set_completion_choices(["*** Test Cases ***",
"*** Keywords ***",
"*** Settings ***",
"*** Variables ***"])
else:
try:
cell_contents = self.dte.get("current_cell.first", "current_cell.last")
cell_contents = cell_contents.lower()
except tk.TclError:
return
if cell_contents.startswith("["):
# I need to determine which table the cursor is in
# and make the list correct for the table...
meta = set(KEYWORD_SETTINGS + TESTCASE_SETTINGS)
choices = [s for s in meta if s.lower().startswith(cell_contents)]
self.dte.set_completion_choices(choices)
elif cell_contents.startswith("${"):
# HMMM. This fails completely if the user types something
# like ${foo}${bar}... in a perfect world the completion
# is on a word, but keywords can have spaces so we can't
# do that.
local_vars = re.findall(r'\${.*?}', self.dte.get(1.0, "insert"))
varnames = sorted(set(local_vars + AUTOMATIC_VARS))
choices = [s for s in varnames if s.lower().startswith(cell_contents.rstrip("}"))]
self.dte.set_completion_choices(choices)
else:
keywords = sorted(self.app.kwdb.get_keywords())
choices = [s for s in keywords if s.lower().startswith(cell_contents)]
self.dte.set_completion_choices(choices)
def on_popup(self, event=None):
self.popup_menu.tk_popup(event.x_root, event.y_root)
def _on_dollar(self, event):
'''If there is a selection, surround it with ${}'''
sel = self.dte.tag_ranges("sel")
if len(sel) > 0:
self.dte.insert(sel[1], "}")
self.dte.insert(sel[0], "${")
return "break"
def on_star(self, event):
'''If the user types '*' on an otherwise blank line, insert '*** ***'
then place cursor in the middle
'''
if self.dte.compare("insert linestart", "==", "insert lineend"):
# must be a new, blank line
self.dte.insert("insert", "*** ***\n| ")
self.dte.mark_set("insert", "insert linestart -1 line + 4c")
self.dte.see("insert+1line")
return "break"
def on_close(self):
print "closing!"
def load(self, path):
'''Load a file given by the filename'''
if self.path is None:
raise Exception("nothing to load")
self.dte.delete(1.0, "end")
data = ""
try:
if path.startswith("http:") or path.startswith("https:"):
f = urllib2.urlopen(self.path)
data = f.read().replace("\r\n", "\n")
f.close()
else:
if os.path.exists(self.path) or True:
f = open(self.path, "rU")
data = f.read().replace("\r\n", "\n")
f.close()
except Exception, e:
self.app.log.debug("error opening '%s': %s" % (self.path, str(e)))
# hmmm; this will cause problems if the very last line
# of a file ends in backslash-space. What's the
# likelihood of that happening?
data = data.rstrip()
if data.startswith(codecs.BOM_UTF8):
# N.B. visual studio creates utf-8 files with a
# BOM as the first character. We need to remove
# the BOM, otherwise it will appear as a garbage
# character in the text widget.
data = data.decode("utf-8-sig")
self.encoding="utf-8-sig"
else:
self.encoding="ascii"
self.dte.insert(1.0, data)
self.dte.edit_reset()
self.dte.mark_set("insert", 1.0)
self._compute_checksum()
self._loaded = True
def get_test_cases(self):
regions = self.get_table_regions("test cases")
testcases = []
for (start, end) in regions:
indexes = self.tk.call(str(self.dte), "search", "-all", "-regexp",
r'^ *\| *([^|\s].+)$', start, end)
for index in indexes:
line = self.dte.get(str(index), "%s lineend" % str(index))
testcases.append(line.strip("| "))
return testcases
def get_table_regions(self, table):
'''Return the range of lines for a robot table
'table' must be one of "Test Cases", "Keywords", "Variables" or "Settings"
Some rough timings show this only takes 1ms or so with a test
file with 2k lines. Nice.
'''
if table.lower() not in ("settings","test cases", "keywords", "variables"):
raise Exception("invalid table '%s': " % table +
"must be one of 'settings', " +
"'test cases', 'keywords' or 'variables'")
any_heading_pattern=r'^ *\*+[\* ]*(Test Cases?|(User )?Keywords?|Settings?|Variables?)[\* ]*$'
# N.B. this pattern assumes 'table' is plural (note how
# a '?' is appended immediately after the table name)
heading_pattern = r'^ *\*+ *%s?[ \*]*$' % table.lower()
result = []
end = "1.0"
while True:
start = self.dte.search(heading_pattern, end, stopindex="end", regexp=True, nocase=True)
if (start == ""): break
end = self.dte.search(any_heading_pattern, "%s lineend+1c" % start,
stopindex="end", regexp=True, nocase=True)
if end == "": end=self.dte.index("end")
result.append((start, end))
return result
def _define_tags(self):
FORESTGREEN="#228b22"
FIREBRICK="#b22222"
DARKBLUE="#00006f"
self.dte.tag_configure("name", foreground=FORESTGREEN,
font=self.app.fonts.fixed_bold)
self.dte.tag_configure("ellipsis", foreground="lightgray")
self.dte.tag_configure("heading", font=self.app.fonts.fixed_bold, background="gray")
self.dte.tag_configure("variable", foreground=DARKBLUE,
font=self.app.fonts.fixed_bold)
self.dte.tag_configure("noise", foreground="lightgray")
self.dte.tag_configure("comment", foreground=FIREBRICK)
self.dte.tag_configure("bold", font=self.app.fonts.fixed_bold)
# make sure this is last so it has higher priority than the
# other tags
self.dte.tag_configure("column_marker", foreground="lightgray")
# make sure the selection has highest precedence; otherwise, selecting blue
        # text (assuming the selection is blue) obscures the text.
self.dte.tag_raise("sel")
def OnYviewChanged(self, *args):
'''Handle scrolling of widget and updating line numbers
This gives dreadful performance when the user resizes the window,
since each pixel change in the size causes the line numbers to
recalculate. I need to figure out how to optimize for that case...
'''
result = self.vsb.set(*args)
self.linenumbers.refresh()
return result
def OnDTEChange(self, *args):
'''Called after a material change to the text widget contents
        First element of *args will be the text widget command
        that caused the change ("insert", "delete", or "replace");
        the rest of the arguments will vary depending on that
        command
'''
# make sure there is always a newline at the end (well, two
# newlines since tk always adds one). Why? So the user
# always has a blank line on which to click. It's just a
# little usability thing
if (self.dte.get("end-2c", "end") != "\n\n"):
insert = self.dte.index("insert")
self.dte.insert("end", "\n")
self.dte.mark_set("insert", insert)
# remove all special tags
block_start = self.dte.index("start_change linestart")
block_end = self.dte.index("end_change lineend +1c")
for tag in ("name", "variable", "noise","heading",
"comment", "keyword", "bold", "cell", "ellipsis"):
self.dte.tag_remove(tag, block_start, block_end)
# now add all the special highlighting
self.dte.highlight_pattern(r'^\*+\s*(Test Cases?|(User\s+)?Keywords?|Settings?|Variables?)\s*\**',
"heading", block_start, block_end, whole_lines = True)
self.dte.highlight_pattern(r'\$\{.*?\}', "variable", block_start, block_end)
self.dte.highlight_pattern(r'^\s*\*+[^\*]+\*+\s*$', "bold", block_start, block_end)
self.dte.highlight_pattern(r'^\s*#[^\n]*?$', "comment", block_start, block_end)
self.dte.highlight_pattern(r'\|\s+#[^\n]*?$', "comment", block_start, block_end)
self.dte.highlight_pattern('\|', "column_marker", block_start, block_end)
self.dte.highlight_pattern(" \.\.\. ", "ellipsis", block_start, block_end)
# only do this for test cases
# TODO: make sure we only do this in a test case table. ie: don't
# mark things with "name" if they are in the settings table.
name_pattern = r'^\|\s+[^\|\s][^\n]*?$'
for (start, end) in self.get_table_regions("test cases"):
self.dte.highlight_pattern(name_pattern, "name", start, end)
# ... and keywords
for (start, end) in self.get_table_regions("keywords"):
self.dte.highlight_pattern(name_pattern, "name", start, end)
number_of_lines = int(float(self.dte.index("end")))-1
self.dte.tag_configure("cell", background="pink")
start_line = int(float(block_start))
end_line = int(float(block_end))
self.event_generate("<<FileChanged>>", data="this is bogus")
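# A minimal sketch (not part of the original widget code) of the
# highlight_pattern helper that OnDTEChange relies on above. The real
# implementation lives on the custom text widget class elsewhere in this
# project; the signature and behavior here are assumptions for illustration,
# and 'tk' is assumed to be this module's Tkinter import.
def _highlight_pattern_sketch(text_widget, pattern, tag,
                              start="1.0", end="end",
                              whole_lines=False, regexp=True):
    '''Add 'tag' to every region of text_widget matching 'pattern'.'''
    count = tk.IntVar()
    index = start
    while True:
        index = text_widget.search(pattern, index, stopindex=end,
                                   count=count, regexp=regexp)
        if not index or count.get() == 0:
            break
        if whole_lines:
            first, last = "%s linestart" % index, "%s lineend" % index
        else:
            first, last = index, "%s+%dc" % (index, count.get())
        text_widget.tag_add(tag, first, last)
        index = last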
|
{
"content_hash": "a1ae823201cbce156c847799412f4bd5",
"timestamp": "",
"source": "github",
"line_count": 514,
"max_line_length": 108,
"avg_line_length": 44.233463035019454,
"alnum_prop": 0.5476776917663617,
"repo_name": "boakley/robotframework-workbench",
"id": "088f6827f0e3404e646545bccd285cf36dbd31a7",
"size": "22736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rwb/editor/editor_page.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "439210"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from gaebusiness.business import Command
from gaebusiness.gaeutil import ModelSearchCommand
from gaecookie import facade as cookie_facade
from gaegraph.business_base import NodeSearch
from gaepermission import inspector
from gaepermission.base_commands import UpdateUserGroups, GetMainUserByEmail, SaveUserCmd
from gaepermission.base_commands2 import LoginCheckingEmail
from gaepermission.facebook.commands import GetFacebookApp, SaveOrUpdateFacebookApp, LogFacebookUserIn, FetchFacebook
from gaepermission.google.commands import GoogleLogin
from gaepermission.model import MainUser
from gaepermission.passwordless.commands import SaveOrUpdateApp, GetApp, SengLoginEmail, Login
from tekton import router
USER_COOKIE_NAME = 'userck'
def save_user_cmd(email, name=None, groups=None, locale='en_US', timezone='US/Eastern'):
"""
Command to save a user
:param email: user email
:param name: user name
:param groups: user permission groups
    :return: A command that validates the data and saves the user
"""
if name is None:
name = email
if groups is None:
groups = []
return SaveUserCmd(name=name, email=email, groups=groups, locale=locale, timezone=timezone)
def get_user_by_email(email):
"""
    Returns a command that finds a MainUser by her email address
    :param email: email to use in the search
    :return: Command that looks the user up in the DB
"""
return GetMainUserByEmail(email)
def web_path_security_info():
"""
    Returns a generator that yields all paths from the application with information about groups and csrf security
"""
return inspector.web_paths_security_info(router.package_base)
def logout(response):
"""
    Returns a command that logs the user out, removing her id from the cookie
"""
return cookie_facade.delete_cookie(response, USER_COOKIE_NAME)
def logged_user(request):
"""
    Returns a command that retrieves the currently logged user based on a secure cookie
    If there is no logged user, the command's result is None
"""
dct = cookie_facade.retrive_cookie_data(request, USER_COOKIE_NAME).execute().result
if dct is None:
return Command()
return NodeSearch(dct['id'])
def login_google(google_user, response):
"""
Google user must be the user returned from get_current_user from users module provided by App Engine
    Returns a command that logs the user in based on her Google account credentials.
    The logged user (MainUser) is provided on result, or None if the user is not logged in with her Google Account
"""
return GoogleLogin(google_user, response, USER_COOKIE_NAME)
def login_passwordless(ticket, response, detail_url='https://pswdless.appspot.com/rest/detail'):
"""
Log user in using Passwordless service
:param ticket: ticket returned from Passwordless
:param response: Response object from webapp2
:param detail_url: url to check ticket and user data
:return: a Command that log user in when executed
"""
return Login(ticket, response, USER_COOKIE_NAME, detail_url)
def login_checking_email(pending_id, ticket, response, detail_url='https://pswdless.appspot.com/rest/detail'):
"""
Log user in using Passwordless service
:param pending_id: PendingExternalToMainUser's id
:param ticket: ticket returned from Passwordless
:param response: Response object from webapp2
:param detail_url: url to check ticket and user data
:return: a Command that log user in when executed
"""
return LoginCheckingEmail(pending_id, ticket, response, USER_COOKIE_NAME, detail_url)
def update_user_groups(user_id, groups):
"""
    Returns a command that updates the groups of the user with the given user_id.
"""
return UpdateUserGroups(user_id, groups)
def find_users_by_email_starting_with(email_prefix=None, cursor=None, page_size=30):
"""
    Returns a command that retrieves users whose email starts with email_prefix, ordered by email.
    It returns at most page_size users. The next page can be retrieved by passing the cursor, which is
    provided in the command's cursor attribute, to a subsequent call.
"""
email_prefix = email_prefix or ''
return ModelSearchCommand(MainUser.query_email_starts_with(email_prefix),
page_size, cursor, cache_begin=None)
def find_users_by_email_and_group(email_prefix=None, group=None, cursor=None, page_size=30):
"""
    Returns a command that retrieves users whose email starts with email_prefix, filtered by group and ordered by email.
    If group is None, only users without any group are searched.
    It returns at most page_size users. The next page can be retrieved by passing the cursor, which is
    provided in the command's cursor attribute, to a subsequent call.
"""
email_prefix = email_prefix or ''
return ModelSearchCommand(MainUser.query_email_and_group(email_prefix, group),
page_size, cursor, cache_begin=None)
def send_passwordless_login_link(email, return_url, lang='en_US', url_login='https://pswdless.appspot.com/rest/login'):
"""
    :param email: The address the login link is sent to
    :param return_url: The url the user will be redirected to after clicking the login link
    :param lang: Locale used for the login email
    :param url_login: Passwordless endpoint used to send the login email
    :return: command that communicates with Passwordless to send the email
"""
return SengLoginEmail(email, return_url, lang, url_login)
def save_or_update_passwordless_app_data(id=None, token=None):
"""
:param id: The App's id
:param token: The App's token
    :return: a command that saves or updates the existing Passwordless App Data
See https://pswdless.appspot.com/api#register-sites
"""
return SaveOrUpdateApp(id, token)
def get_passwordless_app_data():
"""
:return: a command that returns the Passwordless App Data from db
"""
return GetApp()
def get_facebook_app_data():
"""
:return: a command that returns the Facebook App Data from db
"""
return GetFacebookApp()
def save_or_update_facebook_app_data(id=None, token=None):
"""
:param id: The App's id
:param token: The App's token
    :return: a command that saves or updates the existing Facebook App Data
See https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow/v2.0
"""
return SaveOrUpdateFacebookApp(id, token)
def login_facebook(token, response):
"""
:param token: facebook request token
:param response: http response from webapp2
:return: a command that log the facebook user in
"""
return LogFacebookUserIn(token, response, USER_COOKIE_NAME)
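# A minimal, hypothetical usage sketch follows; it is not part of the public
# API and the handler wiring (webapp2-style request/response objects) is an
# assumption for illustration only.
def _usage_sketch(request, response):
    """Illustrative only: facade functions return command objects that do
    their datastore work when execute() is called and expose the outcome on
    the command's result attribute (the same pattern logged_user uses above).
    """
    user = logged_user(request).execute().result
    if user is None:
        # No valid secure cookie: nobody is logged in.
        return None
    # e.g. a logout endpoint would simply drop the cookie again:
    logout(response).execute()
    return user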
|
{
"content_hash": "e61eca31d3e32641125ff1df4f45c4b1",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 119,
"avg_line_length": 35.854838709677416,
"alnum_prop": 0.7192982456140351,
"repo_name": "renzon/gaepermission",
"id": "ab1b83002bbd832eacd2c5dbaa788e639867068f",
"size": "6693",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "gaepermission/facade.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "69266"
}
],
"symlink_target": ""
}
|
from django.conf.urls import include, url
from django.contrib import admin
admin.autodiscover()
urlpatterns = [
url(r"^admin/", admin.site.urls),
url(r"^edge/", include("edge.urls")),
]
|
{
"content_hash": "99f5f22f069a4f9c4c61b349f3e30635",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 41,
"avg_line_length": 21.77777777777778,
"alnum_prop": 0.6887755102040817,
"repo_name": "ginkgobioworks/edge",
"id": "a770d850e9420c4e0f6a3b988f49eee9573b31ef",
"size": "196",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/server/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2553"
},
{
"name": "Dockerfile",
"bytes": "1203"
},
{
"name": "HTML",
"bytes": "32885"
},
{
"name": "JavaScript",
"bytes": "27599"
},
{
"name": "Makefile",
"bytes": "3665"
},
{
"name": "Python",
"bytes": "826040"
},
{
"name": "Shell",
"bytes": "227"
}
],
"symlink_target": ""
}
|
"""A script which generates DHCP configuration for hosts matching a regex.
Usage:
find_vms_by_regex.py <regex> <compute_resource>
e.g.
find_vms_by_regex.py 'ssi2+' 'Online Engineering'
"""
import re
import sys
from psphere.client import Client
from psphere.managedobjects import ComputeResource
client = Client()
vm_regex = sys.argv[1]
p = re.compile(vm_regex)
compute_resource = sys.argv[2]
cr = ComputeResource.get(client, name=compute_resource)
cr.resourcePool.preload("vm", properties=["name"])
for vm in sorted(cr.resourcePool.vm):
if p.match(vm.name) is None:
continue
print(vm.name)
client.logout()
|
{
"content_hash": "f8cbaa05a2843ce18bb286ef347a4e6a",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 74,
"avg_line_length": 22.75,
"alnum_prop": 0.7237048665620094,
"repo_name": "intr1nsic/omoto",
"id": "123194624d5f5b5b5151afaeb4c52cdc252168f8",
"size": "655",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/find_vms_by_regex.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "180119"
}
],
"symlink_target": ""
}
|
"""Tests for tensorflow.ops.argmax_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class ArgMaxTest(test.TestCase):
def _testArg(self,
method,
x,
axis,
expected_values,
use_gpu=False,
expected_err_re=None):
with self.session(use_gpu=use_gpu):
ans = method(x, axis=axis)
if expected_err_re is None:
tf_ans = self.evaluate(ans)
# Defaults to int64 output.
self.assertEqual(np.int64, tf_ans.dtype)
self.assertAllEqual(tf_ans, expected_values)
self.assertShapeEqual(expected_values, ans)
else:
with self.assertRaisesOpError(expected_err_re):
self.evaluate(ans)
def _testBothArg(self,
method,
x,
axis,
expected_values,
expected_err_re=None):
self._testArg(method, x, axis, expected_values, True, expected_err_re)
# Compilation time is too large with XLA/CPU autojit.
if not test_util.is_xla_enabled():
self._testArg(method, x, axis, expected_values, False, expected_err_re)
def _testBasic(self, dtype):
x = np.arange(200, dtype=dtype)
np.random.shuffle(x)
# Check that argmin and argmax match numpy along the primary axis
self._testBothArg(math_ops.argmax, x, 0, x.argmax())
self._testBothArg(math_ops.argmin, x, 0, x.argmin())
def _testDim(self, dtype):
shape = (3, 2, 4, 5, 6, 3, 7)
x = np.arange(functools.reduce(lambda x, y: x * y, shape), dtype=dtype)
np.random.shuffle(x)
x = x.reshape(shape)
# Check that argmin and argmax match numpy along all axes
for axis in range(-7, 7):
self._testBothArg(math_ops.argmax, x, axis, x.argmax(axis))
self._testBothArg(math_ops.argmin, x, axis, x.argmin(axis))
def testFloat(self):
self._testBasic(np.float32)
self._testDim(np.float32)
def testFloatInt32Output(self):
x = np.asarray(100 * np.random.randn(200), dtype=np.float32)
expected_values = x.argmax()
with self.session(use_gpu=True):
ans = math_ops.argmax(x, axis=0, output_type=dtypes.int32)
tf_ans = self.evaluate(ans)
self.assertEqual(np.int32, tf_ans.dtype)
# The values are equal when comparing int32 to int64 because
# the values don't have a range that exceeds 32-bit integers.
self.assertAllEqual(tf_ans, expected_values)
expected_values = x.argmin()
with self.session(use_gpu=True):
ans = math_ops.argmin(x, axis=0, output_type=dtypes.int32)
tf_ans = self.evaluate(ans)
self.assertEqual(np.int32, tf_ans.dtype)
self.assertAllEqual(tf_ans, expected_values)
def testDouble(self):
self._testBasic(np.float64)
self._testDim(np.float64)
def testInt32(self):
self._testBasic(np.int32)
self._testDim(np.int32)
def testInt64(self):
self._testBasic(np.int64)
self._testDim(np.int64)
def testEmpty(self):
with self.cached_session():
for op in math_ops.argmin, math_ops.argmax:
with self.assertRaisesOpError(
r"Reduction axis 0 is empty in shape \[0\]"):
op([], 0).eval()
@test_util.run_deprecated_v1
def testDefaultAxis(self):
with self.cached_session():
for op in math_ops.argmin, math_ops.argmax:
ans = op([1]).eval()
self.assertAllEqual(ans, 0)
@test_util.run_deprecated_v1
def testOutputEmpty(self):
with self.cached_session():
for op in math_ops.argmin, math_ops.argmax:
ret = op(array_ops.zeros(shape=[1, 0, 2]), axis=-1).eval()
self.assertEqual(ret.shape, (1, 0))
if __name__ == "__main__":
test.main()
|
{
"content_hash": "0c272fdbba71ea82737f5603c5021626",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 77,
"avg_line_length": 32.49193548387097,
"alnum_prop": 0.6383718044179697,
"repo_name": "renyi533/tensorflow",
"id": "86d2941b8d3fb00186190b34c180401bdaca7d07",
"size": "4718",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/argmax_op_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "31572"
},
{
"name": "Batchfile",
"bytes": "55269"
},
{
"name": "C",
"bytes": "903309"
},
{
"name": "C#",
"bytes": "8562"
},
{
"name": "C++",
"bytes": "82507951"
},
{
"name": "CMake",
"bytes": "6967"
},
{
"name": "Dockerfile",
"bytes": "113964"
},
{
"name": "Go",
"bytes": "1871425"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "988219"
},
{
"name": "Jupyter Notebook",
"bytes": "550861"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "2073744"
},
{
"name": "Makefile",
"bytes": "66796"
},
{
"name": "Objective-C",
"bytes": "116558"
},
{
"name": "Objective-C++",
"bytes": "319021"
},
{
"name": "PHP",
"bytes": "4236"
},
{
"name": "Pascal",
"bytes": "318"
},
{
"name": "Pawn",
"bytes": "20422"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "37811412"
},
{
"name": "RobotFramework",
"bytes": "1779"
},
{
"name": "Roff",
"bytes": "2705"
},
{
"name": "Ruby",
"bytes": "7464"
},
{
"name": "SWIG",
"bytes": "6846"
},
{
"name": "Shell",
"bytes": "696058"
},
{
"name": "Smarty",
"bytes": "35725"
},
{
"name": "Starlark",
"bytes": "3655758"
},
{
"name": "Swift",
"bytes": "62814"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from sqlalchemy.ext.declarative import declared_attr
from ggrc import db
from ggrc.models.associationproxy import association_proxy
from ggrc.models import mixins
from ggrc.models.object_document import Documentable
from ggrc.models.object_owner import Ownable
from ggrc.models.reflection import PublishOnly
from ggrc.models.relationship import Relatable
from ggrc.models.track_object_state import HasObjectState
class Risk(HasObjectState, mixins.CustomAttributable, mixins.Stateful,
Relatable, Documentable, mixins.Described, Ownable,
mixins.WithContact, mixins.Titled, mixins.Timeboxed,
mixins.Slugged, mixins.Base, db.Model):
__tablename__ = 'risks'
VALID_STATES = [
'Draft',
'Final',
'Effective',
'Ineffective',
'Launched',
'Not Launched',
'In Scope',
'Not in Scope',
'Deprecated',
]
# Overriding mixin to make mandatory
@declared_attr
def description(cls):
return mixins.deferred(db.Column(db.Text, nullable=False), cls.__name__)
risk_objects = db.relationship(
'RiskObject', backref='risk', cascade='all, delete-orphan')
objects = association_proxy('risk_objects', 'object', 'RiskObject')
_publish_attrs = [
'risk_objects',
PublishOnly('objects'),
]
_aliases = {
"contact": "Contact",
"secondary_contact": None,
}
|
{
"content_hash": "ca79cbbe55879b7f5752e64c9094f2a6",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 76,
"avg_line_length": 28.604166666666668,
"alnum_prop": 0.6904588492352512,
"repo_name": "hasanalom/ggrc-core",
"id": "687b18b0f0ef286fd3f8f1e15f27585ea9bf60ce",
"size": "1817",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/ggrc_risks/models/risk.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "235548"
},
{
"name": "Cucumber",
"bytes": "140478"
},
{
"name": "HTML",
"bytes": "943449"
},
{
"name": "JavaScript",
"bytes": "1205686"
},
{
"name": "Makefile",
"bytes": "5936"
},
{
"name": "Mako",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "1874549"
},
{
"name": "Ruby",
"bytes": "1496"
},
{
"name": "Shell",
"bytes": "11719"
}
],
"symlink_target": ""
}
|
import argparse
import json
import os
import re
import urllib.request
_REPO_URL = 'https://repo.maven.apache.org/maven2'
_GROUP_NAME = 'net/sf/kxml'
_MODULE_NAME = 'kxml2'
_FILE_EXT = 'jar'
_OVERRIDE_LATEST = None
_PATCH_VERSION = 'cr1'
def do_latest():
if _OVERRIDE_LATEST is not None:
print(_OVERRIDE_LATEST + f'.{_PATCH_VERSION}')
return
maven_metadata_url = '{}/{}/{}/maven-metadata.xml'.format(
_REPO_URL, _GROUP_NAME, _MODULE_NAME)
metadata = urllib.request.urlopen(maven_metadata_url).read().decode(
'utf-8')
# Do not parse xml with the python included parser since it is susceptible
# to maliciously crafted xmls. Only use regular expression parsing to be
# safe. RE should be enough to handle what we need to extract.
match = re.search('<latest>([^<]+)</latest>', metadata)
if match:
latest = match.group(1)
else:
# if no latest info was found just hope the versions are sorted and the
# last one is the latest (as is commonly the case).
latest = re.findall('<version>([^<]+)</version>', metadata)[-1]
print(latest + f'.{_PATCH_VERSION}')
def get_download_url(version):
# Remove the patch version when getting the download url
version_no_patch, patch = version.rsplit('.', 1)
if patch.startswith('cr'):
version = version_no_patch
file_url = '{0}/{1}/{2}/{3}/{2}-{3}.{4}'.format(_REPO_URL, _GROUP_NAME,
_MODULE_NAME, version,
_FILE_EXT)
file_name = file_url.rsplit('/', 1)[-1]
partial_manifest = {
'url': [file_url],
'name': [file_name],
'ext': '.' + _FILE_EXT,
}
print(json.dumps(partial_manifest))
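# A small self-contained sketch (the example XML is made up) of the regex-only
# parsing that do_latest() performs on maven-metadata.xml. It is illustrative
# and is not called by the 3pp recipe.
def _example_parse_latest(metadata=(
        '<metadata><versioning><latest>2.3.0</latest><versions>'
        '<version>2.2.2</version><version>2.3.0</version>'
        '</versions></versioning></metadata>')):
    match = re.search('<latest>([^<]+)</latest>', metadata)
    if match:
        return match.group(1)
    # Fall back to the last listed <version>, as do_latest() does.
    return re.findall('<version>([^<]+)</version>', metadata)[-1]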
def main():
ap = argparse.ArgumentParser()
sub = ap.add_subparsers()
latest = sub.add_parser('latest')
latest.set_defaults(func=lambda _opts: do_latest())
download = sub.add_parser('get_url')
download.set_defaults(
func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))
opts = ap.parse_args()
opts.func(opts)
if __name__ == '__main__':
main()
|
{
"content_hash": "401207509789c0e6b67fcc4939a8968f",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 31.285714285714285,
"alnum_prop": 0.5917808219178082,
"repo_name": "chromium/chromium",
"id": "13a251b38a8f9bc9b976d8a29352784b286d2ed8",
"size": "2457",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "third_party/android_deps/libs/net_sf_kxml_kxml2/3pp/fetch.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import optparse
import os
import re
import shutil
import subprocess
import sys
import common
import pngdiffer
import suppressor
# Nomenclature:
# x_root - "x"
# x_filename - "x.ext"
# x_path - "path/to/a/b/c/x.ext"
# c_dir - "path/to/a/b/c"
def test_one_file(input_filename, source_dir, working_dir,
pdfium_test_path, image_differ):
input_path = os.path.join(source_dir, input_filename)
pdf_path = os.path.join(working_dir, input_filename)
try:
shutil.copyfile(input_path, pdf_path)
sys.stdout.flush()
subprocess.check_call([pdfium_test_path, '--png', pdf_path])
except subprocess.CalledProcessError as e:
print "FAILURE: " + input_filename + "; " + str(e)
return False
if image_differ.HasDifferences(input_filename, source_dir, working_dir):
return False
return True
def main():
parser = optparse.OptionParser()
parser.add_option('--build-dir', default=os.path.join('out', 'Debug'),
help='relative path from the base source directory')
options, args = parser.parse_args()
finder = common.DirectoryFinder(options.build_dir)
pdfium_test_path = finder.ExecutablePath('pdfium_test')
if not os.path.exists(pdfium_test_path):
print "FAILURE: Can't find test executable '%s'" % pdfium_test_path
print "Use --build-dir to specify its location."
return 1
working_dir = finder.WorkingDir(os.path.join('testing', 'corpus'))
if not os.path.exists(working_dir):
os.makedirs(working_dir)
test_suppressor = suppressor.Suppressor(finder)
image_differ = pngdiffer.PNGDiffer(finder)
# test files are under .../pdfium/testing/corpus.
failures = []
  walk_from_dir = finder.TestingDir('corpus')
input_file_re = re.compile('^[a-zA-Z0-9_.]+[.]pdf$')
for source_dir, _, filename_list in os.walk(walk_from_dir):
for input_filename in filename_list:
if input_file_re.match(input_filename):
input_path = os.path.join(source_dir, input_filename)
if os.path.isfile(input_path):
if test_suppressor.IsSuppressed(input_filename):
continue
if not test_one_file(input_filename, source_dir, working_dir,
pdfium_test_path, image_differ):
failures.append(input_path)
if failures:
print '\n\nSummary of Failures:'
for failure in failures:
print failure
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
|
{
"content_hash": "3e13053ffd1cc3f65ed9a18582e215c7",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 74,
"avg_line_length": 32.1578947368421,
"alnum_prop": 0.662847790507365,
"repo_name": "pwaller/pdfium",
"id": "1876581e4069e4332a4564eff9fe29f3658de1b6",
"size": "2627",
"binary": false,
"copies": "4",
"ref": "refs/heads/get-origin",
"path": "testing/tools/run_corpus_tests.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8560543"
},
{
"name": "C++",
"bytes": "10431841"
},
{
"name": "CMake",
"bytes": "4070"
},
{
"name": "Makefile",
"bytes": "855"
},
{
"name": "Objective-C",
"bytes": "30562"
},
{
"name": "Python",
"bytes": "64925"
}
],
"symlink_target": ""
}
|
import maya.cmds as mc
import maya.mel as mel
samplerInfo = mc.shadingNode("samplerInfo", asUtility=True)
extraTex = mel.eval("vrayAddRenderElement ExtraTexElement;")
mc.connectAttr("%s.pointWorld" %samplerInfo, "%s.vray_texture_extratex" %extraTex, force = True)
mc.setAttr("%s.vray_considerforaa_extratex" %extraTex, 0)
mc.setAttr("%s.vray_filtering_extratex" %extraTex, 0)
name = "PWORLD"
extraTex = mc.rename(extraTex, name)
mc.setAttr("%s.vray_name_extratex" %extraTex, name, type="string")
mc.setAttr("%s.vray_explicit_name_extratex" %extraTex, name, type="string")
|
{
"content_hash": "7df21f1ccd2414f3b23642b703daddb3",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 96,
"avg_line_length": 41,
"alnum_prop": 0.7526132404181185,
"repo_name": "hibernationTheory/maya_python_misc_scripts",
"id": "ff2bfe38837f5c2bff285a0e01e01e027643d050",
"size": "574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vray_misc/vray_utility-pworld.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7488"
}
],
"symlink_target": ""
}
|
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine, Column, Integer, ForeignKey, String
from sqlalchemy.orm import sessionmaker
import os
# Database connection
_DATABASE = 'sqlite:///db.sqlite3'
_DEBUG = False
# ORM base
_Base = declarative_base()
class Users(_Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True)
token = Column(String)
class Images(_Base):
__tablename__ = 'images'
id = Column(Integer, primary_key=True)
elo = Column(Integer, nullable=False, default=1000)
class Matches(_Base):
__tablename__ = 'matches'
id = Column(Integer, primary_key=True)
user = Column(Integer, ForeignKey(Users.id), nullable=False)
winner = Column(Integer, ForeignKey(Images.id), nullable=False)
loser = Column(Integer, ForeignKey(Images.id), nullable=False)
# Connect to database
_engine = create_engine(_DATABASE, echo=_DEBUG, convert_unicode=True)
_session_factory = sessionmaker(bind=_engine)
Session = _session_factory()
# Initialize database if it doesn't exist
if not os.path.exists('db.sqlite3'):
_Base.metadata.create_all(_engine)
with open('list') as f:
ids = [int(line[:-5]) for line in f if line]
Session.bulk_save_objects(Images(id=id) for id in set(ids))
Session.commit()
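# A minimal sketch (hypothetical helper, not used by the server code) showing
# how the tables relate: a Matches row records that a user preferred one image
# over another; any Elo update on Images.elo would be applied elsewhere.
def record_match(user_id, winner_image_id, loser_image_id, session=Session):
    session.add(Matches(user=user_id, winner=winner_image_id,
                        loser=loser_image_id))
    session.commit()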
|
{
"content_hash": "4c0908f5db436c6746305b2b9c610ca1",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 73,
"avg_line_length": 28.58695652173913,
"alnum_prop": 0.7019011406844107,
"repo_name": "Atilla106/facemash",
"id": "dd457bcb9524153abfe5c33f05069b5ecb9ca821",
"size": "1315",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "server/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "622"
},
{
"name": "HTML",
"bytes": "22100"
},
{
"name": "JavaScript",
"bytes": "9390"
},
{
"name": "Python",
"bytes": "4899"
}
],
"symlink_target": ""
}
|
'''
Created on 25 Mar 2016
@author: bogdan
python3 required for operation -- due to Unicode issues
'''
import sys, re, os
import copy
# from p010graphems.levenshtein import levenshtein
from collections import defaultdict
from collections import Counter
class clGraphonolev(object):
'''
class computes Levenshtein distance for graphonological representations
the purpose is to plug the module into external programmes to compute modified variants of Lev edit distance
'''
def __init__(self, Debug = False, DebugFile = 'md060graphonolev-debug.txt', DebugMode = 'a'):
'''
Constructor
'''
# self.DFeatures = {}
self.readFeat()
self.BDebug = False
if Debug == True:
self.BDebug = True
self.FDebug = open(DebugFile, DebugMode)
def readFeat(self):
'''
        reading a table of phonological features for each letter, only needed for feature-based Levenshtein distance calculations
'''
self.DGraphemes = defaultdict(list) # the main dictionary of the project: mapping: grapheme, language --> feature sets
FFeatures = open('md060graphonoLev-phonetic-features.tsv', 'rU')
for SLine in FFeatures:
if re.match('#', SLine):
continue
SLine = SLine.rstrip()
LLine = re.split('\t', SLine)
SGrapheme = LLine[0]
SLanguage = LLine[1]
LFeatures = LLine[2:]
LLanguages = re.split(';', SLanguage)
# main representation mapping: create entries for all respective languages
for lang in LLanguages:
self.DGraphemes[(lang, SGrapheme)] = LFeatures
# debugging, can be removed...
'''
FDebug.write('%(lang)s, %(SGrapheme)s, \n' % locals())
for el in LFeatures:
FDebug.write('\t%(el)s\n' % locals())
'''
def str2Features(self, SWord, SLangID):
LGraphFeat = [] # list of tuples: character + list - for each character in the word we get feature list
LWordChars = list(SWord)
for ch in LWordChars:
# FDebug.write('%(SLangID)s, %(ch)s\t' % locals())
try:
LFeatures = self.DGraphemes[(SLangID, ch)]
LGraphFeat.append((ch, LFeatures)) # data structure for LGraphFeat - list of graphemic features
# FDebug.write('features: %(LFeatures)s\n' % locals())
except:
# FDebug.write('no features found\n')
sys.stderr.write('no features found\n')
return LGraphFeat # return list of lists
def compareGraphFeat(self, LGraphFeatA, LGraphFeatB):
# works for pairs of characters (their feature lists).
# Prec, Rec, FMeasure = (0, 0, 0)
# IOverlap = 0
ILenA = len(LGraphFeatA)
ILenB = len(LGraphFeatB)
a_multiset = Counter(LGraphFeatA)
b_multiset = Counter(LGraphFeatB)
overlap = list((a_multiset & b_multiset).elements())
IOverlap = len(overlap)
# a_remainder = list((a_multiset - b_multiset).elements())
# b_remainder = list((b_multiset - a_multiset).elements())
# Precision of List A:
try:
Prec = IOverlap / ILenA
Rec = IOverlap / ILenB
FMeasure = (2 * Prec * Rec) / (Prec + Rec)
except:
Prec, Rec, FMeasure = (0, 0, 0)
return FMeasure
def computeLevenshtein(self, SW1, SW2, SLangID1, SLangID2):
'''
        converts the two character strings to lists of tuples: (character, phonological feature list)
'''
s1 = self.str2Features(SW1, SLangID1)
s2 = self.str2Features(SW2, SLangID2)
l1 = len(s1)
l2 = len(s2)
# lAve = (l1 + l2) / 2 # maximum for edit distance ?
lAve = max(l1, l2)
lAveFeats1 = 0 # number of features in each word
lAveFeats2 = 0
for (ch, el) in s1:
if self.BDebug == True:
SEl = str(el)
self.FDebug.write('%(ch)s\t%(SEl)s\n' % locals())
lAveFeats1 += len(el)
for (ch, el) in s2:
if self.BDebug == True:
SEl = str(el)
self.FDebug.write('%(ch)s\t%(SEl)s\n' % locals())
lAveFeats2 += len(el)
lAveFeats = (lAveFeats1 + lAveFeats2) / 2 # average number of features per two words
matrix = [list(range(l1 + 1))] * (l2 + 1)
matrix0 = copy.deepcopy(matrix)
for zz in range(l2 + 1):
matrix[zz] = list(range(zz,zz + l1 + 1))
matrix0[zz] = copy.deepcopy(matrix[zz])
for zz in range(0,l2):
for sz in range(0,l1):
# here: 1. compare sets of features; add the minimal substitution score here...
# calculate P, R, F-measure of the feature sets for each symbol, report F-measure:
# print(str(s1[sz]) + '\t' + str(s2[zz]))
(ch1, LFeat1) = s1[sz]
(ch2, LFeat2) = s2[zz]
# FMeasure = self.compareGraphFeat(s1[sz], s2[zz])
FMeasure = self.compareGraphFeat(LFeat1, LFeat2)
OneMinusFMeasure = 1 - FMeasure
# print('FMeasure ' + str(FMeasure))
# if F-Measure = 1 then feature vectors are identical; we need to subtract it from 1 (at the end):
# matrix[zz+1][sz+1] = min(matrix[zz+1][sz] + 1, matrix[zz][sz+1] + 1, matrix[zz][sz] + 1)
# Main work is here: # experimental question:
matrix[zz+1][sz+1] = min(matrix[zz+1][sz] + 1, matrix[zz][sz+1] + 1, matrix[zz][sz] + OneMinusFMeasure)
# matrix[zz+1][sz+1] = min(matrix[zz+1][sz] + 0.4, matrix[zz][sz+1] + 0.4, matrix[zz][sz] + OneMinusFMeasure)
# insertion cost adjustment -- revert to 1 or lowering to 0.4 ?
# now classical levenshtein distance
# if s1[sz] == s2[zz]:
if ch1 == ch2:
matrix0[zz+1][sz+1] = min(matrix0[zz+1][sz] + 1, matrix0[zz][sz+1] + 1, matrix0[zz][sz])
else:
matrix0[zz+1][sz+1] = min(matrix0[zz+1][sz] + 1, matrix0[zz][sz+1] + 1, matrix0[zz][sz] + 1)
# print("That's the Levenshtein-Matrix:")
# self.printMatrix(matrix)
Levenshtein0 = matrix0[l2][l1] # classical Levenshtein distance
Levenshtein1 = matrix[l2][l1]
# debug:
if self.BDebug == True:
self.printMatrix(matrix0)
self.printMatrix(matrix)
try:
Levenshtein0Norm = Levenshtein0 / lAve
except:
Levenshtein0Norm = 1
try:
# Levenshtein1Norm = Levenshtein1 / lAveFeats
Levenshtein1Norm = Levenshtein1 / lAve
except:
Levenshtein1Norm = 1
# sys.stderr.write('%(SW1)s, %(SW2)s, \n\t%(s1)s\n\t%(s2)s\n\t%(Levenshtein1).3f\n\t%(lAveFeats)\n\n' % locals())
try:
sys.stderr.write('%(SW1)s\n' % locals())
except:
sys.stderr.write('cannot write\n')
try:
sys.stderr.write('%(SW2)s\n' % locals())
except:
sys.stderr.write('cannot write\n')
try:
sys.stderr.write('%(s1)s\n' % locals())
except:
sys.stderr.write('cannot write s1\n')
try:
sys.stderr.write('%(s2)s\n' % locals())
except:
sys.stderr.write('cannot write s2\n')
return (Levenshtein0, Levenshtein1, Levenshtein0Norm, Levenshtein1Norm)
def printMatrix(self, m):
self.FDebug.write(' \n')
for line in m:
spTupel = ()
breite = len(line)
for column in line:
spTupel = spTupel + (column, )
self.FDebug.write(" %3.1f "*breite % spTupel)
self.FDebug.write('\n')
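# A small worked example (illustrative feature names only; nothing in the
# module calls it). The substitution cost used in computeLevenshtein is
# 1 - F-measure of the overlap between the two feature multisets, so identical
# sets cost 0 and disjoint sets cost 1. Note the constructor loads the feature
# table file, so this assumes that file is present next to the module.
def _substitution_cost_example():
    lev = clGraphonolev()
    # two hypothetical segments sharing 2 of their 3 features each:
    fmeasure = lev.compareGraphFeat(['voiced', 'plosive', 'labial'],
                                    ['voiced', 'plosive', 'dental'])
    return 1 - fmeasure  # P = R = 2/3, F = 2/3, cost = 1/3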
# using the class: initialising and computing Lev distances
if __name__ == '__main__':
FInput = open(sys.argv[1], 'rU')
SLangID1 = sys.argv[2]
SLangID2 = sys.argv[3]
SDebug = sys.argv[4]
if SDebug == 'Debug':
BDebug = True
else:
BDebug = False
OGraphonolev = clGraphonolev(BDebug)
# OGraphonolev.readFeat()
for SLine in FInput:
SLine = SLine.rstrip()
try:
(SW1, SW2) = re.split('\t', SLine, 1)
except:
SW1 = '' ; SW2 = ''
# FDebug.write('SW1 = %(SW1)s; SLangID1 = %(SLangID1)s\n' % locals())
# LGraphFeat1 = OGraphonolev.str2Features(SW1, SLangID1)
# FDebug.write('SW2 = %(SW2)s; SLangID2 = %(SLangID2)s\n' % locals())
# LGraphFeat2 = OGraphonolev.str2Features(SW2, SLangID2)
(Lev0, Lev1, Lev0Norm, Lev1Norm) = OGraphonolev.computeLevenshtein(SW1, SW2, SLangID1, SLangID2)
sys.stdout.write('%(SW1)s, %(SW2)s, %(Lev0)d, %(Lev1).4f, %(Lev0Norm).4f, %(Lev1Norm).4f\n' % locals())
|
{
"content_hash": "74786f511ec41e99596630b2e8a15171",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 122,
"avg_line_length": 31.17142857142857,
"alnum_prop": 0.6460652088516433,
"repo_name": "bogdanbabych/morphosyntax",
"id": "46511244cc2d58022c8d0b95e64ffbda576f3def",
"size": "7637",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/s010cognatematch/md060graphonoLev.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "179613"
},
{
"name": "Shell",
"bytes": "47106"
},
{
"name": "TeX",
"bytes": "6441"
}
],
"symlink_target": ""
}
|
__author__ = 'Asus'
import sys
import getopt
from QuestionsHandling.QuestionBase import QuestionBase
from Classifiers.GloveCenteredESLExtendedClassifier import GloveClassifier
from Utils.utilities import load_stf
from Utils.retrofitNew_gloveInstance import retrofit_new
from Utils.retrofitNew_gloveInstance import read_lexicon
import numpy as np
help_message = '''
$ python questionAnswering.py -v <vectorsFile> -q <questionsFile> -d <dimensions> [-o outputFile] [-h]
-v or --vectors to specify path to the word vectors input file in Glove text format
-q or --questions to specify path to the questions input file in "Question...[questionWord]...Question.|answer1|answer2|answer3|answer4":correctAnswer" format
-o or --output to optionally set path to output word sense vectors file (<vectorsFile>.results is used by default)
-h or --help (this message is displayed)
-r or --retro 1 to run the retrofit postprocessing on the vector space model, 0 to skip
'''
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def readCommandLineInput(argv):
try:
try:
#specify the possible option switches
opts, _ = getopt.getopt(argv[1:], "hv:q:o:d:t:r:l:", ["help", "vectors=","questions=", "output=", "dimensions=", "type=","retro=","lexicon="])
except getopt.error, msg:
raise Usage(msg)
vectorsFile = None
questionsFile = None
outputFile = None
type = "Turney"
dims = 0
setOutput = False
retro = 1
lexicon = None
for option, value in opts:
if option in ("-h", "--help"):
raise Usage(help_message)
elif option in ("-v", "--vectors"):
vectorsFile = value
elif option in ("-q", "--ontology"):
questionsFile = value
elif option in ("-o", "--output"):
outputFile = value
setOutput = True
elif option in ("-d", "--dimensions"):
dims = value
elif option in ("-t", "--type"):
type = value
elif option in ("-r","--retro"):
retro = value
elif option in ("-l","--lexicon"):
lexicon = value
if (vectorsFile==None) or (questionsFile==None) or (dims==None):
raise Usage(help_message)
else:
if not setOutput:
outputFile = vectorsFile + '.results'
return (vectorsFile, questionsFile, dims, outputFile, type,retro, lexicon)
except Usage, err:
print str(err.msg)
return 2
if __name__ == "__main__":
commandParse = readCommandLineInput(sys.argv)
#commandParse = ('glove.6B.50d.txt','toefl.qst',50,'util.results','TOEFL')
if commandParse==2:
sys.exit(2)
qb = QuestionBase(commandParse[1],commandParse[4])
print(commandParse)
instance = load_stf(commandParse[0],int(commandParse[2]))
lexicon = read_lexicon(commandParse[6], instance)
print "starting retrofit procedure"
instance_r = retrofit_new(instance, lexicon, 10)
print "retrofit done"
classifier = GloveClassifier()
classifier.GloveInstance = instance_r
classifier.Centroids = np.load('//mnt/raid0/kuba/vsm/models/centroids_dir/ppdb_centroids').item()
oFile = open(commandParse[3],'w+')
qb.classify(classifier,oFile)
oFile.close()
|
{
"content_hash": "7b9b4c46449dbbb8123878d7ed666fc5",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 158,
"avg_line_length": 38.57303370786517,
"alnum_prop": 0.6154966501602097,
"repo_name": "dudenzz/word_embedding",
"id": "09fb93edfe3ef5370efc4a1c9d67b7936edacc50",
"size": "3433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SimilarityClassification/answerQuestions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "154250"
}
],
"symlink_target": ""
}
|
import contextlib
import logging
from unittest import mock
from neutron.api import extensions as api_ext
from neutron.common import config
import neutron.extensions as nextensions
from oslo_config import cfg
from oslo_utils import importutils
from oslo_utils import uuidutils
import webob.exc
from networking_sfc.db import flowclassifier_db as fdb
from networking_sfc.db import sfc_db
from networking_sfc import extensions
from networking_sfc.extensions import flowclassifier as fc_ext
from networking_sfc.extensions import servicegraph as sg_ext
from networking_sfc.extensions import sfc
from networking_sfc.extensions import tap as tap_ext
from networking_sfc.tests import base
from networking_sfc.tests.unit.db import test_flowclassifier_db
DB_SFC_PLUGIN_CLASS = (
"networking_sfc.db.sfc_db.SfcDbPlugin"
)
extensions_path = ':'.join(extensions.__path__ + nextensions.__path__)
class SfcDbPluginTestCaseBase(
base.BaseTestCase
):
def _assert_port_chain_equal(self, res_port_chain, expected):
        # Flow classifiers are stored in a list; compare their items without regard to order
for k, v in expected.items():
if type(v) is list:
self.assertCountEqual(res_port_chain[k], v)
else:
self.assertEqual(res_port_chain[k], v)
def _create_port_chain(
self, fmt, port_chain=None, expected_res_status=None, **kwargs
):
ctx = kwargs.get('context', None)
tenant_id = kwargs.get('tenant_id', self._tenant_id)
data = {'port_chain': port_chain or {}}
if ctx is None:
data['port_chain'].update({'tenant_id': tenant_id})
req = self.new_create_request(
'port_chains', data, fmt, context=ctx
)
res = req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(expected_res_status, res.status_int)
return res
@contextlib.contextmanager
def port_chain(self, fmt=None, port_chain=None, do_delete=True, **kwargs):
if not fmt:
fmt = self.fmt
res = self._create_port_chain(fmt, port_chain, **kwargs)
if res.status_int >= 400:
logging.error('create port chain result: %s', res)
raise webob.exc.HTTPClientError(code=res.status_int)
port_chain = self.deserialize(fmt or self.fmt, res)
yield port_chain
if do_delete:
self._delete('port_chains', port_chain['port_chain']['id'])
def _create_port_pair_group(
self, fmt, port_pair_group=None, expected_res_status=None, **kwargs
):
ctx = kwargs.get('context', None)
tenant_id = kwargs.get('tenant_id', self._tenant_id)
data = {'port_pair_group': port_pair_group or {}}
if ctx is None:
data['port_pair_group'].update({'tenant_id': tenant_id})
req = self.new_create_request(
'port_pair_groups', data, fmt, context=ctx
)
res = req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(expected_res_status, res.status_int)
return res
@contextlib.contextmanager
def port_pair_group(
self, fmt=None, port_pair_group=None, do_delete=True, **kwargs
):
if not fmt:
fmt = self.fmt
res = self._create_port_pair_group(fmt, port_pair_group, **kwargs)
if res.status_int >= 400:
logging.error('create port pair group result: %s', res)
raise webob.exc.HTTPClientError(code=res.status_int)
port_pair_group = self.deserialize(fmt or self.fmt, res)
yield port_pair_group
if do_delete:
self._delete(
'port_pair_groups',
port_pair_group['port_pair_group']['id'])
def _create_port_pair(
self, fmt, port_pair=None, expected_res_status=None, **kwargs
):
ctx = kwargs.get('context', None)
tenant_id = kwargs.get('tenant_id', self._tenant_id)
data = {'port_pair': port_pair or {}}
if ctx is None:
data['port_pair'].update({'tenant_id': tenant_id})
req = self.new_create_request(
'port_pairs', data, fmt, context=ctx
)
res = req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(expected_res_status, res.status_int)
return res
@contextlib.contextmanager
def port_pair(self, fmt=None, port_pair=None, do_delete=True, **kwargs):
if not fmt:
fmt = self.fmt
res = self._create_port_pair(fmt, port_pair, **kwargs)
if res.status_int >= 400:
logging.error('create port pair result: %s', res)
raise webob.exc.HTTPClientError(code=res.status_int)
port_pair = self.deserialize(fmt or self.fmt, res)
yield port_pair
if do_delete:
self._delete('port_pairs', port_pair['port_pair']['id'])
def _create_service_graph(
self, fmt, service_graph=None, expected_res_status=None, **kwargs
):
ctx = kwargs.get('context', None)
project_id = kwargs.get('project_id', self._tenant_id)
data = {'service_graph': service_graph or {}}
if ctx is None:
data['service_graph'].update({'project_id': project_id})
req = self.new_create_request(
'service_graphs', data, fmt, context=ctx
)
res = req.get_response(self.ext_api)
if expected_res_status:
self.assertEqual(expected_res_status, res.status_int)
return res
@contextlib.contextmanager
def service_graph(self, fmt=None,
service_graph=None, do_delete=True, **kwargs):
if not fmt:
fmt = self.fmt
res = self._create_service_graph(fmt, service_graph, **kwargs)
if res.status_int >= 400:
logging.error('create Service Graph result: %s', res)
raise webob.exc.HTTPClientError(code=res.status_int)
service_graph = self.deserialize(fmt or self.fmt, res)
yield service_graph
if do_delete:
self._delete('service_graphs', service_graph[
'service_graph']['id'])
def _get_expected_port_pair(self, port_pair):
return {
'name': port_pair.get('name') or '',
'description': port_pair.get('description') or '',
'egress': port_pair.get('egress'),
'ingress': port_pair.get('ingress'),
'service_function_parameters': port_pair.get(
'service_function_parameters') or {
'correlation': None, 'weight': 1
}
}
def _test_create_port_pair(self, port_pair, expected_port_pair=None):
if expected_port_pair is None:
expected_port_pair = self._get_expected_port_pair(port_pair)
with self.port_pair(port_pair=port_pair) as pp:
for k, v in expected_port_pair.items():
self.assertEqual(pp['port_pair'][k], v)
def _test_create_port_pairs(
self, port_pairs, expected_port_pairs=None
):
if port_pairs:
port_pair = port_pairs.pop()
if expected_port_pairs:
expected_port_pair = expected_port_pairs.pop()
else:
expected_port_pair = self._get_expected_port_pair(port_pair)
with self.port_pair(port_pair=port_pair) as pp:
for k, v in expected_port_pair.items():
self.assertEqual(pp['port_pair'][k], v)
def _get_expected_port_pair_group(self, port_pair_group):
ret = {
'name': port_pair_group.get('name') or '',
'description': port_pair_group.get('description') or '',
'port_pairs': port_pair_group.get('port_pairs') or [],
'port_pair_group_parameters': port_pair_group.get(
'port_pair_group_parameters'
) or {'lb_fields': [],
'ppg_n_tuple_mapping': {'ingress_n_tuple': {},
'egress_n_tuple': {}}}
}
if port_pair_group.get('group_id'):
ret['group_id'] = port_pair_group['group_id']
return ret
def _test_create_port_pair_group(
self, port_pair_group, expected_port_pair_group=None
):
if expected_port_pair_group is None:
expected_port_pair_group = self._get_expected_port_pair_group(
port_pair_group)
with self.port_pair_group(port_pair_group=port_pair_group) as pg:
for k, v in expected_port_pair_group.items():
self.assertEqual(pg['port_pair_group'][k], v)
def _test_create_port_pair_groups(
self, port_pair_groups, expected_port_pair_groups=None
):
if port_pair_groups:
port_pair_group = port_pair_groups.pop()
if expected_port_pair_groups:
expected_port_pair_group = expected_port_pair_groups.pop()
else:
expected_port_pair_group = self._get_expected_port_pair_group(
port_pair_group)
with self.port_pair_group(port_pair_group=port_pair_group) as pg:
for k, v in expected_port_pair_group.items():
self.assertEqual(pg['port_pair_group'][k], v)
@staticmethod
def _get_expected_port_chain(port_chain):
chain_params = port_chain.get('chain_parameters') or dict()
chain_params.setdefault('correlation', 'mpls')
chain_params.setdefault('symmetric', False)
ret = {
'name': port_chain.get('name') or '',
'description': port_chain.get('description') or '',
'port_pair_groups': port_chain['port_pair_groups'],
'flow_classifiers': port_chain.get('flow_classifiers') or [],
'chain_parameters': chain_params
}
if port_chain.get('chain_id'):
ret['chain_id'] = port_chain['chain_id']
return ret
def _test_create_port_chain(self, port_chain, expected_port_chain=None):
if expected_port_chain is None:
expected_port_chain = self._get_expected_port_chain(port_chain)
with self.port_chain(port_chain=port_chain) as pc:
for k, v in expected_port_chain.items():
self.assertEqual(pc['port_chain'][k], v)
def _test_create_port_chains(
self, port_chains, expected_port_chains=None
):
if port_chains:
port_chain = port_chains.pop()
if expected_port_chains:
expected_port_chain = expected_port_chains.pop()
else:
expected_port_chain = self._get_expected_port_chain(
port_chain)
with self.port_chain(port_chain=port_chain) as pc:
for k, v in expected_port_chain.items():
self.assertEqual(pc['port_chain'][k], v)
@staticmethod
def _get_expected_graph(service_graph):
ret = {
'name': service_graph.get('name') or '',
'description': service_graph.get('description') or '',
'port_chains': service_graph.get('port_chains')
}
return ret
def _test_create_service_graph(self, service_graph, expected_graph=None):
if expected_graph is None:
expected_graph = self._get_expected_graph(service_graph)
with self.service_graph(service_graph=service_graph) as graph:
for k, v in expected_graph.items():
self.assertEqual(graph['service_graph'][k], v)
class SfcDbPluginTestCase(
base.NeutronDbPluginV2TestCase,
test_flowclassifier_db.FlowClassifierDbPluginTestCaseBase,
SfcDbPluginTestCaseBase
):
resource_prefix_map = dict([
(k, sfc.SFC_PREFIX)
for k in sfc.RESOURCE_ATTRIBUTE_MAP.keys()
] + [
(k, fc_ext.FLOW_CLASSIFIER_PREFIX)
for k in fc_ext.RESOURCE_ATTRIBUTE_MAP.keys()
] + [
(k, sg_ext.SG_PREFIX)
for k in sg_ext.RESOURCE_ATTRIBUTE_MAP.keys()
])
def setUp(self, core_plugin=None, sfc_plugin=None,
flowclassifier_plugin=None, ext_mgr=None):
mock_log_p = mock.patch.object(sfc_db, 'LOG')
self.mock_log = mock_log_p.start()
cfg.CONF.register_opts(sfc.sfc_quota_opts, 'QUOTAS')
if not sfc_plugin:
sfc_plugin = DB_SFC_PLUGIN_CLASS
if not flowclassifier_plugin:
flowclassifier_plugin = (
test_flowclassifier_db.DB_FLOWCLASSIFIER_PLUGIN_CLASS)
service_plugins = {
sfc.SFC_EXT: sfc_plugin,
fc_ext.FLOW_CLASSIFIER_EXT: flowclassifier_plugin
}
sfc_db.SfcDbPlugin.supported_extension_aliases = [
sfc.SFC_EXT, sg_ext.SG_EXT, tap_ext.TAP_EXT]
sfc_db.SfcDbPlugin.path_prefix = sfc.SFC_PREFIX
fdb.FlowClassifierDbPlugin.supported_extension_aliases = [
fc_ext.FLOW_CLASSIFIER_EXT]
fdb.FlowClassifierDbPlugin.path_prefix = (
fc_ext.FLOW_CLASSIFIER_PREFIX
)
super(SfcDbPluginTestCase, self).setUp(
ext_mgr=ext_mgr,
plugin=core_plugin,
service_plugins=service_plugins
)
if not ext_mgr:
self.sfc_plugin = importutils.import_object(sfc_plugin)
self.flowclassifier_plugin = importutils.import_object(
flowclassifier_plugin)
# Note (vks1): Auto-load extensions.
ext_mgr = api_ext.PluginAwareExtensionManager.get_instance()
app = config.load_paste_app('extensions_test_app')
self.ext_api = api_ext.ExtensionMiddleware(app, ext_mgr=ext_mgr)
def test_create_port_chain(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'port_pair_groups': [pg['port_pair_group']['id']]})
def test_quota_create_port_chain(self):
cfg.CONF.set_override('quota_port_chain', 3, group='QUOTAS')
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg3, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg4:
self._create_port_chain(
self.fmt, {
'port_pair_groups': [pg1['port_pair_group']['id']]
}, expected_res_status=201)
self._create_port_chain(
self.fmt, {
'port_pair_groups': [pg2['port_pair_group']['id']]
}, expected_res_status=201)
self._create_port_chain(
self.fmt, {
'port_pair_groups': [pg3['port_pair_group']['id']]
}, expected_res_status=201)
self._create_port_chain(
self.fmt, {
'port_pair_groups': [pg4['port_pair_group']['id']]
}, expected_res_status=409)
def test_create_port_chain_all_fields(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'port_pair_groups': [pg['port_pair_group']['id']],
'flow_classifiers': [],
'name': 'abc',
'description': 'def',
'chain_parameters': {'symmetric': False, 'correlation': 'mpls'}
})
def test_create_port_chain_all_fields_with_chain_id(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'port_pair_groups': [pg['port_pair_group']['id']],
'flow_classifiers': [],
'name': 'abc',
'description': 'def',
'chain_parameters': {'symmetric': False,
'correlation': 'mpls'},
'chain_id': 99
})
def test_create_port_chain_all_fields_with_symmetric(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'port_pair_groups': [pg['port_pair_group']['id']],
'flow_classifiers': [],
'name': 'abc',
'description': 'def',
'chain_parameters': {'symmetric': True, 'correlation': 'mpls'}
})
def test_create_port_chain_multi_port_pair_groups(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
self._test_create_port_chain({
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id']
]
})
def test_create_port_chain_shared_port_pair_groups(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2, self.port_pair_group(
port_pair_group={}
) as pg3:
self._test_create_port_chains([{
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id']
]
}, {
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg3['port_pair_group']['id']
]
}])
def test_create_port_chain_shared_port_pair_groups_different_order(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
self._test_create_port_chains([{
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id']
]
}, {
'port_pair_groups': [
pg2['port_pair_group']['id'],
pg1['port_pair_group']['id']
]
}])
def test_create_port_chain_with_empty_chain_parameters(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'chain_parameters': {},
'port_pair_groups': [pg['port_pair_group']['id']]
})
def test_create_port_chain_with_none_chain_parameters(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'chain_parameters': None,
'port_pair_groups': [pg['port_pair_group']['id']]
})
def test_create_port_chain_with_default_chain_parameters(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'chain_parameters': {'symmetric': False,
'correlation': 'mpls'},
'port_pair_groups': [pg['port_pair_group']['id']]
})
def test_create_port_chain_with_nsh_correlation(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'chain_parameters': {'symmetric': False,
'correlation': 'nsh'},
'port_pair_groups': [pg['port_pair_group']['id']]
})
def test_create_port_chain_with_nsh_correlation_incompatible_ppg_fail(
self):
with self.port(
name='port1',
device_id='default'
) as port1, self.port(
name='port2',
device_id='default'
) as port2:
with self.port_pair(port_pair={
'ingress': port1['port']['id'],
'egress': port1['port']['id'],
'service_function_parameters': {'correlation': 'nsh'}
}) as pp1, self.port_pair(port_pair={
'ingress': port2['port']['id'],
'egress': port2['port']['id'],
'service_function_parameters': {'correlation': 'mpls'}
}) as pp2:
with self.port_pair_group(port_pair_group={
'port_pairs': [
pp1['port_pair']['id']
]
}) as ppg1, self.port_pair_group(port_pair_group={
'port_pairs': [
pp2['port_pair']['id']
]
}) as ppg2:
self._create_port_chain(
self.fmt, {
'chain_parameters': {'symmetric': False,
'correlation': 'nsh'},
'port_pair_groups': [
ppg1['port_pair_group']['id'],
ppg2['port_pair_group']['id']],
}, expected_res_status=400)
def test_create_port_chains_with_conflicting_chain_ids(self):
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2:
self._create_port_chain(
self.fmt, {
'port_pair_groups': [pg1['port_pair_group']['id']],
'chain_id': 88
}, expected_res_status=201)
self._create_port_chain(
self.fmt, {
'port_pair_groups': [pg2['port_pair_group']['id']],
'chain_id': 88
}, expected_res_status=400
)
def test_create_port_chain_with_none_flow_classifiers(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'flow_classifiers': None,
'port_pair_groups': [pg['port_pair_group']['id']]
})
def test_create_port_chain_with_empty_flow_classifiers(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'flow_classifiers': [],
'port_pair_groups': [pg['port_pair_group']['id']]
})
def test_create_port_chain_with_flow_classifiers(self):
with self.port(
name='test1'
) as port:
with self.flow_classifier(flow_classifier={
'logical_source_port': port['port']['id']
}) as fc:
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'flow_classifiers': [fc['flow_classifier']['id']],
'port_pair_groups': [pg['port_pair_group']['id']]
})
def test_create_port_chain_with_multi_flow_classifiers(self):
with self.port(
name='test1'
) as port:
with self.flow_classifier(flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port['port']['id']
}) as fc1, self.flow_classifier(flow_classifier={
'source_ip_prefix': '192.168.101.0/24',
'logical_source_port': port['port']['id']
}) as fc2:
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'flow_classifiers': [
fc1['flow_classifier']['id'],
fc2['flow_classifier']['id']
],
'port_pair_groups': [pg['port_pair_group']['id']]
})
def test_create_port_chain_with_flow_classifiers_basic_the_same(self):
with self.port(
name='test1'
) as port1, self.port(
name='test2'
) as port2:
with self.flow_classifier(flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port1['port']['id']
}) as fc1, self.flow_classifier(flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port2['port']['id']
}) as fc2:
with self.port_pair_group(port_pair_group={}) as pg:
self._test_create_port_chain({
'flow_classifiers': [
fc1['flow_classifier']['id'],
fc2['flow_classifier']['id']
],
'port_pair_groups': [pg['port_pair_group']['id']]
})
def test_create_multi_port_chain_with_flow_classifiers(self):
with self.port(
name='test1'
) as port:
with self.flow_classifier(flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port['port']['id']
}) as fc1, self.flow_classifier(flow_classifier={
'source_ip_prefix': '192.168.101.0/24',
'logical_source_port': port['port']['id']
}) as fc2:
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(
port_chain={
'flow_classifiers': [
fc1['flow_classifier']['id']
],
'port_pair_groups': [
pg1['port_pair_group']['id']
]
}
):
self._test_create_port_chain({
'flow_classifiers': [
fc2['flow_classifier']['id']
],
'port_pair_groups': [pg2['port_pair_group']['id']]
})
def test_create_multi_port_chain_with_conflict_flow_classifiers(self):
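        # This test verifies that two flow classifiers with the same matching
        # criteria (differing only in logical source port) cannot be attached
        # to two different port chains; the second chain is rejected with 400.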
with self.port(
name='test1'
) as port1, self.port(
name='test2'
) as port2:
with self.flow_classifier(flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port1['port']['id']
}) as fc1, self.flow_classifier(flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port2['port']['id']
}) as fc2:
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(
port_chain={
'flow_classifiers': [
fc1['flow_classifier']['id']
],
'port_pair_groups': [
pg1['port_pair_group']['id']
]
}
):
self._create_port_chain(
self.fmt, {
'flow_classifiers': [
fc2['flow_classifier']['id']
],
'port_pair_groups': [
pg2['port_pair_group']['id']
]
},
expected_res_status=400
)
def test_create_multi_port_chain_with_same_flow_classifier(self):
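        # This test verifies that a flow classifier already bound to one port
        # chain cannot be reused by a second chain; the API returns 409.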
with self.port(
name='test1'
) as port1:
with self.flow_classifier(flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port1['port']['id']
}) as fc:
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(
port_chain={
'flow_classifiers': [
fc['flow_classifier']['id']
],
'port_pair_groups': [
pg1['port_pair_group']['id']
]
}
):
self._create_port_chain(
self.fmt, {
'flow_classifiers': [
fc['flow_classifier']['id']
],
'port_pair_groups': [
pg2['port_pair_group']['id']
]
},
expected_res_status=409
)
def test_create_port_chain_with_port_pairs(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pp1, self.port_pair(port_pair={
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id']
}) as pp2:
with self.port_pair_group(port_pair_group={
'port_pairs': [
pp1['port_pair']['id']
]
}) as pg1, self.port_pair_group(port_pair_group={
'port_pairs': [
pp2['port_pair']['id']
]
}) as pg2:
self._test_create_port_chain({
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id']
]
})
def test_create_port_chain_with_empty_port_pair_groups(self):
self._create_port_chain(
self.fmt, {'port_pair_groups': []},
expected_res_status=400
)
def test_create_port_chain_with_nonuuid_port_pair_group_id(self):
self._create_port_chain(
self.fmt, {'port_pair_groups': ['unknown']},
expected_res_status=400
)
def test_create_port_chain_with_unknown_port_pair_group_id(self):
self._create_port_chain(
self.fmt, {'port_pair_groups': [uuidutils.generate_uuid()]},
expected_res_status=404
)
def test_create_port_chain_with_same_port_pair_groups(self):
with self.port_pair_group(
port_pair_group={}
) as pg:
with self.port_chain(
port_chain={
'port_pair_groups': [pg['port_pair_group']['id']]
}
):
self._create_port_chain(
self.fmt, {
'port_pair_groups': [pg['port_pair_group']['id']]
}, expected_res_status=409
)
def test_create_port_chain_with_no_port_pair_groups(self):
self._create_port_chain(
self.fmt, {}, expected_res_status=400
)
def test_create_port_chain_with_consecutive_tap_port_pair_groups(self):
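        # This test verifies that a chain with two consecutive tap-enabled
        # port pair groups is rejected with 400.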
with self.port(
name='port1',
device_id='tap_device1'
) as tap_port1, self.port(
name='port2',
device_id='tap_device2'
) as tap_port2:
with self.port_pair(
port_pair={
'ingress': tap_port1['port']['id'],
'egress': tap_port1['port']['id']
}
) as tap_pp1, self.port_pair(
port_pair={
'ingress': tap_port2['port']['id'],
'egress': tap_port2['port']['id']
}
) as tap_pp2:
with self.port_pair_group(
self.fmt,
{
'port_pairs': [tap_pp1['port_pair']['id']],
'tap_enabled': True
}
) as pg1, self.port_pair_group(
self.fmt,
{
'port_pairs': [tap_pp2['port_pair']['id']],
'tap_enabled': True
}
) as pg2:
self._create_port_chain(
self.fmt,
{
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id']
]
},
expected_res_status=400
)
def test_create_port_chain_with_non_consecutive_tap_port_pair_groups(self):
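        # This test verifies that tap-enabled port pair groups are accepted
        # when they are separated by a regular (non-tap) port pair group.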
with self.port(
name='port1',
device_id='tap_device1'
) as tap_port1, self.port(
name='port2',
device_id='default_device'
) as ingress_default, self.port(
name='port3',
device_id='default_device'
) as egress_default, self.port(
name='port4',
device_id='tap_device2'
) as tap_port2:
with self.port_pair(
port_pair={
'ingress': tap_port1['port']['id'],
'egress': tap_port1['port']['id']
}
) as tap_pp1, self.port_pair(
port_pair={
'ingress': ingress_default['port']['id'],
'egress': egress_default['port']['id']
}
) as default_pp, self.port_pair(
port_pair={
'ingress': tap_port2['port']['id'],
'egress': tap_port2['port']['id']
}
) as tap_pp2:
with self.port_pair_group(
self.fmt,
{
'port_pairs': [tap_pp1['port_pair']['id']],
'tap_enabled': True,
'port_pair_group_parameters': {
'lb_fields': [],
'ppg_n_tuple_mapping': {'ingress_n_tuple': {},
'egress_n_tuple': {}}}
}
) as tap_pg1, self.port_pair_group(
self.fmt,
{
'port_pairs': [default_pp['port_pair']['id']],
'tap_enabled': False,
'port_pair_group_parameters': {
'lb_fields': [],
'ppg_n_tuple_mapping': {'ingress_n_tuple': {},
'egress_n_tuple': {}}
}
}
) as default_pg, self.port_pair_group(
self.fmt,
{
'port_pairs': [tap_pp2['port_pair']['id']],
'tap_enabled': True,
'port_pair_group_parameters': {
'lb_fields': [],
'ppg_n_tuple_mapping': {'ingress_n_tuple': {},
'egress_n_tuple': {}
}
}
}
) as tap_pg2:
self._test_create_port_chain(
{
'port_pair_groups': [
tap_pg1['port_pair_group']['id'],
default_pg['port_pair_group']['id'],
tap_pg2['port_pair_group']['id']
]
}
)
def test_create_port_chain_with_invalid_chain_parameters(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._create_port_chain(
self.fmt, {
'chain_parameters': {'correlation': 'unknown'},
'port_pair_groups': [pg['port_pair_group']['id']]
}, expected_res_status=400
)
def test_create_port_chain_with_invalid_chain_parameters_symmetric(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._create_port_chain(
self.fmt, {
'chain_parameters': {'symmetric': 'abc'},
'port_pair_groups': [pg['port_pair_group']['id']]
}, expected_res_status=400
)
def test_create_port_chain_unknown_flow_classifiers(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._create_port_chain(
self.fmt, {
'flow_classifiers': [uuidutils.generate_uuid()],
'port_pair_groups': [pg['port_pair_group']['id']]
}, expected_res_status=404
)
def test_create_port_chain_nouuid_flow_classifiers(self):
with self.port_pair_group(port_pair_group={}) as pg:
self._create_port_chain(
self.fmt, {
'flow_classifiers': ['unknown'],
'port_pair_groups': [pg['port_pair_group']['id']]
}, expected_res_status=400
)
def test_list_port_chains(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(port_chain={
'port_pair_groups': [pg1['port_pair_group']['id']]
}) as pc1, self.port_chain(port_chain={
'port_pair_groups': [pg2['port_pair_group']['id']]
}) as pc2:
port_chains = [pc1, pc2]
self._test_list_resources(
'port_chain', port_chains
)
def test_list_port_chains_with_params(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(port_chain={
'name': 'test1',
'port_pair_groups': [pg1['port_pair_group']['id']]
}) as pc1, self.port_chain(port_chain={
'name': 'test2',
'port_pair_groups': [pg2['port_pair_group']['id']]
}) as pc2:
self._test_list_resources(
'port_chain', [pc1],
query_params='name=test1'
)
self._test_list_resources(
'port_chain', [pc2],
query_params='name=test2'
)
self._test_list_resources(
'port_chain', [],
query_params='name=test3'
)
def test_list_port_chains_with_unknown_params(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(port_chain={
'name': 'test1',
'port_pair_groups': [pg1['port_pair_group']['id']]
}) as pc1, self.port_chain(port_chain={
'name': 'test2',
'port_pair_groups': [pg2['port_pair_group']['id']]
}) as pc2:
self._test_list_resources(
'port_chain', [pc1, pc2],
query_params='hello=test3'
)
def test_show_port_chain(self):
with self.port_pair_group(
port_pair_group={}
) as pg:
with self.port_chain(port_chain={
'name': 'test1',
'description': 'portchain',
'port_pair_groups': [pg['port_pair_group']['id']]
}) as pc:
req = self.new_show_request(
'port_chains', pc['port_chain']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
expected = self._get_expected_port_chain(pc['port_chain'])
self._assert_port_chain_equal(res['port_chain'], expected)
def test_show_port_chain_noexist(self):
req = self.new_show_request(
'port_chains', '1'
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
def test_update_port_chain_add_flow_classifiers(self):
with self.port(
name='test1'
) as port:
with self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port['port']['id']
}
) as fc1, self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.101.0/24',
'logical_source_port': port['port']['id']
}
) as fc2:
with self.port_pair_group(
port_pair_group={}
) as pg:
with self.port_chain(port_chain={
'name': 'test1',
'description': 'desc1',
'port_pair_groups': [pg['port_pair_group']['id']],
'flow_classifiers': [fc1['flow_classifier']['id']]
}) as pc:
updates = {
'name': 'test2',
'description': 'desc2',
'flow_classifiers': [
fc1['flow_classifier']['id'],
fc2['flow_classifier']['id']
]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = pc['port_chain']
expected.update(updates)
self._assert_port_chain_equal(
res['port_chain'], expected
)
req = self.new_show_request(
'port_chains', pc['port_chain']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
self._assert_port_chain_equal(
res['port_chain'], expected
)
def test_update_port_chain_remove_flow_classifiers(self):
with self.port(
name='test1'
) as port:
with self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port['port']['id']
}
) as fc1, self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.101.0/24',
'logical_source_port': port['port']['id']
}
) as fc2:
with self.port_pair_group(
port_pair_group={}
) as pg:
with self.port_chain(port_chain={
'name': 'test1',
'description': 'desc1',
'port_pair_groups': [pg['port_pair_group']['id']],
'flow_classifiers': [
fc1['flow_classifier']['id'],
fc2['flow_classifier']['id']
]
}) as pc:
updates = {
'name': 'test2',
'description': 'desc2',
'flow_classifiers': [
fc1['flow_classifier']['id']
]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = pc['port_chain']
expected.update(updates)
self._assert_port_chain_equal(
res['port_chain'], expected
)
req = self.new_show_request(
'port_chains', pc['port_chain']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
self._assert_port_chain_equal(
res['port_chain'], expected
)
def test_update_port_chain_replace_flow_classifiers(self):
with self.port(
name='test1'
) as port:
with self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port['port']['id']
}
) as fc1, self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.101.0/24',
'logical_source_port': port['port']['id']
}
) as fc2:
with self.port_pair_group(
port_pair_group={}
) as pg:
with self.port_chain(port_chain={
'name': 'test1',
'description': 'desc1',
'port_pair_groups': [pg['port_pair_group']['id']],
'flow_classifiers': [fc1['flow_classifier']['id']]
}) as pc:
updates = {
'name': 'test2',
'description': 'desc2',
'flow_classifiers': [fc2['flow_classifier']['id']]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = pc['port_chain']
expected.update(updates)
self._assert_port_chain_equal(
res['port_chain'], expected
)
req = self.new_show_request(
'port_chains', pc['port_chain']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
self._assert_port_chain_equal(
res['port_chain'], expected
)
def test_update_port_chain_flow_classifiers_basic_the_same(self):
with self.port(
name='test1'
) as port1, self.port(
name='test2'
) as port2:
with self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port1['port']['id']
}
) as fc1, self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port2['port']['id']
}
) as fc2:
with self.port_pair_group(
port_pair_group={}
) as pg:
with self.port_chain(port_chain={
'name': 'test1',
'description': 'desc1',
'port_pair_groups': [pg['port_pair_group']['id']],
'flow_classifiers': [fc1['flow_classifier']['id']]
}) as pc:
updates = {
'name': 'test2',
'description': 'desc2',
'flow_classifiers': [fc2['flow_classifier']['id']]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = pc['port_chain']
expected.update(updates)
self._assert_port_chain_equal(
res['port_chain'], expected
)
req = self.new_show_request(
'port_chains', pc['port_chain']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
self._assert_port_chain_equal(
res['port_chain'], expected
)
def test_update_port_chain_conflict_flow_classifiers(self):
with self.port(
name='test1'
) as port1, self.port(
name='test2'
) as port2:
with self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port1['port']['id']
}
) as fc1, self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.101.0/24',
'logical_source_port': port1['port']['id']
}
) as fc2, self.flow_classifier(
flow_classifier={
'source_ip_prefix': '192.168.100.0/24',
'logical_source_port': port2['port']['id']
}
) as fc3:
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(port_chain={
'port_pair_groups': [pg1['port_pair_group']['id']],
'flow_classifiers': [fc1['flow_classifier']['id']]
}), self.port_chain(port_chain={
'name': 'test2',
'port_pair_groups': [pg2['port_pair_group']['id']],
'flow_classifiers': [fc2['flow_classifier']['id']]
}) as pc2:
updates = {
'flow_classifiers': [fc3['flow_classifier']['id']]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc2['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(400, res.status_int)
def test_update_port_chain_add_port_pair_groups(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(port_chain={
'port_pair_groups': [pg1['port_pair_group']['id']],
}) as pc:
updates = {
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id']
]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = pc['port_chain']
expected.update(updates)
self._assert_port_chain_equal(res['port_chain'], expected)
req = self.new_show_request(
'port_chains', pc['port_chain']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
self._assert_port_chain_equal(res['port_chain'], expected)
def test_update_port_chain_remove_port_pair_groups(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(port_chain={
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id'],
],
}) as pc:
updates = {
'port_pair_groups': [
pg1['port_pair_group']['id']
]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = pc['port_chain']
expected.update(updates)
self._assert_port_chain_equal(res['port_chain'], expected)
req = self.new_show_request(
'port_chains', pc['port_chain']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
self._assert_port_chain_equal(res['port_chain'], expected)
def test_update_port_chain_replace_port_pair_groups(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(port_chain={
'port_pair_groups': [pg1['port_pair_group']['id']],
}) as pc:
updates = {
'port_pair_groups': [pg2['port_pair_group']['id']]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = pc['port_chain']
expected.update(updates)
self._assert_port_chain_equal(res['port_chain'], expected)
req = self.new_show_request(
'port_chains', pc['port_chain']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
self._assert_port_chain_equal(res['port_chain'], expected)
def test_update_port_chain_chain_parameters(self):
with self.port_pair_group(
port_pair_group={}
) as pg:
with self.port_chain(port_chain={
'port_pair_groups': [pg['port_pair_group']['id']],
}) as pc:
updates = {
'chain_parameters': {'correlation': 'mpls'}
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(400, res.status_int)
def test_update_port_chain_part_of_graph_fail(self):
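        # This test verifies that while a chain belongs to a service graph,
        # updating its port_pair_groups or flow_classifiers is rejected with
        # 409, whereas updating only name and description still succeeds.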
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(port_chain={
'port_pair_groups': [pg1['port_pair_group']['id']]
}) as pc1, self.port_chain(port_chain={
'port_pair_groups': [pg2['port_pair_group']['id']]
}) as pc2:
with self.service_graph(service_graph={
'name': 'test1',
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']]}
}):
updates = {
'port_pair_groups': [uuidutils.generate_uuid()]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc1['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(409, res.status_int)
updates = {
'flow_classifiers': [uuidutils.generate_uuid()]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc2['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(409, res.status_int)
updates = {
'name': 'new name',
'description': 'new description'
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc1['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(200, res.status_int)
def test_update_port_chain_consistency_with_consecutive_tap_ppg(self):
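        # This test verifies that updating a chain so that two tap-enabled
        # port pair groups become consecutive is rejected with 400.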
with self.port(
name='port1',
device_id='tap_device1'
) as tap_port1, self.port(
name='port2',
device_id='tap_device2'
) as tap_port2:
with self.port_pair(
port_pair={
'ingress': tap_port1['port']['id'],
'egress': tap_port1['port']['id']
}
) as tap_pp1, self.port_pair(
port_pair={
'ingress': tap_port2['port']['id'],
'egress': tap_port2['port']['id']
}
) as tap_pp2:
with self.port_pair_group(
self.fmt,
{
'port_pairs': [tap_pp1['port_pair']['id']],
'tap_enabled': True
}
) as pg1, self.port_pair_group(
self.fmt,
{
'port_pairs': [tap_pp2['port_pair']['id']],
'tap_enabled': True
}
) as pg2:
with self.port_chain(
port_chain={
'port_pair_groups': [
pg1['port_pair_group']['id']
]
}
) as pc:
updates = {
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id']
]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(400, res.status_int)
def test_update_tap_port_chain_consistency(self):
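        # This test verifies that appending a non-tap port pair group after a
        # tap-enabled one via an update is accepted.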
with self.port(
name='port1',
device_id='tap_device1'
) as tap_port1, self.port(
name='port2',
device_id='tap_device2'
) as tap_port2:
with self.port_pair(
port_pair={
'ingress': tap_port1['port']['id'],
'egress': tap_port1['port']['id']
}
) as tap_pp1, self.port_pair(
port_pair={
'ingress': tap_port2['port']['id'],
'egress': tap_port2['port']['id']
}
) as tap_pp2:
with self.port_pair_group(
self.fmt,
{
'port_pairs': [tap_pp1['port_pair']['id']],
'tap_enabled': True
}
) as pg1, self.port_pair_group(
self.fmt,
{
'port_pairs': [tap_pp2['port_pair']['id']],
'tap_enabled': False
}
) as pg2:
with self.port_chain(
port_chain={
'port_pair_groups': [
pg1['port_pair_group']['id']
]
}
) as pc:
updates = {
'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id']
]
}
req = self.new_update_request(
'port_chains', {'port_chain': updates},
pc['port_chain']['id']
)
resp = req.get_response(self.ext_api)
self.assertEqual(200, resp.status_int)
res = self.deserialize(self.fmt, resp)
expected = pc['port_chain']
expected.update(updates)
self._assert_port_chain_equal(res['port_chain'],
expected)
def test_delete_port_chain(self):
with self.port_pair_group(
port_pair_group={}
) as pg:
with self.port_chain(port_chain={
'port_pair_groups': [pg['port_pair_group']['id']]
}, do_delete=False) as pc:
req = self.new_delete_request(
'port_chains', pc['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(204, res.status_int)
req = self.new_show_request(
'port_chains', pc['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
req = self.new_show_request(
'port_pair_groups', pg['port_pair_group']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(200, res.status_int)
def test_delete_port_chain_noexist(self):
req = self.new_delete_request(
'port_chains', '1'
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
def test_delete_port_chain_part_of_graph_fail(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(port_chain={
'port_pair_groups': [pg1['port_pair_group']['id']]
}) as pc1, self.port_chain(port_chain={
'port_pair_groups': [pg2['port_pair_group']['id']]
}) as pc2:
with self.service_graph(service_graph={
'name': 'test1',
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']]}
}):
req = self.new_delete_request(
'port_chains', pc1['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(409, res.status_int)
req = self.new_delete_request(
'port_chains', pc2['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(409, res.status_int)
def test_delete_flow_classifier_port_chain_exist(self):
with self.port(
name='test1'
) as port:
with self.flow_classifier(flow_classifier={
'logical_source_port': port['port']['id']
}) as fc:
with self.port_pair_group(port_pair_group={
}) as pg:
with self.port_chain(port_chain={
'port_pair_groups': [pg['port_pair_group']['id']],
'flow_classifiers': [fc['flow_classifier']['id']]
}):
req = self.new_delete_request(
'flow_classifiers', fc['flow_classifier']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(409, res.status_int)
def test_create_port_pair_group(self):
self._test_create_port_pair_group({})
def test_quota_create_port_pair_group_quota(self):
cfg.CONF.set_override('quota_port_pair_group', 3, group='QUOTAS')
self._create_port_pair_group(
self.fmt, {'port_pairs': []}, expected_res_status=201
)
self._create_port_pair_group(
self.fmt, {'port_pairs': []}, expected_res_status=201
)
self._create_port_pair_group(
self.fmt, {'port_pairs': []}, expected_res_status=201
)
self._create_port_pair_group(
self.fmt, {'port_pairs': []}, expected_res_status=409
)
def test_create_port_pair_group_all_fields(self):
self._test_create_port_pair_group({
'name': 'test1',
'description': 'desc1',
'port_pairs': [],
'tap_enabled': False,
'port_pair_group_parameters': {
'lb_fields': ['ip_src', 'ip_dst'],
'ppg_n_tuple_mapping': {
'ingress_n_tuple': {'source_ip_prefix': None},
'egress_n_tuple': {'destination_ip_prefix': None}}
}
})
def test_create_port_pair_group_with_empty_parameters(self):
self._test_create_port_pair_group({
'name': 'test1',
'description': 'desc1',
'port_pairs': [],
'port_pair_group_parameters': {}
})
def test_create_port_pair_group_with_none_parameters(self):
self._test_create_port_pair_group({
'name': 'test1',
'description': 'desc1',
'port_pairs': [],
'port_pair_group_parameters': None
})
def test_create_port_pair_group_with_default_parameters(self):
self._test_create_port_pair_group({
'name': 'test1',
'description': 'desc1',
'port_pairs': [],
'tap_enabled': False,
'port_pair_group_parameters': {
'lb_fields': [],
'ppg_n_tuple_mapping': {}
}
})
def test_create_port_pair_group_with_tap_enabled_parameter_true(self):
self._test_create_port_pair_group(
{
'name': 'test1',
'description': 'desc1',
'port_pairs': [],
'tap_enabled': True,
'port_pair_group_parameters': {}
},
expected_port_pair_group={
'name': 'test1',
'description': 'desc1',
'port_pairs': [],
'tap_enabled': True,
'port_pair_group_parameters': {
'lb_fields': [],
'ppg_n_tuple_mapping': {u'egress_n_tuple': {},
u'ingress_n_tuple': {}},
}
}
)
def test_create_ppg_with_all_params_and_tap_enabled_parameter_true(self):
self._create_port_pair_group(
self.fmt,
{
'name': 'test1',
'description': 'desc1',
'port_pairs': [],
'tap_enabled': True,
'port_pair_group_parameters': {
'lb_fields': ['ip_src', 'ip_dst'],
'ppg_n_tuple_mapping': {
'ingress_n_tuple': {'source_ip_prefix': None},
'egress_n_tuple': {'destination_ip_prefix': None}}
}
})
def test_create_port_pair_group_with_port_pairs(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pp1, self.port_pair(port_pair={
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id']
}) as pp2:
self._test_create_port_pair_group({
'port_pairs': [
pp1['port_pair']['id'],
pp2['port_pair']['id']
]
})
def test_create_tap_port_pair_group_with_single_port_pair(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pp1:
self._test_create_port_pair_group(
{
'port_pairs': [
pp1['port_pair']['id'],
],
'tap_enabled': True
}
)
def test_create_tap_pair_group_with_multiple_port_pairs(self):
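        # This test verifies that a tap-enabled port pair group may only hold
        # a single port pair; a request with two port pairs is rejected with
        # 400.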
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pp1, self.port_pair(port_pair={
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id']
}) as pp2:
self._create_port_pair_group(
self.fmt,
{
'port_pairs': [
pp1['port_pair']['id'],
pp2['port_pair']['id']
],
'tap_enabled': True
},
expected_res_status=400
)
def test_create_port_pair_group_consistent_correlations(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id'],
'service_function_parameters': {'correlation': 'mpls'}
}) as pp1, self.port_pair(port_pair={
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id'],
'service_function_parameters': {'correlation': 'mpls'}
}) as pp2:
self._test_create_port_pair_group({
'port_pairs': [
pp1['port_pair']['id'],
pp2['port_pair']['id']
]
})
def test_create_port_pair_group_inconsistent_correlations(self):
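        # This test verifies that port pairs with different correlation types
        # (here MPLS and none) cannot be mixed in one port pair group.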
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id'],
'service_function_parameters': {'correlation': 'mpls'}
}) as pp1, self.port_pair(port_pair={
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id'],
'service_function_parameters': {'correlation': None}
}) as pp2:
self._create_port_pair_group(
self.fmt,
{'port_pairs': [
pp1['port_pair']['id'],
pp2['port_pair']['id']
]},
expected_res_status=400)
def test_create_port_pair_group_with_nouuid_port_pair_id(self):
self._create_port_pair_group(
self.fmt, {'port_pairs': ['unknown']},
expected_res_status=400
)
def test_create_port_pair_group_with_unknown_port_pair_id(self):
self._create_port_pair_group(
self.fmt, {'port_pairs': [uuidutils.generate_uuid()]},
expected_res_status=404
)
def test_create_port_pair_group_share_port_pair_id(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pp:
with self.port_pair_group(port_pair_group={
'port_pairs': [pp['port_pair']['id']]
}):
self._create_port_pair_group(
self.fmt, {'port_pairs': [pp['port_pair']['id']]},
expected_res_status=409
)
def test_list_port_pair_groups(self):
with self.port_pair_group(port_pair_group={
'name': 'test1'
}) as pc1, self.port_pair_group(port_pair_group={
'name': 'test2'
}) as pc2:
port_pair_groups = [pc1, pc2]
self._test_list_resources(
'port_pair_group', port_pair_groups
)
def test_list_port_pair_groups_with_params(self):
with self.port_pair_group(port_pair_group={
'name': 'test1'
}) as pc1, self.port_pair_group(port_pair_group={
'name': 'test2'
}) as pc2:
self._test_list_resources(
'port_pair_group', [pc1],
query_params='name=test1'
)
self._test_list_resources(
'port_pair_group', [pc2],
query_params='name=test2'
)
self._test_list_resources(
'port_pair_group', [],
query_params='name=test3'
)
def test_list_port_pair_groups_with_unknown_params(self):
with self.port_pair_group(port_pair_group={
'name': 'test1'
}) as pc1, self.port_pair_group(port_pair_group={
'name': 'test2'
}) as pc2:
self._test_list_resources(
'port_pair_group', [pc1, pc2],
query_params='hello=test3'
)
def test_show_port_pair_group(self):
with self.port_pair_group(port_pair_group={
'name': 'test1'
}) as pc:
req = self.new_show_request(
'port_pair_groups', pc['port_pair_group']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
for k, v in pc['port_pair_group'].items():
self.assertEqual(res['port_pair_group'][k], v)
def test_show_port_pair_group_noexist(self):
req = self.new_show_request(
'port_pair_groups', '1'
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
def test_update_port_pair_group(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pp1, self.port_pair(port_pair={
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id']
}) as pp2:
with self.port_pair_group(port_pair_group={
'name': 'test1',
'description': 'desc1',
'port_pairs': [pp1['port_pair']['id']]
}) as pg:
updates = {
'name': 'test2',
'description': 'desc2',
'port_pairs': [pp2['port_pair']['id']]
}
req = self.new_update_request(
'port_pair_groups', {'port_pair_group': updates},
pg['port_pair_group']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = pg['port_pair_group']
expected.update(updates)
for k, v in expected.items():
self.assertEqual(res['port_pair_group'][k], v)
req = self.new_show_request(
'port_pair_groups', pg['port_pair_group']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
for k, v in expected.items():
self.assertEqual(res['port_pair_group'][k], v)
def test_update_port_pair_group_consistency_checks(self):
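        # This test verifies that adding a port pair whose correlation differs
        # from the group's existing pairs is rejected (400), while adding one
        # with a matching correlation succeeds.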
with self.port(
name='port1',
device_id='default'
) as port1, self.port(
name='port2',
device_id='default'
) as port2, self.port(
name='port3',
device_id='default'
) as port3, self.port(
name='port4',
device_id='default'
) as port4:
with self.port_pair(port_pair={
'ingress': port1['port']['id'],
'egress': port2['port']['id'],
'service_function_parameters': {'correlation': 'mpls'}
}) as pp1, self.port_pair(port_pair={
'ingress': port2['port']['id'],
'egress': port3['port']['id'],
'service_function_parameters': {'correlation': 'mpls'}
}) as pp2, self.port_pair(port_pair={
'ingress': port3['port']['id'],
'egress': port4['port']['id'],
'service_function_parameters': {'correlation': None}
}) as pp3, self.port_pair(port_pair={
'ingress': port4['port']['id'],
'egress': port1['port']['id'],
'service_function_parameters': {'correlation': 'mpls'}
}) as pp4:
with self.port_pair_group(port_pair_group={
'name': 'test1',
'description': 'desc1',
'port_pairs': [pp1['port_pair']['id'],
pp2['port_pair']['id']]
}) as pg:
updates = {
'name': 'test2',
'description': 'desc2',
'port_pairs': [pp1['port_pair']['id'],
pp2['port_pair']['id'],
pp3['port_pair']['id']]
}
req = self.new_update_request(
'port_pair_groups', {'port_pair_group': updates},
pg['port_pair_group']['id']
)
resp = req.get_response(self.ext_api)
self.assertEqual(400, resp.status_int)
updates = {
'name': 'test3',
'description': 'desc3',
'port_pairs': [pp1['port_pair']['id'],
pp2['port_pair']['id'],
pp4['port_pair']['id']]
}
req = self.new_update_request(
'port_pair_groups', {'port_pair_group': updates},
pg['port_pair_group']['id']
)
resp = req.get_response(self.ext_api)
res = self.deserialize(self.fmt, resp)
expected = pg['port_pair_group']
expected.update(updates)
for k, v in expected.items():
self.assertEqual(res['port_pair_group'][k], v)
req = self.new_show_request(
'port_pair_groups', pg['port_pair_group']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
for k, v in expected.items():
self.assertEqual(res['port_pair_group'][k], v)
def test_update_tap_port_pair_group_consistency(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pp1, self.port_pair(port_pair={
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id']
}) as pp2:
with self.port_pair_group(port_pair_group={
'name': 'test1',
'description': 'desc1',
'port_pairs': [pp1['port_pair']['id']],
}) as pg:
updates = {
'name': 'test2',
'description': 'desc2',
'port_pairs': [pp1['port_pair']['id'],
pp2['port_pair']['id']],
'tap_enabled': True
}
req = self.new_update_request(
'port_pair_groups', {'port_pair_group': updates},
pg['port_pair_group']['id']
)
resp = req.get_response(self.ext_api)
self.assertEqual(400, resp.status_int)
def test_delete_port_pair_group(self):
with self.port_pair_group(port_pair_group={
'name': 'test1'
}, do_delete=False) as pc:
req = self.new_delete_request(
'port_pair_groups', pc['port_pair_group']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(204, res.status_int)
req = self.new_show_request(
'port_pair_groups', pc['port_pair_group']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
def test_delete_port_pair_group_port_chain_exist(self):
with self.port_pair_group(port_pair_group={
'name': 'test1'
}) as pg:
with self.port_chain(port_chain={
'port_pair_groups': [pg['port_pair_group']['id']]
}):
req = self.new_delete_request(
'port_pair_groups', pg['port_pair_group']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(409, res.status_int)
def test_delete_port_pair_group_noexist(self):
req = self.new_delete_request(
'port_pair_groups', '1'
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
def test_create_port_pair(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
self._test_create_port_pair({
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
})
def test_quota_create_port_pair_quota(self):
cfg.CONF.set_override('quota_port_pair', 3, group='QUOTAS')
with self.port(
name='port1',
device_id='default'
) as src_port1, self.port(
name='port2',
device_id='default'
) as dst_port1, self.port(
name='port3',
device_id='default'
) as src_port2, self.port(
name='port4',
device_id='default'
) as dst_port2, self.port(
name='port5',
device_id='default'
) as src_port3, self.port(
name='port6',
device_id='default'
) as dst_port3, self.port(
name='port7',
device_id='default'
) as src_port4, self.port(
name='port8',
device_id='default'
) as dst_port4:
self._create_port_pair(
self.fmt, {
'ingress': src_port1['port']['id'],
'egress': dst_port1['port']['id']
}, expected_res_status=201)
self._create_port_pair(
self.fmt, {
'ingress': src_port2['port']['id'],
'egress': dst_port2['port']['id']
}, expected_res_status=201)
self._create_port_pair(
self.fmt, {
'ingress': src_port3['port']['id'],
'egress': dst_port3['port']['id']
}, expected_res_status=201)
self._create_port_pair(
self.fmt, {
'ingress': src_port4['port']['id'],
'egress': dst_port4['port']['id']
}, expected_res_status=409)
def test_create_port_pair_all_fields(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
self._test_create_port_pair({
'name': 'test1',
'description': 'desc1',
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id'],
'service_function_parameters': {
'correlation': None, 'weight': 2}
})
def test_create_port_pair_none_service_function_parameters(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
self._test_create_port_pair({
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id'],
'service_function_parameters': None
})
def test_create_port_pair_empty_service_function_parameters(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
self._test_create_port_pair({
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id'],
'service_function_parameters': {}
})
def test_create_port_pair_with_src_dst_same_port(self):
with self.port(
name='port1',
device_id='default'
) as src_dst_port:
self._test_create_port_pair({
'ingress': src_dst_port['port']['id'],
'egress': src_dst_port['port']['id']
})
def test_create_port_pair_empty_input(self):
self._create_port_pair(self.fmt, {}, expected_res_status=400)
def test_create_port_pair_with_no_ingress(self):
with self.port(
name='port1',
device_id='default'
) as dst_port:
self._create_port_pair(
self.fmt,
{
'egress': dst_port['port']['id']
},
expected_res_status=400
)
def test_create_port_pair_with_no_egress(self):
with self.port(
name='port1',
device_id='default'
) as src_port:
self._create_port_pair(
self.fmt,
{
'ingress': src_port['port']['id']
},
expected_res_status=400
)
def test_create_port_pair_with_nouuid_ingress(self):
with self.port(
name='port1',
device_id='default'
) as dst_port:
self._create_port_pair(
self.fmt,
{
'ingress': '1',
'egress': dst_port['port']['id']
},
expected_res_status=400
)
def test_create_port_pair_with_unknown_ingress(self):
with self.port(
name='port1',
device_id='default'
) as dst_port:
self._create_port_pair(
self.fmt,
{
'ingress': uuidutils.generate_uuid(),
'egress': dst_port['port']['id']
},
expected_res_status=404
)
def test_create_port_pair_with_nouuid_egress(self):
with self.port(
name='port1',
device_id='default'
) as src_port:
self._create_port_pair(
self.fmt,
{
'ingress': src_port['port']['id'],
'egress': '1'
},
expected_res_status=400
)
def test_create_port_pair_with_unknown_egress(self):
with self.port(
name='port1',
device_id='default'
) as src_port:
self._create_port_pair(
self.fmt,
{
'ingress': src_port['port']['id'],
'egress': uuidutils.generate_uuid()
},
expected_res_status=404
)
def test_create_port_pair_ingress_egress_different_hosts(self):
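        # This test verifies that the ingress and egress ports of a port pair
        # must share the same device_id; ports on different devices are
        # rejected with 400.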
with self.port(
name='port1',
device_id='device1'
) as src_port, self.port(
name='port2',
device_id='device2'
) as dst_port:
self._create_port_pair(
self.fmt,
{
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
},
expected_res_status=400
)
def test_create_port_pair_with_invalid_service_function_parameters(self):
with self.port(
name='port1',
device_id='default'
) as src_dst_port:
self._create_port_pair(
self.fmt,
{
'ingress': src_dst_port['port']['id'],
'egress': src_dst_port['port']['id'],
'service_function_parameters': {'abc': 'def'}
},
expected_res_status=400
)
def test_create_port_pair_with_invalid_correlation(self):
with self.port(
name='port1',
device_id='default'
) as src_dst_port:
self._create_port_pair(
self.fmt,
{
'ingress': src_dst_port['port']['id'],
'egress': src_dst_port['port']['id'],
'service_function_parameters': {'correlation': 'def'}
},
expected_res_status=400
)
def test_create_port_pair_with_invalid_weight(self):
with self.port(
name='port1',
device_id='default'
) as src_dst_port:
self._create_port_pair(
self.fmt,
{
'ingress': src_dst_port['port']['id'],
'egress': src_dst_port['port']['id'],
'service_function_parameters': {'weight': -1}
},
expected_res_status=400
)
def test_list_port_pairs(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pc1, self.port_pair(port_pair={
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id']
}) as pc2:
port_pairs = [pc1, pc2]
self._test_list_resources(
'port_pair', port_pairs
)
def test_list_port_pairs_with_params(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'name': 'test1',
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pc1, self.port_pair(port_pair={
'name': 'test2',
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id']
}) as pc2:
self._test_list_resources(
'port_pair', [pc1],
query_params='name=test1'
)
self._test_list_resources(
'port_pair', [pc2],
query_params='name=test2'
)
self._test_list_resources(
'port_pair', [],
query_params='name=test3'
)
def test_list_port_pairs_with_unknown_params(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'name': 'test1',
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pc1, self.port_pair(port_pair={
'name': 'test2',
'ingress': dst_port['port']['id'],
'egress': src_port['port']['id']
}) as pc2:
port_pairs = [pc1, pc2]
self._test_list_resources(
'port_pair', port_pairs,
query_params='hello=test3'
)
def test_show_port_pair(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pc:
req = self.new_show_request(
'port_pairs', pc['port_pair']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
for k, v in pc['port_pair'].items():
self.assertEqual(res['port_pair'][k], v)
def test_show_port_pair_noexist(self):
req = self.new_show_request(
'port_pairs', '1'
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
def test_update_port_pair(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'name': 'test1',
'description': 'desc1',
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pc:
updates = {
'name': 'test2',
'description': 'desc2'
}
req = self.new_update_request(
'port_pairs', {'port_pair': updates},
pc['port_pair']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = pc['port_pair']
expected.update(updates)
for k, v in expected.items():
self.assertEqual(res['port_pair'][k], v)
req = self.new_show_request(
'port_pairs', pc['port_pair']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
for k, v in expected.items():
self.assertEqual(res['port_pair'][k], v)
def test_update_port_pair_service_function_parameters(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'name': 'test1',
'description': 'desc1',
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pc:
updates = {
'service_function_parameters': {
'correlation': None, 'weight': 2,
}
}
req = self.new_update_request(
'port_pairs', {'port_pair': updates},
pc['port_pair']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(400, res.status_int)
def test_update_port_pair_ingress(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'name': 'test1',
'description': 'desc1',
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pc:
updates = {
'ingress': dst_port['port']['id']
}
req = self.new_update_request(
'port_pairs', {'port_pair': updates},
pc['port_pair']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(400, res.status_int)
def test_update_port_pair_egress(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'name': 'test1',
'description': 'desc1',
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pc:
updates = {
'egress': src_port['port']['id']
}
req = self.new_update_request(
'port_pairs', {'port_pair': updates},
pc['port_pair']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(400, res.status_int)
def test_delete_port_pair(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}, do_delete=False) as pc:
req = self.new_delete_request(
'port_pairs', pc['port_pair']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(204, res.status_int)
req = self.new_show_request(
'port_pairs', pc['port_pair']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
def test_delete_port_pair_noexist(self):
req = self.new_delete_request(
'port_pairs', '1'
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
def test_delete_port_pair_port_pair_group_exist(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}) as pp:
with self.port_pair_group(port_pair_group={
'port_pairs': [pp['port_pair']['id']]
}):
req = self.new_delete_request(
'port_pairs', pp['port_pair']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(409, res.status_int)
def test_delete_ingress_port_pair_exist(self):
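        # This test verifies that deleting a Neutron port which is still
        # referenced as a port pair ingress fails (the request returns 500).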
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}):
req = self.new_delete_request(
'ports', src_port['port']['id']
)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
def test_delete_egress_port_pair_exist(self):
with self.port(
name='port1',
device_id='default'
) as src_port, self.port(
name='port2',
device_id='default'
) as dst_port:
with self.port_pair(port_pair={
'ingress': src_port['port']['id'],
'egress': dst_port['port']['id']
}):
req = self.new_delete_request(
'ports', dst_port['port']['id']
)
res = req.get_response(self.api)
self.assertEqual(500, res.status_int)
def _test_create_service_graph_branching_ppg(
self, src_corr, dst_corr, status):
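        # Helper: builds a graph where pc1 feeds into pc2. src_corr is the
        # correlation of the port pair in the last group of the source chain,
        # dst_corr that of the first group of the destination chain, and
        # status is the HTTP status expected from the graph-create call.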
with self.port(
name='port1',
device_id='default'
) as port1, self.port(
name='port2',
device_id='default'
) as port2, self.port(
name='port3',
device_id='default'
) as port3, self.port(
name='port4',
device_id='default'
) as port4:
with self.port_pair(port_pair={
'ingress': port1['port']['id'],
'egress': port1['port']['id'],
'service_function_parameters': {'correlation': 'mpls'}
}, do_delete=False) as pp1, self.port_pair(port_pair={
'ingress': port2['port']['id'],
'egress': port2['port']['id'],
'service_function_parameters': {'correlation': src_corr}
}, do_delete=False) as pp2, self.port_pair(port_pair={
'ingress': port3['port']['id'],
'egress': port3['port']['id'],
'service_function_parameters': {'correlation': dst_corr}
}, do_delete=False) as pp3, self.port_pair(port_pair={
'ingress': port4['port']['id'],
'egress': port4['port']['id'],
'service_function_parameters': {'correlation': 'mpls'}
}, do_delete=False) as pp4:
with self.port_pair_group(
port_pair_group={'port_pairs': [pp1['port_pair']['id']]},
do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={'port_pairs': [pp2['port_pair']['id']]},
do_delete=False
) as pg2, self.port_pair_group(
port_pair_group={'port_pairs': [pp3['port_pair']['id']]},
do_delete=False
) as pg3, self.port_pair_group(
port_pair_group={'port_pairs': [pp4['port_pair']['id']]},
do_delete=False
) as pg4:
with self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [
pg1['port_pair_group']['id'],
pg2['port_pair_group']['id']]}
) as pc1, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [
pg3['port_pair_group']['id'],
pg4['port_pair_group']['id']]}
) as pc2:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [
pc2['port_chain']['id']]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=status)
def test_create_service_graph_branching_ppg_no_src_corr_fail(self):
self._test_create_service_graph_branching_ppg(None, 'mpls', 400)
def test_create_service_graph_branching_ppg_no_dst_corr_fail(self):
self._test_create_service_graph_branching_ppg('mpls', None, 400)
def test_create_service_graph_branching_ppg_both_corrs_ok(self):
self._test_create_service_graph_branching_ppg('mpls', 'mpls', 201)
def test_create_service_graph_linear_dependency_only(self):
        # This test creates a graph consisting of one port chain that depends
        # on one other port chain, i.e. a linear dependency with no branching.
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2:
with self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
) as pc1, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
) as pc2:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=201)
def test_create_service_graph_branching_no_class(self):
        # This test creates a graph where one port chain acts as a dependency
        # of two other port chains, effectively creating a branching service
        # function chain.
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg3:
with self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
) as pc1, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
) as pc2, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg3['port_pair_group']['id']]}
) as pc3:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [
pc2['port_chain']['id'],
pc3['port_chain']['id']
]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=201)
def test_create_service_graph_same_chain_fail(self):
        # This test attempts to create a graph with a single branching point
        # listing two destination port chains that are in fact the same chain,
        # which must be rejected.
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(
port_chain={
'port_pair_groups': [pg1['port_pair_group']['id']]
}
) as pc1, self.port_chain(
port_chain={
'port_pair_groups': [pg2['port_pair_group']['id']]
}
) as pc2:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [
pc2['port_chain']['id'],
pc2['port_chain']['id']
]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=400)
def test_create_service_graph_with_already_used_pcs_fail(self):
        # This test attempts to create a graph that includes port chains
        # already mapped to another graph, which must be rejected.
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg3:
with self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
) as pc1, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
) as pc2, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg3['port_pair_group']['id']]}
) as pc3:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [
pc2['port_chain']['id']
]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=201)
self._create_service_graph(self.fmt, {
'port_chains': {
pc3['port_chain']['id']: [
pc1['port_chain']['id']
]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=409)
def test_create_service_graph_with_multiple_starts(self):
        # This test creates a graph with multiple starting chains (tails).
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg3, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg4:
with self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
) as pc1, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
) as pc2, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg3['port_pair_group']['id']]}
) as pc3, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg4['port_pair_group']['id']]}
) as pc4:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']],
pc3['port_chain']['id']: [pc4['port_chain']['id']],
pc4['port_chain']['id']: [pc2['port_chain']['id']]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=201)
def _test_create_service_graph_single_branching_two_fcs_each(
self, fc1_dict, fc2_dict, fc3_dict, fc4_dict, expected_res_status
):
with self.flow_classifier(
flow_classifier=fc1_dict, do_delete=False
) as fc1, self.flow_classifier(
flow_classifier=fc2_dict, do_delete=False
) as fc2, self.flow_classifier(
flow_classifier=fc3_dict, do_delete=False
) as fc3, self.flow_classifier(
flow_classifier=fc4_dict, do_delete=False
) as fc4:
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg3:
with self.port_chain(
port_chain={
'port_pair_groups': [pg1['port_pair_group']['id']]
},
do_delete=False
) as pc1, self.port_chain(
port_chain={
'port_pair_groups': [pg2['port_pair_group']['id']],
'flow_classifiers': [
fc1['flow_classifier']['id'],
fc2['flow_classifier']['id']
]
},
do_delete=False
) as pc2, self.port_chain(
port_chain={
'port_pair_groups': [pg3['port_pair_group']['id']],
'flow_classifiers': [
fc3['flow_classifier']['id'],
fc4['flow_classifier']['id']
]
},
do_delete=False
) as pc3:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [
pc2['port_chain']['id'],
pc3['port_chain']['id']
]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=expected_res_status)
def test_create_service_graph_unambiguous_branch(self):
# this test will create a graph where 1 port chain will act
# as a dependency to 2 other port chains, using different
# classifications for the dependent chains, which must succeed.
with self.port(
name='test1', do_delete=False
) as port1, self.port(
name='test2', do_delete=False
) as port2, self.port(
name='test3', do_delete=False
) as port3, self.port(
name='test4', do_delete=False
) as port4:
fc1_dict = {
'name': 'fc1',
'ethertype': 'IPv4',
'protocol': 'tcp',
'logical_source_port': port1['port']['id']
}
fc2_dict = {
'name': 'fc2',
'ethertype': 'IPv6',
'protocol': 'tcp',
'logical_source_port': port2['port']['id']
}
fc3_dict = {
'name': 'fc3',
'ethertype': 'IPv4',
'protocol': 'udp',
'logical_source_port': port3['port']['id']
}
fc4_dict = {
'name': 'fc4',
'ethertype': 'IPv6',
'protocol': 'udp',
'logical_source_port': port4['port']['id']
}
self._test_create_service_graph_single_branching_two_fcs_each(
fc1_dict, fc2_dict, fc3_dict, fc4_dict,
expected_res_status=201)
def test_create_service_graph_with_direct_loop_fail(self):
# this test will attempt to create a graph where there is a direct
# loop, i.e. a chain linked to itself - specifically pc2->pc2.
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2:
with self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
) as pc1, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
) as pc2:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']],
pc2['port_chain']['id']: [pc2['port_chain']['id']]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=400)
def test_create_service_graph_with_indirect_loop_fail(self):
# this test will attempt to create a graph where there is an indirect
# loop, i.e. a chain is linked to a chain providing a path back to
# the first chain again - specifically pc2->pc3->pc4->pc2.
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg3, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg4, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg5:
with self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
) as pc1, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
) as pc2, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg3['port_pair_group']['id']]}
) as pc3, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg4['port_pair_group']['id']]}
) as pc4, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg5['port_pair_group']['id']]}
) as pc5:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']],
pc2['port_chain']['id']: [pc3['port_chain']['id']],
pc3['port_chain']['id']: [pc4['port_chain']['id']],
pc4['port_chain']['id']: [
pc2['port_chain']['id'],
pc5['port_chain']['id']
]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=400)
def test_create_service_graph_with_inexistent_port_chains(self):
# this test will attempt to create a graph where one
        # of the referenced port chains does not exist, and fail.
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg3:
with self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
) as pc1, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
) as pc2, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg3['port_pair_group']['id']]}
) as pc3:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']],
pc2['port_chain']['id']: [
pc3['port_chain']['id'],
uuidutils.generate_uuid()
]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=404)
def test_create_service_graph_with_joining_branches(self):
        # this test will create a graph that includes "joining" branches, i.e.
# a set of at least 2 branches that will be linked to the same next
# port chain, thus joining traffic at that point.
with self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg1, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg2, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg3, self.port_pair_group(
port_pair_group={}, do_delete=False
) as pg4:
with self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
) as pc1, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
) as pc2, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg3['port_pair_group']['id']]}
) as pc3, self.port_chain(
do_delete=False,
port_chain={'port_pair_groups': [pg4['port_pair_group']['id']]}
) as pc4:
self._create_service_graph(self.fmt, {
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']],
pc2['port_chain']['id']: [
pc3['port_chain']['id'], pc4['port_chain']['id']
],
pc3['port_chain']['id']: [pc4['port_chain']['id']]
},
'name': 'abc',
'description': 'def'
}, expected_res_status=201)
def test_update_service_graph(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(
port_chain={'port_pair_groups': [pg1['port_pair_group']['id']]}
) as pc1, self.port_chain(
port_chain={'port_pair_groups': [pg2['port_pair_group']['id']]}
) as pc2:
with self.service_graph(service_graph={
'name': 'test1',
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']]
}
}) as graph:
updates = {
'name': 'test2',
'description': 'desc2'
}
req = self.new_update_request(
'service_graphs', {'service_graph': updates},
graph['service_graph']['id']
)
res = self.deserialize(
self.fmt,
req.get_response(self.ext_api)
)
expected = graph['service_graph']
expected.update(updates)
for k, v in expected.items():
self.assertEqual(res['service_graph'][k], v)
req = self.new_show_request(
'service_graphs', graph['service_graph']['id']
)
res = self.deserialize(
self.fmt, req.get_response(self.ext_api)
)
for k, v in expected.items():
self.assertEqual(res['service_graph'][k], v)
def test_delete_service_graph(self):
with self.port_pair_group(
port_pair_group={}
) as pg1, self.port_pair_group(
port_pair_group={}
) as pg2:
with self.port_chain(
port_chain={
'port_pair_groups': [pg1['port_pair_group']['id']]},
) as pc1, self.port_chain(
port_chain={
'port_pair_groups': [pg2['port_pair_group']['id']]},
) as pc2:
with self.service_graph(service_graph={
'name': 'test1',
'port_chains': {
pc1['port_chain']['id']: [pc2['port_chain']['id']]
}
}, do_delete=False) as graph:
req = self.new_delete_request(
'service_graphs', graph['service_graph']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(204, res.status_int)
req = self.new_show_request(
'service_graphs', graph['service_graph']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(404, res.status_int)
req = self.new_show_request(
'port_chains', pc1['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(200, res.status_int)
req = self.new_show_request(
'port_chains', pc2['port_chain']['id']
)
res = req.get_response(self.ext_api)
self.assertEqual(200, res.status_int)
|
{
"content_hash": "4af680fa91eab965e941e390c650048a",
"timestamp": "",
"source": "github",
"line_count": 3249,
"max_line_length": 79,
"avg_line_length": 40.1320406278855,
"alnum_prop": 0.4383345220839181,
"repo_name": "openstack/networking-sfc",
"id": "a6e0a94951b08b1f98ea932bdc2a91a8ed0bdced",
"size": "131013",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "networking_sfc/tests/unit/db/test_sfc_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1041"
},
{
"name": "Python",
"bytes": "1334579"
},
{
"name": "Shell",
"bytes": "3076"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
from .inputs import inputs
def iterator(dataset, eval_data, batch_size=1, scale_inputs=1,
distort_inputs=False, zero_mean_inputs=False, num_epochs=1,
shuffle=False):
"""Returns a function which iterates over a dataset in batches.
Args:
dataset: The dataset.
eval_data: Boolean indicating if one should use the train or eval data
set.
batch_size: Number of data per batch (optional).
scale_inputs: Float defining the scaling to use for resizing the
record's data (optional).
distort_inputs: Boolean whether to distort the inputs (optional).
      zero_mean_inputs: Boolean indicating if one should linearly scale the
record's data to have zero mean and unit norm (optional).
num_epochs: Number indicating the maximal number of epoch iterations
(optional).
      shuffle: Boolean indicating if one wants to shuffle the inputs
(optional).
Returns:
A function that iterates over the dataset.
"""
def _iterate(each, before=None, done=None):
"""Iterates over a dataset defined by the iterator.
Args:
each: Function that is called for every passed batch.
output_batch: The output_batch computed by the session.
index: The currently passed number of records.
last_index: The maximal number of records to iterate. Can be
None.
before: Function that is called before running the iterator. Its
return value is passed as the operation to run (optional). If
          None, the data_batch and label_batch get passed to the session.
data_batch: The data batch tensor.
label_batch: The label batch tensor.
done: Function that is called after running the iterator
(optional).
index: The passed number of records.
last_index: The maximal number of records to iterate. Can be
None.
"""
index = 0
if not eval_data:
num_examples_per_epoch = dataset.num_examples_per_epoch_for_train
else:
num_examples_per_epoch = dataset.num_examples_per_epoch_for_eval
if num_epochs is not None:
last_index = num_epochs * num_examples_per_epoch
else:
last_index = None
# Build up a new graph.
with tf.Graph().as_default():
data_batch, label_batch = inputs(dataset, eval_data=eval_data,
batch_size=batch_size,
scale_inputs=scale_inputs,
distort_inputs=distort_inputs,
zero_mean_inputs=zero_mean_inputs,
num_epochs=num_epochs,
shuffle=shuffle)
if batch_size == 1:
# Remove the first dimension, because we only consider batch
# sizes of one.
data_batch = tf.squeeze(data_batch, squeeze_dims=[0])
label_batch = tf.squeeze(label_batch, squeeze_dims=[0])
# Customize input batch with the optional before callback.
if before is None:
input_batch = [data_batch, label_batch]
else:
input_batch = before(data_batch, label_batch)
try:
# Run a controlled tensorflow session.
with tf.train.MonitoredTrainingSession(
save_checkpoint_secs=None,
save_summaries_steps=None,
) as monitored_session:
while not monitored_session.should_stop():
index += batch_size
# Index can't be greater than the last index.
if last_index is not None:
index = min(index, last_index)
output_batch = monitored_session.run(input_batch)
# Call the callback for each computed output batch.
each(output_batch, index, last_index)
except KeyboardInterrupt:
pass
finally:
# Call the optional done callback.
if done is not None:
done(index, last_index)
return _iterate
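# Minimal usage sketch (illustrative only, not part of the original module).
# It assumes a `dataset` object that satisfies the interface used above,
# e.g. one exposing `num_examples_per_epoch_for_train`; the callback names
# below are hypothetical.
#
#     iterate = iterator(dataset, eval_data=False, batch_size=4,
#                        num_epochs=1, shuffle=True)
#
#     def each(output_batch, index, last_index):
#         data, labels = output_batch
#         print('processed {} of {} records'.format(index, last_index))
#
#     def done(index, last_index):
#         print('finished after {} records'.format(index))
#
#     iterate(each, done=done)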
|
{
"content_hash": "ad2d2a46a12541a2e7b8d999c87da340",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 79,
"avg_line_length": 40.767857142857146,
"alnum_prop": 0.5424879544459045,
"repo_name": "rusty1s/graph-based-image-classification",
"id": "db1a075bddad5e7f62554eca2aa94da628658477",
"size": "4566",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/helper/iterator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "17345"
},
{
"name": "Jupyter Notebook",
"bytes": "2682"
},
{
"name": "Python",
"bytes": "138155"
},
{
"name": "Shell",
"bytes": "2452"
}
],
"symlink_target": ""
}
|
import os
import unittest
import IECore
import IECoreRI
class TestSHWDeepImageWriter( unittest.TestCase ) :
__shw = "test/IECoreRI/data/shw/translucentBoxes.shw"
__shwOrtho = "test/IECoreRI/data/shw/constantPlaneOrtho.shw"
__exr = "test/IECoreRI/data/dtex/groundPlane.exr"
__output = "test/IECoreRI/data/shw/written.shw"
def testConstructor( self ) :
self.failUnless( "shw" in IECore.DeepImageWriter.supportedExtensions() )
self.failUnless( "shw" in IECore.DeepImageWriter.supportedExtensions( IECore.TypeId.DeepImageWriter ) )
writer = IECoreRI.SHWDeepImageWriter()
self.failUnless( isinstance( writer, IECoreRI.SHWDeepImageWriter ) )
self.assertEqual( writer.typeId(), IECoreRI.SHWDeepImageWriter.staticTypeId() )
writer = IECoreRI.SHWDeepImageWriter( TestSHWDeepImageWriter.__output )
self.failUnless( isinstance( writer, IECoreRI.SHWDeepImageWriter ) )
self.assertEqual( writer.typeId(), IECoreRI.SHWDeepImageWriter.staticTypeId() )
writer = IECore.DeepImageWriter.create( TestSHWDeepImageWriter.__output )
self.failUnless( isinstance( writer, IECoreRI.SHWDeepImageWriter ) )
self.assertEqual( writer.typeId(), IECoreRI.SHWDeepImageWriter.staticTypeId() )
def testCanWrite( self ) :
self.failUnless( not IECoreRI.SHWDeepImageWriter.canWrite( "" ) )
self.failUnless( IECoreRI.SHWDeepImageWriter.canWrite( TestSHWDeepImageWriter.__output ) )
def testDefaultWriter( self ) :
writer = IECoreRI.SHWDeepImageWriter()
self.assertEqual( writer.parameters()['fileName'].getTypedValue(), "" )
self.assertEqual( writer.parameters()['channelNames'].getValue(), IECore.StringVectorData( [ "A" ] ) )
self.assertEqual( writer.parameters()['resolution'].getTypedValue(), IECore.V2i( 2048, 1556 ) )
self.assertEqual( writer.parameters()['tileSize'].getTypedValue(), IECore.V2i( 32, 32 ) )
p = IECore.DeepPixel( "RGBA" )
p.addSample( 1, [ 1, 0, 0, 1 ] )
self.assertRaises( RuntimeError, IECore.curry( writer.writePixel, 0, 0, p ) )
def testParameters( self ) :
writer = IECoreRI.SHWDeepImageWriter( TestSHWDeepImageWriter.__output )
self.assertEqual( writer.parameters()['fileName'].getTypedValue(), TestSHWDeepImageWriter.__output )
self.assertEqual( writer.parameters()['channelNames'].getValue(), IECore.StringVectorData( [ "A" ] ) )
self.assertEqual( writer.parameters()['resolution'].getTypedValue(), IECore.V2i( 2048, 1556 ) )
self.assertEqual( writer.parameters()['tileSize'].getTypedValue(), IECore.V2i( 32, 32 ) )
self.assertEqual( writer.parameters()['worldToCameraMatrix'].getTypedValue(), IECore.M44f() )
self.assertEqual( writer.parameters()['worldToNDCMatrix'].getTypedValue(), IECore.M44f() )
def testStrictChannels( self ) :
writer = IECoreRI.SHWDeepImageWriter( TestSHWDeepImageWriter.__output )
p = IECore.DeepPixel( "RGBA" )
p.addSample( 1, [ 1, 0, 0, 1 ] )
self.assertRaises( RuntimeError, IECore.curry( writer.writePixel, 0, 0, p ) )
p = IECore.DeepPixel( "RGB" )
p.addSample( 1, [ 1, 0, 0 ] )
self.assertRaises( RuntimeError, IECore.curry( writer.writePixel, 0, 1, p ) )
p = IECore.DeepPixel( "A" )
p.addSample( 1, [ 1 ] )
writer.writePixel( 0, 2, p )
p = IECore.DeepPixel( "RGBAST" )
p.addSample( 1, [ 1, 0, 0, 1, 0, 1 ] )
self.assertRaises( RuntimeError, IECore.curry( writer.writePixel, 0, 3, p ) )
def testTileSize( self ) :
p = IECore.DeepPixel( "A" )
p.addSample( 1, [ 1, ] )
writer = IECoreRI.SHWDeepImageWriter( TestSHWDeepImageWriter.__output )
writer.parameters()['resolution'].setTypedValue( IECore.V2i( 2, 2 ) )
self.assertRaises( RuntimeError, IECore.curry( writer.writePixel, 0, 0, p ) )
writer = IECoreRI.SHWDeepImageWriter( TestSHWDeepImageWriter.__output )
writer.parameters()['tileSize'].setTypedValue( IECore.V2i( 2, 2 ) )
writer.writePixel( 0, 0, p )
writer = IECoreRI.SHWDeepImageWriter( TestSHWDeepImageWriter.__output )
writer.parameters()['resolution'].setTypedValue( IECore.V2i( 127, 127 ) )
writer.writePixel( 0, 0, p )
writer = IECoreRI.SHWDeepImageWriter( TestSHWDeepImageWriter.__output )
writer.parameters()['resolution'].setTypedValue( IECore.V2i( 127, 127 ) )
writer.parameters()['tileSize'].setTypedValue( IECore.V2i( 128, 128 ) )
self.assertRaises( RuntimeError, IECore.curry( writer.writePixel, 0, 0, p ) )
writer = IECoreRI.SHWDeepImageWriter( TestSHWDeepImageWriter.__output )
writer.parameters()['tileSize'].setTypedValue( IECore.V2i( 30, 30 ) )
self.assertRaises( RuntimeError, IECore.curry( writer.writePixel, 0, 0, p ) )
def testReadWritePixel( self ) :
reader = IECoreRI.SHWDeepImageReader( TestSHWDeepImageWriter.__shw )
writer = IECoreRI.SHWDeepImageWriter( TestSHWDeepImageWriter.__output )
writer.parameters()['resolution'].setTypedValue( reader.dataWindow().size() + IECore.V2i( 1 ) )
self.assertEqual( writer.parameters()['resolution'].getTypedValue(), IECore.V2i( 512, 512 ) )
self.assertEqual( writer.parameters()['channelNames'].getValue(), IECore.StringVectorData( [ "A" ] ) )
writer.parameters()['resolution'].setTypedValue( IECore.V2i( 2, 2 ) )
self.assertEqual( writer.parameters()['resolution'].getTypedValue(), IECore.V2i( 2, 2 ) )
writer.parameters()['tileSize'].setTypedValue( IECore.V2i( 2, 2 ) )
wToC = IECore.M44f( 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9, 10.1, 11.11, 12.12, 13.13, 14.14, 15.15, 16.16 )
cToS = IECore.M44f( 10.1, 20.2, 30.3, 40.4, 50.5, 60.6, 7.7, 80.8, 90.9, 100.1, 110.11, 120.12, 130.13, 140.14, 150.15, 160.16 )
writer.parameters()['worldToCameraMatrix'].setTypedValue( wToC )
writer.parameters()['worldToNDCMatrix'].setTypedValue( cToS )
self.assertEqual( writer.parameters()['worldToCameraMatrix'].getTypedValue(), wToC )
self.assertEqual( writer.parameters()['worldToNDCMatrix'].getTypedValue(), cToS )
# hits ground plane only
p = reader.readPixel( 100, 100 )
self.assertEqual( p.channelNames(), ( "A", ) )
self.assertEqual( p.numSamples(), 1 )
self.assertAlmostEqual( p.getDepth( 0 ), 102.18660736, 6 )
self.assertAlmostEqual( p[0][0], 1.0, 6 )
# hits one box then ground plane
p2 = reader.readPixel( 256, 256 )
self.assertEqual( p2.channelNames(), tuple(reader.channelNames()) )
self.assertEqual( p2.numSamples(), 3 )
self.assertAlmostEqual( p2.getDepth( 0 ), 72.6086884, 6 )
self.assertAlmostEqual( p2.getDepth( 1 ), 77.7386627, 6 )
self.assertAlmostEqual( p2.getDepth( 2 ), 85.6621628, 6 )
expected = ( 0.5, 0.5, 1.0 )
for i in range( 0, len(expected) ) :
self.assertEqual( p2[i][0], expected[i] )
# hits 2 boxes then ground plane
p3 = reader.readPixel( 195, 225 )
self.assertEqual( p3.channelNames(), tuple(reader.channelNames()) )
self.assertEqual( p3.numSamples(), 5 )
self.assertAlmostEqual( p3.getDepth( 0 ), 68.6202545, 6 )
self.assertAlmostEqual( p3.getDepth( 1 ), 75.3051300, 6 )
self.assertAlmostEqual( p3.getDepth( 2 ), 77.4111862, 6 )
self.assertAlmostEqual( p3.getDepth( 3 ), 80.0710297, 6 )
self.assertAlmostEqual( p3.getDepth( 4 ), 88.8488693, 6 )
expected = ( 0.5, 0.75, 0.5, 0.5, 1.0 )
for i in range( 0, len(expected) ) :
self.assertEqual( p3[i][0], expected[i] )
writer.writePixel( 0, 0, p )
writer.writePixel( 0, 1, p2 )
writer.writePixel( 1, 1, p3 )
del writer
reader = IECoreRI.SHWDeepImageReader( TestSHWDeepImageWriter.__output )
self.assertEqual( reader.dataWindow().size() + IECore.V2i( 1 ), IECore.V2i( 2, 2 ) )
self.assertEqual( reader.channelNames(), IECore.StringVectorData( [ "A" ] ) )
self.failUnless( reader.worldToCameraMatrix().equalWithAbsError( wToC, 1e-6 ) )
self.failUnless( reader.worldToNDCMatrix().equalWithAbsError( cToS, 1e-6 ) )
# hits ground plane only
rp = reader.readPixel( 0, 0 )
self.assertEqual( rp.channelNames(), ( "A", ) )
self.assertEqual( rp.numSamples(), 1 )
self.assertAlmostEqual( rp.getDepth( 0 ), 102.18660736, 4 )
self.assertAlmostEqual( rp[0][0], 1.0, 6 )
# hits one box then ground plane
rp2 = reader.readPixel( 0, 1 )
self.assertEqual( rp2.channelNames(), tuple(reader.channelNames()) )
self.assertEqual( rp2.numSamples(), 3 )
self.assertAlmostEqual( rp2.getDepth( 0 ), 72.6086884, 4 )
self.assertAlmostEqual( rp2.getDepth( 1 ), 77.7386627, 4 )
self.assertAlmostEqual( rp2.getDepth( 2 ), 85.6621628, 4 )
expected = ( 0.5, 0.5, 1.0 )
for i in range( 0, len(expected) ) :
self.assertEqual( rp2[i][0], expected[i] )
# hits 2 boxes then ground plane
rp3 = reader.readPixel( 1, 1 )
self.assertEqual( rp3.channelNames(), tuple(reader.channelNames()) )
self.assertEqual( rp3.numSamples(), 5 )
self.assertAlmostEqual( rp3.getDepth( 0 ), 68.6202545, 4 )
self.assertAlmostEqual( rp3.getDepth( 1 ), 75.3051300, 4 )
self.assertAlmostEqual( rp3.getDepth( 2 ), 77.4111862, 4 )
self.assertAlmostEqual( rp3.getDepth( 3 ), 80.0710297, 4 )
self.assertAlmostEqual( rp3.getDepth( 4 ), 88.8488693, 4 )
expected = ( 0.5, 0.75, 0.5, 0.5, 1.0 )
for i in range( 0, len(expected) ) :
self.assertEqual( rp3[i][0], expected[i] )
self.failUnless( reader.readPixel( 1, 0 ) is None )
def testWriteSimplePixel( self ) :
writer = IECoreRI.SHWDeepImageWriter( TestSHWDeepImageWriter.__output )
writer.parameters()['channelNames'].setValue( IECore.StringVectorData( [ "A" ] ) )
writer.parameters()['resolution'].setTypedValue( IECore.V2i( 2, 2 ) )
writer.parameters()['tileSize'].setTypedValue( IECore.V2i( 2, 2 ) )
p = IECore.DeepPixel( "A" )
p.addSample( 1, ( 0.25, ) )
p2 = IECore.DeepPixel( "A" )
p2.addSample( 2, ( 0.5, ) )
p3 = IECore.DeepPixel( "A" )
p3.addSample( 1, ( 0.25, ) )
p3.addSample( 2, ( 0.5, ) )
writer.writePixel( 0, 0, p )
writer.writePixel( 0, 1, p2 )
writer.writePixel( 1, 1, p3 )
del writer
reader = IECoreRI.SHWDeepImageReader( TestSHWDeepImageWriter.__output )
self.assertEqual( reader.dataWindow().size() + IECore.V2i( 1 ), IECore.V2i( 2, 2 ) )
self.assertEqual( reader.channelNames(), IECore.StringVectorData( [ "A" ] ) )
rp = reader.readPixel( 0, 0 )
self.assertEqual( rp.channelNames(), tuple(reader.channelNames()) )
self.assertEqual( rp.numSamples(), 1 )
self.assertEqual( rp.getDepth( 0 ), 1 )
self.assertEqual( rp[0], ( 0.25, ) )
rp2 = reader.readPixel( 0, 1 )
self.assertEqual( rp2.channelNames(), tuple(reader.channelNames()) )
self.assertEqual( rp2.numSamples(), 1 )
self.assertEqual( rp2.getDepth( 0 ), 2 )
self.assertEqual( rp2[0], ( 0.5, ) )
rp3 = reader.readPixel( 1, 1 )
self.assertEqual( rp3.channelNames(), tuple(reader.channelNames()) )
self.assertEqual( rp3.numSamples(), 2 )
self.assertEqual( rp3.getDepth( 0 ), 1 )
self.assertEqual( rp3.getDepth( 1 ), 2 )
self.assertEqual( rp3[0], ( 0.25, ) )
self.assertEqual( rp3[1], ( 0.5, ) )
self.assertEqual( rp3.composite(), [ 0.625 ] )
self.failUnless( reader.readPixel( 1, 0 ) is None )
def testEmptyPixel( self ) :
writer = IECoreRI.SHWDeepImageWriter( TestSHWDeepImageWriter.__output )
p = IECore.DeepPixel( "A" )
writer.writePixel( 0, 0, p )
writer.writePixel( 0, 50, None )
writer.writePixel( 0, 100, p )
p.addSample( 1, [ 0.5 ] )
writer.writePixel( 0, 1, p )
del writer
reader = IECoreRI.SHWDeepImageReader( TestSHWDeepImageWriter.__output )
self.failUnless( reader.readPixel( 0, 0 ) is None )
self.failUnless( reader.readPixel( 0, 50 ) is None )
self.failUnless( reader.readPixel( 0, 100 ) is None )
rp = reader.readPixel( 0, 1 )
self.failUnless( isinstance( rp, IECore.DeepPixel ) )
self.assertEqual( rp[0], ( 0.5, ) )
def __testFileConversionWithFile( self, filename ) :
reader = IECore.DeepImageReader.create( filename )
dataWindow = reader.dataWindow()
writer = IECore.DeepImageWriter.create( TestSHWDeepImageWriter.__output )
writer.parameters()['channelNames'].setValue( reader.channelNames() )
writer.parameters()['resolution'].setTypedValue( dataWindow.size() + IECore.V2i( 1 ) )
writer.parameters()['worldToCameraMatrix'] = reader.worldToCameraMatrix()
writer.parameters()['worldToNDCMatrix'] = reader.worldToNDCMatrix()
writer.parameters()['tileSize'] = IECore.V2i( 16, 16 )
for y in range( dataWindow.min.y, dataWindow.max.y + 1 ) :
for x in range( dataWindow.min.x, dataWindow.max.x + 1 ) :
writer.writePixel( x, y, reader.readPixel( x, y ) )
del writer
reader2 = IECore.DeepImageReader.create( TestSHWDeepImageWriter.__output )
self.assertEqual( reader2.channelNames(), reader.channelNames() )
self.assertEqual( reader2.dataWindow(), reader.dataWindow() )
self.assertEqual( reader2.read(), reader.read() )
for y in range( dataWindow.min.y, dataWindow.max.y + 1 ) :
for x in range( dataWindow.min.x, dataWindow.max.x + 1 ) :
p = reader.readPixel( x, y )
p2 = reader2.readPixel( x, y )
if not p2 and not p :
continue
self.assertEqual( p2.channelNames(), p.channelNames() )
self.assertEqual( p2.numSamples(), p.numSamples() )
for i in range( 0, p.numSamples() ) :
self.assertAlmostEqual( p2.getDepth( i ), p.getDepth( i ), 4 )
self.assertEqual( p2[i], p[i] )
def testFileConversion( self ) :
self.__testFileConversionWithFile( TestSHWDeepImageWriter.__shw )
self.__testFileConversionWithFile( TestSHWDeepImageWriter.__shwOrtho )
def testStrangeOrder( self ) :
writer = IECoreRI.SHWDeepImageWriter( TestSHWDeepImageWriter.__output )
writer.parameters()['channelNames'].setValue( IECore.StringVectorData( list("GARB") ) )
p = IECore.DeepPixel( "GARB", 2 )
p.addSample( 1.0, [ 0.25, 0.5, 0.75, 0.5 ] )
p.addSample( 1.5, [ 1.0, 0.25, 0.5, 0.75 ] )
writer.writePixel( 0, 0, p )
del writer
reader = IECore.DeepImageReader.create( TestSHWDeepImageWriter.__output )
self.assertEqual( reader.channelNames(), IECore.StringVectorData( [ "A" ] ) )
rp = reader.readPixel( 0, 0 )
self.assertEqual( rp.channelNames(), ( "A", ) )
self.assertEqual( rp.numSamples(), 2 )
self.assertEqual( rp.getDepth( 0 ), 1.0 )
self.assertEqual( rp.getDepth( 1 ), 1.5 )
self.assertEqual( rp[0], ( 0.5, ) )
self.assertEqual( rp[1], ( 0.25, ) )
def testNoAlpha( self ) :
writer = IECoreRI.SHWDeepImageWriter( TestSHWDeepImageWriter.__output )
writer.parameters()['channelNames'].setValue( IECore.StringVectorData( list("RGB") ) )
p = IECore.DeepPixel( "RGB", 2 )
p.addSample( 1.0, [ 0.25, 0.5, 0.75 ] )
p.addSample( 1.5, [ 0.75, 0.25, 0.5 ] )
writer.writePixel( 0, 0, p )
del writer
reader = IECore.DeepImageReader.create( TestSHWDeepImageWriter.__output )
self.assertEqual( reader.channelNames(), IECore.StringVectorData( [ "A" ] ) )
rp = reader.readPixel( 0, 0 )
self.assertEqual( rp.channelNames(), ( "A", ) )
self.assertEqual( rp.numSamples(), 2 )
self.assertEqual( rp.getDepth( 0 ), 1.0 )
self.assertEqual( rp.getDepth( 1 ), 1.5 )
self.assertEqual( rp[0], ( 0.25, ) )
self.assertEqual( rp[1], ( 0.75, ) )
def testArbitraryChannels( self ) :
writer = IECoreRI.SHWDeepImageWriter( TestSHWDeepImageWriter.__output )
writer.parameters()['channelNames'].setValue( IECore.StringVectorData( [ "Testing", "Arbitrary", "Channel", "Names" ] ) )
p = IECore.DeepPixel( [ "Testing", "Arbitrary", "Channel", "Names" ], 2 )
p.addSample( 1.0, [ 0.25, 0.5, 0.75, 0.5 ] )
p.addSample( 1.5, [ 1.0, 0.5, 0.25, 0.75 ] )
writer.writePixel( 0, 0, p )
del writer
reader = IECore.DeepImageReader.create( TestSHWDeepImageWriter.__output )
self.assertEqual( reader.channelNames(), IECore.StringVectorData( [ "A" ] ) )
rp = reader.readPixel( 0, 0 )
self.assertEqual( rp.channelNames(), ( "A", ) )
self.assertEqual( rp.numSamples(), 2 )
self.assertEqual( rp.getDepth( 0 ), 1.0 )
self.assertEqual( rp.getDepth( 1 ), 1.5 )
self.assertEqual( rp[0], ( 0.25, ) )
self.assertEqual( rp[1], ( 1.0, ) )
def testExtraChannels( self ) :
writer = IECoreRI.SHWDeepImageWriter( TestSHWDeepImageWriter.__output )
writer.parameters()['channelNames'].setValue( IECore.StringVectorData( list("RGBAST") ) )
p = IECore.DeepPixel( "RGBAST", 2 )
p.addSample( 1.0, [ 0.25, 0.5, 0.75, 0.5, 0.25, 0.5 ] )
p.addSample( 1.5, [ 1.0, 0.5, 0.25, 0.75, 0.5, 0.75 ] )
writer.writePixel( 0, 0, p )
del writer
reader = IECore.DeepImageReader.create( TestSHWDeepImageWriter.__output )
self.assertEqual( reader.channelNames(), IECore.StringVectorData( [ "A" ] ) )
rp = reader.readPixel( 0, 0 )
self.assertEqual( rp.channelNames(), ( "A", ) )
self.assertEqual( rp.numSamples(), 2 )
self.assertEqual( rp.getDepth( 0 ), 1.0 )
self.assertEqual( rp.getDepth( 1 ), 1.5 )
self.assertEqual( rp[0], ( 0.5, ) )
self.assertEqual( rp[1], ( 0.75, ) )
def tearDown( self ) :
if os.path.isfile( TestSHWDeepImageWriter.__output ) :
os.remove( TestSHWDeepImageWriter.__output )
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "d190eaea2215f63891d9ae18edbc3c85",
"timestamp": "",
"source": "github",
"line_count": 400,
"max_line_length": 130,
"avg_line_length": 42.27,
"alnum_prop": 0.6795599716110717,
"repo_name": "goddardl/cortex",
"id": "1d685b9c0b1205f8b4dc619cb1b867ef5e86b9cd",
"size": "18692",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/IECoreRI/SHWDeepImageWriterTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "61406"
},
{
"name": "C++",
"bytes": "11489966"
},
{
"name": "CMake",
"bytes": "14161"
},
{
"name": "GLSL",
"bytes": "31098"
},
{
"name": "Mathematica",
"bytes": "255937"
},
{
"name": "Python",
"bytes": "4928755"
},
{
"name": "Slash",
"bytes": "8583"
},
{
"name": "Tcl",
"bytes": "1796"
}
],
"symlink_target": ""
}
|
"""
=========================================================================
2 samples permutation test on source data with spatio-temporal clustering
=========================================================================
Tests if the source space data are significantly different between
2 groups of subjects (simulated here using one subject's data).
The multiple comparisons problem is addressed with a cluster-level
permutation test across space and time.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Eric Larson <larson.eric.d@gmail.com>
# License: BSD (3-clause)
import os.path as op
import numpy as np
from scipy import stats as stats
import mne
from mne import spatial_src_connectivity
from mne.stats import spatio_temporal_cluster_test, summarize_clusters_stc
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
stc_fname = data_path + '/MEG/sample/sample_audvis-meg-lh.stc'
subjects_dir = data_path + '/subjects'
src_fname = subjects_dir + '/fsaverage/bem/fsaverage-ico-5-src.fif'
# Load stc in common cortical space (fsaverage)
stc = mne.read_source_estimate(stc_fname)
stc.resample(50, npad='auto')
# Read the source space we are morphing to
src = mne.read_source_spaces(src_fname)
fsave_vertices = [s['vertno'] for s in src]
morph = mne.compute_source_morph(stc, 'sample', 'fsaverage',
spacing=fsave_vertices, smooth=20,
subjects_dir=subjects_dir)
stc = morph.apply(stc)
n_vertices_fsave, n_times = stc.data.shape
tstep = stc.tstep
n_subjects1, n_subjects2 = 7, 9
print('Simulating data for %d and %d subjects.' % (n_subjects1, n_subjects2))
# Let's make sure our results replicate, so set the seed.
np.random.seed(0)
X1 = np.random.randn(n_vertices_fsave, n_times, n_subjects1) * 10
X2 = np.random.randn(n_vertices_fsave, n_times, n_subjects2) * 10
X1[:, :, :] += stc.data[:, :, np.newaxis]
# make the activity bigger for the second set of subjects
X2[:, :, :] += 3 * stc.data[:, :, np.newaxis]
# We want to compare the overall activity levels for each subject
X1 = np.abs(X1) # only magnitude
X2 = np.abs(X2) # only magnitude
###############################################################################
# Compute statistic
# -----------------
#
# To use an algorithm optimized for spatio-temporal clustering, we
# just pass the spatial connectivity matrix (instead of spatio-temporal)
print('Computing connectivity.')
connectivity = spatial_src_connectivity(src)
# Note that X needs to be a list of multi-dimensional arrays of shape
# samples (subjects_k) x time x space, so we permute dimensions
X1 = np.transpose(X1, [2, 1, 0])
X2 = np.transpose(X2, [2, 1, 0])
X = [X1, X2]
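# Illustrative sanity check (not part of the original example): after the
# transpose, each entry of X has shape (n_subjects_k, n_times, n_vertices),
# and the spatial connectivity matrix is square over the same vertex count.
assert X[0].shape == (n_subjects1, n_times, n_vertices_fsave)
assert X[1].shape == (n_subjects2, n_times, n_vertices_fsave)
assert connectivity.shape == (n_vertices_fsave, n_vertices_fsave)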
# Now let's actually do the clustering. This can take a long time...
# Here we set the threshold quite high to reduce computation.
p_threshold = 0.0001
f_threshold = stats.distributions.f.ppf(1. - p_threshold / 2.,
n_subjects1 - 1, n_subjects2 - 1)
print('Clustering.')
T_obs, clusters, cluster_p_values, H0 = clu =\
spatio_temporal_cluster_test(X, connectivity=connectivity, n_jobs=1,
threshold=f_threshold, buffer_size=None)
# Now select the clusters that are sig. at p < 0.05 (note that this value
# is multiple-comparisons corrected).
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
###############################################################################
# Visualize the clusters
# ----------------------
print('Visualizing clusters.')
# Now let's build a convenient representation of each cluster, where each
# cluster becomes a "time point" in the SourceEstimate
fsave_vertices = [np.arange(10242), np.arange(10242)]
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
# Let's actually plot the first "time point" in the SourceEstimate, which
# shows all the clusters, weighted by duration
subjects_dir = op.join(data_path, 'subjects')
# blue blobs are for condition A != condition B
brain = stc_all_cluster_vis.plot('fsaverage', hemi='both',
views='lateral', subjects_dir=subjects_dir,
time_label='Duration significant (ms)',
clim=dict(kind='value', lims=[0, 1, 40]))
brain.save_image('clusters.png')
|
{
"content_hash": "1698e014a93f750f4b7538b5368a79e2",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 79,
"avg_line_length": 40.81578947368421,
"alnum_prop": 0.6079948420373952,
"repo_name": "mne-tools/mne-tools.github.io",
"id": "bc7ba59ace60b9c71f9d1469a01e7ab796928321",
"size": "4653",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "0.18/_downloads/e89fd0e967d2088d5c505283f8e003f5/plot_stats_cluster_spatio_temporal_2samp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "708696"
},
{
"name": "Dockerfile",
"bytes": "1820"
},
{
"name": "HTML",
"bytes": "1526247783"
},
{
"name": "JavaScript",
"bytes": "1323087"
},
{
"name": "Jupyter Notebook",
"bytes": "24820047"
},
{
"name": "Python",
"bytes": "18575494"
}
],
"symlink_target": ""
}
|
import unittest
import time
from iso8601 import parse_date
from datetime import timedelta
from openprocurement.api.utils import get_now
from openprocurement.api.constants import SANDBOX_MODE
from openprocurement.tender.belowthreshold.tests.base import test_organization
# TenderContractResourceTest
def create_tender_contract(self):
response = self.app.get('/tenders/{}/contracts'.format(self.tender_id))
self.contract_id = response.json['data'][0]['id']
response = self.app.post_json('/tenders/{}/contracts?acc_token={}'.format(self.tender_id, self.tender_token),
{'data': {'title': 'contract title',
'description': 'contract description',
'awardID': self.award_id}},
status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
    # next we test creating a contract in 'complete' tender status
# time travel
tender = self.db.get(self.tender_id)
for i in tender.get('awards', []):
if i.get('complaintPeriod', {}): # reporting procedure does not have complaintPeriod
i['complaintPeriod']['endDate'] = i['complaintPeriod']['startDate']
self.db.save(tender)
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'complete')
response = self.app.post_json('/tenders/{}/contracts?acc_token={}'.format(self.tender_id, self.tender_token),
{'data': {'title': 'contract title',
'description': 'contract description',
'awardID': self.award_id}},
status=403)
self.assertEqual(response.status, '403 Forbidden')
    # next we test creating a contract in 'cancelled' tender status
response = self.app.post_json('/tenders?acc_token={}',
{"data": self.initial_data})
self.assertEqual(response.status, '201 Created')
tender_id = response.json['data']['id']
tender_token = response.json['access']['token']
response = self.app.post_json('/tenders/{}/cancellations?acc_token={}'.format(
tender_id, tender_token), {'data': {'reason': 'cancellation reason', 'status': 'active'}})
self.assertEqual(response.status, '201 Created')
response = self.app.get('/tenders/{}'.format(tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'cancelled')
response = self.app.post_json('/tenders/{}/contracts?acc_token={}'.format(tender_id, tender_token),
{'data': {'title': 'contract title',
'description': 'contract description',
'awardID': self.award_id}},
status=403)
self.assertEqual(response.status, '403 Forbidden')
def patch_tender_contract(self):
response = self.app.get('/tenders/{}/contracts'.format(
self.tender_id))
self.contract_id = response.json['data'][0]['id']
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token),
{"data": {"value": {"currency": "USD"}}},
status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.json['errors'][0]["description"], "Can\'t update currency for contract value")
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token),
{"data": {"value": {"valueAddedTaxIncluded": False}}},
status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.json['errors'][0]["description"],
"Can\'t update valueAddedTaxIncluded for contract value")
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token),
{"data": {"value": {"amount": 501}}},
status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.json['errors'][0]["description"],
"Value amount should be less or equal to awarded amount (469.0)")
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token),
{"data": {"value": {"amount": 238}}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['value']['amount'], 238)
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token),
{"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "active")
self.assertIn("dateSigned", response.json['data'])
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token), {"data": {"status": "cancelled"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"],
"Can't update contract in current (complete) tender status")
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token), {"data": {"status": "pending"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"],
"Can't update contract in current (complete) tender status")
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token), {"data": {"status": "active"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"],
"Can't update contract in current (complete) tender status")
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token),
{"data": {"awardID": "894917dc8b1244b6aab9ab0ad8c8f48a"}},
status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
    # next we test patching a contract in 'cancelled' tender status
response = self.app.post_json('/tenders?acc_token={}',
{"data": self.initial_data})
self.assertEqual(response.status, '201 Created')
tender_id = response.json['data']['id']
tender_token = response.json['access']['token']
response = self.app.post_json('/tenders/{}/awards?acc_token={}'.format(tender_id, tender_token),
{'data': {'suppliers': [test_organization], 'status': 'pending'}})
award_id = response.json['data']['id']
response = self.app.patch_json('/tenders/{}/awards/{}?acc_token={}'.format(tender_id, award_id, tender_token),
{"data": {'qualified': True, "status": "active"}})
response = self.app.get('/tenders/{}/contracts'.format(tender_id))
contract_id = response.json['data'][0]['id']
response = self.app.post_json('/tenders/{}/cancellations?acc_token={}'.format(tender_id, tender_token),
{'data': {'reason': 'cancellation reason', 'status': 'active'}})
self.assertEqual(response.status, '201 Created')
response = self.app.get('/tenders/{}'.format(tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'cancelled')
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
tender_id, contract_id, tender_token),
{"data": {"awardID": "894917dc8b1244b6aab9ab0ad8c8f48a"}},
status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
tender_id, contract_id, tender_token), {"data": {"status": "active"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"],
"Can't update contract in current (cancelled) tender status")
response = self.app.patch_json('/tenders/{}/contracts/some_id?acc_token={}'.format(
self.tender_id, self.tender_token), {"data": {"status": "active"}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'contract_id'}
])
response = self.app.patch_json('/tenders/some_id/contracts/some_id', {"data": {"status": "active"}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
response = self.app.get('/tenders/{}/contracts/{}'.format(self.tender_id, self.contract_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "active")
self.assertEqual(response.json['data']["value"]['amount'], 238)
def tender_contract_signature_date(self):
response = self.app.get('/tenders/{}/contracts'.format(self.tender_id))
self.assertNotIn("dateSigned", response.json['data'][0])
self.contract_id = response.json['data'][0]['id']
    one_hour_in_future = (get_now() + timedelta(hours=1)).isoformat()
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token),
{"data": {"dateSigned": one_hour_in_furure}},
status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.json['errors'],
[{u'description': [u"Contract signature date can't be in the future"],
u'location': u'body',
u'name': u'dateSigned'}])
custom_signature_date = get_now().isoformat()
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token), {"data": {"dateSigned": custom_signature_date}})
self.assertEqual(response.status, '200 OK')
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "active")
self.assertEqual(response.json['data']["dateSigned"], custom_signature_date)
self.assertIn("dateSigned", response.json['data'])
def get_tender_contract(self):
response = self.app.get('/tenders/{}/contracts'.format(self.tender_id))
self.contract_id = response.json['data'][0]['id']
response = self.app.get('/tenders/{}/contracts/some_id'.format(self.tender_id), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'contract_id'}
])
response = self.app.get('/tenders/some_id/contracts/some_id', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
def get_tender_contracts(self):
response = self.app.get('/tenders/{}/contracts'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
response = self.app.get('/tenders/some_id/contracts', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
def award_id_change_is_not_allowed(self):
response = self.app.patch_json('/tenders/{}/awards/{}?acc_token={}'.format(
self.tender_id, self.award_id, self.tender_token), {"data": {"status": "cancelled"}})
old_award_id = self.award_id
# upload new award
response = self.app.post_json('/tenders/{}/awards?acc_token={}'.format(self.tender_id, self.tender_token),
{'data': {'suppliers': [test_organization]}})
award = response.json['data']
response = self.app.patch_json('/tenders/{}/awards/{}?acc_token={}'.format(
self.tender_id, award['id'], self.tender_token), {"data": {'qualified': True, "status": "active"}})
response = self.app.get('/tenders/{}/contracts'.format(
self.tender_id))
contract = response.json['data'][-1]
self.assertEqual(contract['awardID'], award['id'])
self.assertNotEqual(contract['awardID'], old_award_id)
# try to update awardID value
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, contract['id'], self.tender_token), {"data": {"awardID": old_award_id}})
response = self.app.get('/tenders/{}/contracts'.format(
self.tender_id))
contract = response.json['data'][-1]
self.assertEqual(contract['awardID'], award['id'])
self.assertNotEqual(contract['awardID'], old_award_id)
# TenderNegotiationContractResourceTest
def patch_tender_negotiation_contract(self):
response = self.app.get('/tenders/{}/contracts'.format(self.tender_id))
self.contract_id = response.json['data'][0]['id']
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token),
{"data": {"status": "active"}},
status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertIn("Can't sign contract before stand-still period end (", response.json['errors'][0]["description"])
response = self.app.get('/tenders/{}/awards'.format(self.tender_id))
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(len(response.json['data']), 1)
award = response.json['data'][0]
start = parse_date(award['complaintPeriod']['startDate'])
end = parse_date(award['complaintPeriod']['endDate'])
delta = end - start
self.assertEqual(delta.days, 0 if SANDBOX_MODE else self.stand_still_period_days)
    # next we test patching a contract in 'complete' tender status
tender = self.db.get(self.tender_id)
for i in tender.get('awards', []):
i['complaintPeriod']['endDate'] = i['complaintPeriod']['startDate']
self.db.save(tender)
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token), {"data": {"value": {"currency": "USD"}}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.json['errors'][0]["description"], "Can\'t update currency for contract value")
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token),
{"data": {"value": {"valueAddedTaxIncluded": False}}},
status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.json['errors'][0]["description"],
"Can\'t update valueAddedTaxIncluded for contract value")
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token), {"data": {"value": {"amount": 501}}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.json['errors'][0]["description"],
"Value amount should be less or equal to awarded amount (469.0)")
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token), {"data": {"value": {"amount": 238}}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['value']['amount'], 238)
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "active")
self.assertIn(u"dateSigned", response.json['data'])
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token), {"data": {"status": "cancelled"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"],
"Can't update contract in current (complete) tender status")
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token), {"data": {"status": "pending"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"],
"Can't update contract in current (complete) tender status")
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token), {"data": {"status": "active"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"],
"Can't update contract in current (complete) tender status")
    # next we test patching a contract in 'cancelled' tender status
response = self.app.post_json('/tenders?acc_token={}', {"data": self.initial_data})
self.assertEqual(response.status, '201 Created')
tender_id = response.json['data']['id']
tender_token = response.json['access']['token']
response = self.app.post_json('/tenders/{}/awards?acc_token={}'.format(tender_id, tender_token),
{'data': {'suppliers': [test_organization], 'status': 'pending'}})
award_id = response.json['data']['id']
response = self.app.patch_json('/tenders/{}/awards/{}?acc_token={}'.format(tender_id, award_id, tender_token),
{"data": {'qualified': True, "status": "active"}})
response = self.app.get('/tenders/{}/contracts'.format(tender_id))
contract_id = response.json['data'][0]['id']
response = self.app.post_json('/tenders/{}/cancellations?acc_token={}'.format(tender_id, tender_token),
{'data': {'reason': 'cancellation reason', 'status': 'active'}})
self.assertEqual(response.status, '201 Created')
response = self.app.get('/tenders/{}'.format(tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'cancelled')
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
tender_id, contract_id, tender_token),
{"data": {"awardID": "894917dc8b1244b6aab9ab0ad8c8f48a"}},
status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.content_type, 'application/json')
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
tender_id, contract_id, tender_token), {"data": {"status": "active"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['errors'][0]["description"],
"Can't update contract in current (cancelled) tender status")
response = self.app.patch_json('/tenders/{}/contracts/some_id?acc_token={}'.format(
self.tender_id, self.tender_token), {"data": {"status": "active"}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'contract_id'}
])
response = self.app.patch_json('/tenders/some_id/contracts/some_id', {"data": {"status": "active"}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location':
u'url', u'name': u'tender_id'}
])
response = self.app.get('/tenders/{}/contracts/{}'.format(self.tender_id, self.contract_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "active")
def tender_negotiation_contract_signature_date(self):
response = self.app.get('/tenders/{}/contracts'.format(self.tender_id))
self.assertNotIn("dateSigned", response.json['data'][0])
self.contract_id = response.json['data'][0]['id']
tender = self.db.get(self.tender_id)
for i in tender.get('awards', []):
i['complaintPeriod']['endDate'] = i['complaintPeriod']['startDate']
self.db.save(tender)
    one_hour_in_future = (get_now() + timedelta(hours=1)).isoformat()
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token),
{"data": {"dateSigned": one_hour_in_furure}},
status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.json['errors'],
[{u'description': [u"Contract signature date can't be in the future"],
u'location': u'body',
u'name': u'dateSigned'}])
    before_stand_still = i['complaintPeriod']['startDate']  # 'i' is the last award from the loop above
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token),
{"data": {"dateSigned": before_stand_still}},
status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.json['errors'],
                     [{u'description': [u'Contract signature date should be after award complaint period end date ({})'.format(i['complaintPeriod']['endDate'])],
                       u'location': u'body',
                       u'name': u'dateSigned'}])
custom_signature_date = get_now().isoformat()
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token), {"data": {"dateSigned": custom_signature_date}})
self.assertEqual(response.status, '200 OK')
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['data']["status"], "active")
self.assertEqual(response.json['data']["dateSigned"], custom_signature_date)
self.assertIn("dateSigned", response.json['data'])
def items(self):
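    # the contract's items must mirror the tender's items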
response = self.app.get('/tenders/{}?acc_token={}'.format(self.tender_id, self.tender_token))
tender = response.json['data']
response = self.app.get('/tenders/{}/contracts'.format(self.tender_id))
self.contract1_id = response.json['data'][0]['id']
self.assertEqual([item['id'] for item in response.json['data'][0]['items']],
                     [item['id'] for item in tender['items']])


# TenderNegotiationLotContractResourceTest
def lot_items(self):
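    # the contract's items must contain only the tender items related to the first lot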
response = self.app.get('/tenders/{}?acc_token={}'.format(self.tender_id, self.tender_token))
tender = response.json['data']
response = self.app.get('/tenders/{}/contracts'.format(self.tender_id))
self.contract1_id = response.json['data'][0]['id']
self.assertEqual([item['id'] for item in response.json['data'][0]['items']],
                     [item['id'] for item in tender['items'] if item['relatedLot'] == self.lot1['id']])


def lot_award_id_change_is_not_allowed(self):
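    # a new award produces a new contract; patching the contract back to the
    # cancelled award's id must have no effect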
self.app.patch_json('/tenders/{}/awards/{}?acc_token={}'.format(
self.tender_id, self.award_id, self.tender_token), {"data": {"status": "cancelled"}})
old_award_id = self.award_id
# upload new award
response = self.app.post_json('/tenders/{}/awards?acc_token={}'.format(self.tender_id, self.tender_token),
{'data': {'suppliers': [test_organization],
'lotID': self.lot1['id']}})
award = response.json['data']
self.app.patch_json('/tenders/{}/awards/{}?acc_token={}'.format(
self.tender_id, award['id'], self.tender_token), {"data": {'qualified': True, "status": "active"}})
response = self.app.get('/tenders/{}/contracts'.format(
self.tender_id))
contract = response.json['data'][-1]
self.assertEqual(contract['awardID'], award['id'])
self.assertNotEqual(contract['awardID'], old_award_id)
# try to update awardID value
self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, contract['id'], self.tender_token), {"data": {"awardID": old_award_id}})
response = self.app.get('/tenders/{}/contracts'.format(
self.tender_id))
contract = response.json['data'][-1]
self.assertEqual(contract['awardID'], award['id'])
    self.assertNotEqual(contract['awardID'], old_award_id)


def activate_contract_cancelled_lot(self):
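    # contract activation is forbidden while a cancellation for the corresponding lot exists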
response = self.app.get('/tenders/{}/lots'.format(self.tender_id))
lot = response.json['data'][0]
# Create cancellation on lot
response = self.app.post_json('/tenders/{}/cancellations?acc_token={}'.format(self.tender_id,
self.tender_token),
{'data': {'reason': 'cancellation reason',
'cancellationOf': 'lot',
'relatedLot': lot['id']}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.json['data']['status'], 'pending')
response = self.app.get('/tenders/{}/contracts'.format(self.tender_id))
contract = response.json['data'][0]
# time travel
tender = self.db.get(self.tender_id)
for i in tender.get('awards', []):
if i.get('complaintPeriod', {}): # reporting procedure does not have complaintPeriod
i['complaintPeriod']['endDate'] = i['complaintPeriod']['startDate']
self.db.save(tender)
# Try to sign (activate) contract
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(self.tender_id, contract['id'],
self.tender_token),
{'data': {'status': 'active'}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.json['errors'][0]["description"],
                     "Can't update contract while cancellation for corresponding lot exists")


# TenderNegotiationLot2ContractResourceTest
def sign_second_contract(self):
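    # the tender stays active after the first contract is signed and becomes
    # complete only once contracts for both lots are active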
response = self.app.get('/tenders/{}/contracts'.format(self.tender_id))
self.contract1_id = response.json['data'][0]['id']
self.contract2_id = response.json['data'][1]['id']
    # sign both contracts to complete the tender
# time travel
tender = self.db.get(self.tender_id)
for i in tender.get('awards', []):
if i.get('complaintPeriod', {}): # reporting procedure does not have complaintPeriod
i['complaintPeriod']['endDate'] = i['complaintPeriod']['startDate']
self.db.save(tender)
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract2_id, self.tender_token), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active')
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract1_id, self.tender_token), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['status'], 'complete')


def create_two_contract(self):
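    # one contract per lot is created automatically; manual contract creation is
    # forbidden and the tender completes only after both contracts are activated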
response = self.app.get('/tenders/{}?acc_token={}'.format(self.tender_id, self.tender_token))
tender = response.json['data']
response = self.app.get('/tenders/{}/contracts'.format(self.tender_id))
self.contract1_id = response.json['data'][0]['id']
self.contract2_id = response.json['data'][1]['id']
self.assertEqual([item['id'] for item in response.json['data'][0]['items']],
[item['id'] for item in tender['items'] if item['relatedLot'] == self.lot1['id']])
self.assertEqual([item['id'] for item in response.json['data'][1]['items']],
[item['id'] for item in tender['items'] if item['relatedLot'] == self.lot2['id']])
response = self.app.post_json('/tenders/{}/contracts?acc_token={}'.format(self.tender_id, self.tender_token),
{'data': {'title': 'contract title',
'description': 'contract description',
'awardID': self.award1_id}},
status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.content_type, 'application/json')
    # next we test creating a contract in 'complete' tender status
# time travel
tender = self.db.get(self.tender_id)
for i in tender.get('awards', []):
if i.get('complaintPeriod', {}): # reporting procedure does not have complaintPeriod
i['complaintPeriod']['endDate'] = i['complaintPeriod']['startDate']
self.db.save(tender)
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract1_id, self.tender_token), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
self.assertNotEqual(response.json['data']['status'], 'complete')
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract2_id, self.tender_token), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
response = self.app.get('/tenders/{}'.format(self.tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'complete')
response = self.app.post_json('/tenders/{}/contracts?acc_token={}'.format(self.tender_id, self.tender_token),
{'data': {'title': 'contract title',
'description': 'contract description',
'awardID': self.award1_id}},
status=403)
self.assertEqual(response.status, '403 Forbidden')
    # next we test creating a contract in 'cancelled' tender status
response = self.app.post_json('/tenders?acc_token={}',
{"data": self.initial_data})
self.assertEqual(response.status, '201 Created')
tender_id = response.json['data']['id']
tender_token = response.json['access']['token']
response = self.app.post_json('/tenders/{}/cancellations?acc_token={}'.format(
tender_id, tender_token), {'data': {'reason': 'cancellation reason', 'status': 'active'}})
self.assertEqual(response.status, '201 Created')
response = self.app.get('/tenders/{}'.format(tender_id))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'cancelled')
response = self.app.post_json('/tenders/{}/contracts?acc_token={}'.format(tender_id, tender_token),
{'data': {'title': 'contract title',
'description': 'contract description',
'awardID': self.award1_id}},
status=403)
    self.assertEqual(response.status, '403 Forbidden')


# TenderNegotiationQuickAccelerationTest
@unittest.skipUnless(SANDBOX_MODE, "not supported accelerator")
def create_tender_contract_negotiation_quick(self):
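    # with sandbox acceleration the contract cannot be signed before the
    # stand-still period ends; activation succeeds after the accelerated wait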
response = self.app.get('/tenders/{}/contracts'.format(self.tender_id))
self.contract_id = response.json['data'][0]['id']
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token), {"data": {"status": "active"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertIn("Can't sign contract before stand-still period end (", response.json['errors'][0]["description"])
time.sleep(self.time_sleep_in_sec)
response = self.app.patch_json('/tenders/{}/contracts/{}?acc_token={}'.format(
self.tender_id, self.contract_id, self.tender_token), {"data": {"status": "active"}})
self.assertEqual(response.status, '200 OK')
|
{
"content_hash": "780df0cbf867d0ce584dfd3fe056d2a1",
"timestamp": "",
"source": "github",
"line_count": 689,
"max_line_length": 234,
"avg_line_length": 52.43541364296081,
"alnum_prop": 0.629041186891054,
"repo_name": "openprocurement/openprocurement.tender.limited",
"id": "0855edb9ed293e1bf8918ba18a9c5d0e2cec3126",
"size": "36152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openprocurement/tender/limited/tests/contract_blanks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "511307"
}
],
"symlink_target": ""
}
|
from pbr import version

version_info = version.VersionInfo('mistral')
version_string = version_info.version_string()
|
{
"content_hash": "f5200992a1b2cfbecb81998cadfc90d1",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 46,
"avg_line_length": 29.5,
"alnum_prop": 0.788135593220339,
"repo_name": "openstack/mistral",
"id": "40ba471c7b828dadd2853437aaf32766b4c5667a",
"size": "726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mistral/version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2091"
},
{
"name": "Mako",
"bytes": "951"
},
{
"name": "Python",
"bytes": "2617595"
},
{
"name": "Shell",
"bytes": "26731"
}
],
"symlink_target": ""
}
|