text
stringlengths 4
1.02M
| meta
dict |
|---|---|
"""Reorganize Pokemon, PokemonForm, etc. to Species, Pokemon, etc.
This is an unmaintained one-shot script, only included in the repo for
reference.
"""
import csv
import os
from pokedex import defaults
# Highest ordinary species id present in the source data.
number_of_species = 649
# Alternate-form Pokemon rows use ids offset into the 10000+ range.
high_id_start = 10000
# Directory containing the pokedex CSV files this script reads and rewrites.
csv_dir = defaults.get_default_csv_dir()
def to_dict(filename, directory=None):
    """Load a CSV file into ``(entries, column_names)``.

    Each row becomes a dict keyed by column name; rows are indexed by their
    'id' column, falling back to 'pokemon_id' when 'id' is absent.

    Parameters:
        filename: name of the CSV file to read.
        directory: directory containing the file; defaults to the
            module-level ``csv_dir``.

    Returns:
        Tuple ``(entries, column_names)`` where ``entries`` maps the row's
        id value to its row dict.
    """
    if directory is None:
        directory = csv_dir
    fullname = os.path.join(directory, filename)
    # 'with' guarantees the handle is closed (the original leaked it), and
    # next() instead of reader.next() works on Python 2.6+ and Python 3.
    with open(fullname) as csv_file:
        reader = csv.reader(csv_file)
        column_names = next(reader)
        entries = dict()
        for row in reader:
            row_dict = dict(zip(column_names, row))
            entries[row_dict.get('id', row_dict.get('pokemon_id'))] = row_dict
    return entries, column_names
pokemon, pokemon_columns = to_dict('pokemon.csv')
forms, form_columns = to_dict('pokemon_forms.csv')
form_groups, form_group_columns = to_dict('pokemon_form_groups.csv')
evolution_chains, evolution_chain_columns = to_dict('evolution_chains.csv')
result_columns = dict(
species='''id identifier generation_id evolves_from_species_id
evolution_chain_id color_id shape_id habitat_id
growth_rate_id gender_rate capture_rate base_happiness is_baby
hatch_counter has_gender_differences forms_switchable'''.split(),
pokemon='''id species_id height weight base_experience order'''.split(),
form='''id form_identifier pokemon_id introduced_in_version_group_id
is_default is_battle_only order'''.split(),
chain='''id baby_trigger_item_id'''.split(),
)
def normalize_id(raw_id, species_count=None, high_start=None):
    """Map a source row id into the compact output id space.

    Ids above ``species_count`` belong to the "high" alternate-form range
    and are shifted down so output ids are contiguous.

    Parameters:
        raw_id: id from the source CSV (string or int).
        species_count: highest ordinary species id; defaults to the
            module-level ``number_of_species``.
        high_start: first id of the high range; defaults to the
            module-level ``high_id_start``.

    Returns:
        The normalized integer id.
    """
    if species_count is None:
        species_count = number_of_species
    if high_start is None:
        high_start = high_id_start
    # Use a distinct local name; the original shadowed the builtin id().
    normalized = int(raw_id)
    if normalized > species_count:
        normalized = normalized - high_start + species_count
    return normalized
def put(dct, entry):
    """Insert ``entry`` into ``dct`` keyed by its integer 'id'.

    An entry with an id already present must be identical to the stored
    one; a conflicting duplicate raises ValueError.  (The original printed
    both entries with Py2-only print statements and tripped a bare
    ``assert False``, which ``-O`` would silently strip.)
    """
    key = int(entry['id'])
    if key not in dct:
        dct[key] = entry
    elif entry != dct[key]:
        raise ValueError(
            'conflicting entries for id %d: %r != %r' % (key, entry, dct[key]))
# Hand-maintained map: species identifier -> value written to the
# forms_switchable output column (consumed in the transform loop below).
forms_switchable = dict(
    castform=True,
    unown=False,
    darmanitan=True,
    basculin=False,
    rotom=True,
    shaymin=True,
    deerling=True,
    sawsbuck=True,
    arceus=True,
    pichu=False,
    giratina=True,
    burmy=True,
    wormadam=False,
    deoxys=True,
    genesect=True,
    meloetta=True,
    gastrodon=False,
    cherrim=True,
    shellos=False,
)
# Output tables being accumulated, keyed by normalized integer id.
result_species = dict()
result_pokemon = dict()
result_forms = dict()
result_chains = dict()
# Build the four output tables by walking every source form row.
for form_id, source_form in forms.items():
    # A form either has its own pokemon row (unique_pokemon_id) or shares
    # the base pokemon's row.
    pokemon_id = source_form['unique_pokemon_id'] or source_form['form_base_pokemon_id']
    species_id = source_form['form_base_pokemon_id']
    source_pokemon = pokemon[pokemon_id]
    source_evolution_chain = evolution_chains[source_pokemon['evolution_chain_id']]
    try:
        source_group = form_groups[species_id]
    except KeyError:
        # Species without a form group get a stub so lookups below succeed.
        source_group = dict(is_battle_only=0)
    # Merge all related source rows into one lookup dict; later updates win.
    all_fields = dict(source_form)
    all_fields.update(source_group)
    all_fields.update(source_pokemon)
    all_fields.update(source_evolution_chain)
    # 'id' is ambiguous after merging several tables; each output id is
    # computed explicitly per column below.
    del all_fields['id']
    new_species = dict()
    for column_name in result_columns['species']:
        if column_name == 'id':
            new_species[column_name] = normalize_id(species_id)
        elif column_name == 'evolves_from_species_id':
            new_species[column_name] = pokemon[species_id]['evolves_from_pokemon_id']
        elif column_name == 'shape_id':
            new_species[column_name] = all_fields['pokemon_shape_id']
        elif column_name == 'forms_switchable':
            # Only species with a form group consult the hand-made table.
            if species_id in form_groups:
                new_species[column_name] = forms_switchable[source_pokemon['identifier']]
            else:
                new_species[column_name] = 0
        else:
            new_species[column_name] = all_fields[column_name]
    put(result_species, new_species)
    new_pokemon = dict()
    for column_name in result_columns['pokemon']:
        if column_name == 'id':
            new_pokemon[column_name] = normalize_id(pokemon_id)
        elif column_name == 'species_id':
            new_pokemon[column_name] = species_id
        else:
            new_pokemon[column_name] = all_fields[column_name]
    put(result_pokemon, new_pokemon)
    new_form = dict()
    for column_name in result_columns['form']:
        if column_name == 'id':
            new_form[column_name] = normalize_id(form_id)
        elif column_name == 'pokemon_id':
            new_form[column_name] = normalize_id(pokemon_id)
        elif column_name == 'form_identifier':
            new_form[column_name] = source_form['identifier']
        elif column_name == 'is_battle_only':
            if source_form['unique_pokemon_id'] == source_form['form_base_pokemon_id']:
                # Default form, therefore not battle-only
                new_form[column_name] = '0'
            else:
                # Keep the source value
                new_form[column_name] = all_fields[column_name]
        else:
            new_form[column_name] = all_fields[column_name]
    put(result_forms, new_form)
    # Evolution chains pass through minus growth_rate_id (that column now
    # lives on the species table; see result_columns above).
    new_chain = dict(source_evolution_chain)
    del new_chain['growth_rate_id']
    put(result_chains, new_chain)
def write_csv(dct, fieldnames, filename, directory=None):
    """Write ``dct``'s rows (sorted by id) to a CSV file with a header row.

    Parameters:
        dct: mapping of id -> row dict.
        fieldnames: output column order.
        filename: target CSV file name.
        directory: target directory; defaults to the module-level
            ``csv_dir``.
    """
    if directory is None:
        directory = csv_dir
    fullname = os.path.join(directory, filename)
    # 'with' flushes and closes the file; the original leaked the handle
    # and misleadingly named the DictWriter 'reader'.
    with open(fullname, 'w') as out_file:
        writer = csv.DictWriter(out_file, fieldnames)
        # Header row: each column titled with its own name.
        writer.writerow(dict((n, n) for n in fieldnames))
        for _, row in sorted(dct.items()):
            writer.writerow(row)
# Emit the rewritten tables back into the CSV directory.
write_csv(result_species, result_columns['species'], 'pokemon_species.csv')
write_csv(result_pokemon, result_columns['pokemon'], 'pokemon.csv')
write_csv(result_forms, result_columns['form'], 'pokemon_forms.csv')
write_csv(result_chains, result_columns['chain'], 'evolution_chains.csv')
|
{
"content_hash": "70438795c02e48ffd69201c7bce4d97c",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 89,
"avg_line_length": 34.95092024539877,
"alnum_prop": 0.6262945409864841,
"repo_name": "RK905/pokedex-1",
"id": "fd1ba03f271273454b177b52c9122cdc6b408f16",
"size": "5715",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "scripts/pokemon_species.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "436178"
}
],
"symlink_target": ""
}
|
""" Utilities for view configuration """
import fnmatch
import re
import functools
import inspect
import six
from pyramid.httpexceptions import HTTPFound
from .params import is_request
def match(pattern, path, flags):
    """
    Check if a pattern matches a path

    Parameters
    ----------
    pattern : str
        Glob or PCRE
    path : str or None
        The path to check, or None if no path
    flags : {'r', 'i', 'a', '?'}
        Special match flags. These may be combined (e.g. 'ri?'). See the notes
        for an explanation of the different values.

    Returns
    -------
    match : bool or SRE_Match
        A boolean indicating the match status, or the regex match object if
        there was a successful PCRE match.

    Notes
    -----
    ==== ==============================================
    Flag Description
    ==== ==============================================
    r    Match using PCRE (default glob)
    i    Case-insensitive match (must be used with 'r')
    a    ASCII-only match (must be used with 'r', python 3 only)
    ?    Path is optional (return True if path is None)
    ==== ==============================================
    """
    # A missing path only matches when the 'optional' flag is present.
    if path is None:
        return '?' in flags
    # Default behavior: case-sensitive glob matching.
    if 'r' not in flags:
        return fnmatch.fnmatchcase(path, pattern)
    # PCRE matching: translate single-character flags into re module flags.
    re_flags = 0
    if 'i' in flags:
        re_flags |= re.I
    if 'a' in flags and hasattr(re, 'A'):  # pragma: no cover
        re_flags |= re.A  # pylint: disable=E1101
    return re.match('^%s$' % pattern, path, re_flags)
class SubpathPredicate(object):
    """
    Generate a custom predicate that matches subpaths

    Parameters
    ----------
    *paths : list
        List of match specs.

    Notes
    -----
    A match spec takes one of three forms: ``'glob'``, ``'name/glob'``, or
    ``'name/glob/flags'``.  The name is optional, but to specify flags you
    must include the leading slash (e.g. ``'/foo.*/r'`` is a nameless spec
    with flags).  Named pieces are exposed via the
    ``request.named_subpaths`` attribute.

    See :meth:`.match` for more information on match flags.
    """

    def __init__(self, paths, config):
        # A lone string is shorthand for a one-element tuple of specs.
        if isinstance(paths, six.string_types):
            paths = (paths,)
        self.paths = paths
        self.config = config

    def text(self):
        """ Display name """
        return 'subpath = %s' % (self.paths,)

    phash = text

    def __call__(self, context, request):
        named_subpaths = {}
        # More path segments than specs can never match.
        if len(request.subpath) > len(self.paths):
            return False
        for index, spec in enumerate(self.paths):
            # Split into at most (name, pattern, flags).
            pieces = spec.split('/', 2)
            name, flags = None, ''
            if len(pieces) == 1:
                (pattern,) = pieces
            elif len(pieces) == 2:
                name, pattern = pieces
            else:
                name, pattern, flags = pieces
            # Specs past the end of the actual subpath see a None path,
            # which only matches when the '?' flag is set.
            path = request.subpath[index] if index < len(request.subpath) else None
            result = match(pattern, path, flags)
            if not result:
                return False
            if name and path is not None:
                named_subpaths[name] = path
            # A successful regex match also contributes its named groups.
            if hasattr(result, 'groupdict'):
                named_subpaths.update(result.groupdict())
        request.named_subpaths = named_subpaths
        return True
def addslash(fxn):
"""
View decorator that adds a trailing slash
Notes
-----
Usage:
.. code-block:: python
@view_config(context=MyCtxt, renderer='json')
@addslash
def do_view(request):
return 'cool data'
"""
argspec = inspect.getargspec(fxn)
@functools.wraps(fxn)
def slash_redirect(*args, **kwargs):
""" Perform the redirect or pass though to view """
# pyramid always calls with (context, request) arguments
if len(args) == 2 and is_request(args[1]):
request = args[1]
if not request.path_url.endswith('/'):
new_url = request.path_url + '/'
if request.query_string:
new_url += '?' + request.query_string
return HTTPFound(location=new_url)
if len(argspec.args) == 1 and argspec.varargs is None:
return fxn(request)
else:
return fxn(*args)
else:
# Otherwise, it's likely a unit test. Don't change anything.
return fxn(*args, **kwargs)
return slash_redirect
def includeme(config):
    """ Add the custom view predicates """
    # Registers 'subpath=' as a view_config predicate backed by
    # SubpathPredicate.
    config.add_view_predicate('subpath', SubpathPredicate)
|
{
"content_hash": "fdf785437f5cfaafb348276828d39c45",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 87,
"avg_line_length": 28.25130890052356,
"alnum_prop": 0.532060785767235,
"repo_name": "stevearc/pyramid_duh",
"id": "d8c2141a3a0c9b92fa8aa3c36bc06c877ad20159",
"size": "5396",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyramid_duh/view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "77553"
},
{
"name": "Shell",
"bytes": "1292"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from encrypted_fields import EncryptedCharField
from django.core.exceptions import ValidationError
from taggit.managers import TaggableManager
from django.conf import settings
from django.db.models.signals import post_save
from dateutil.relativedelta import relativedelta
import re
import time
import json
import query
class Db(models.Model):
    """Connection settings for a database that queries can run against."""
    # Unique short label, used as the display name (see __str__).
    name_short = models.CharField(unique=True, max_length=10)
    name_long = models.CharField(unique=True, max_length=128)
    # Database engine; one of the three supported backends.
    type = models.CharField(max_length=10,
                            choices=(
                                ('MySQL', 'MySQL'), ('Postgres', 'Postgres'),
                                ('Hive2', 'Hive2')),
                            default='None')
    host = models.CharField(max_length=1024)
    db = models.CharField(max_length=1024)
    port = models.IntegerField()
    username = models.CharField(max_length=128)
    # Stored encrypted at rest via EncryptedCharField.
    password_encrypted = EncryptedCharField(
        max_length=1024,)  # TODO FIX SPELLING MISTAKE
    create_time = models.DateTimeField(auto_now_add=True, editable=False)
    modified_time = models.DateTimeField(auto_now=True, editable=False)
    tags = TaggableManager(blank=True)

    def __str__(self):
        return self.name_short

    def clean(self):
        # NOTE(review): always passes; connection validation is not
        # implemented yet.
        return True  # TODO Validate database connection
class Query(models.Model):
    """A saved SQL query plus its charting/display configuration."""
    title = models.CharField(
        unique=True, max_length=124,
        help_text='Primary Short Name Used for URL mappings')
    description = models.TextField(max_length=200)
    description_long = models.TextField(max_length=1024, blank=True)
    query_text = models.TextField(max_length=2048, help_text='Query to Run')
    insert_limit = models.BooleanField(
        default=True, help_text='Insert limit 1000 to end of query')
    db = models.ForeignKey(Db)
    owner = models.ForeignKey(User)
    hide_index = models.BooleanField(
        default=False, help_text='Hide from Main Search')
    hide_table = models.BooleanField(
        default=False, help_text='Supress Data output in display')
    # How results are charted; 'None' disables charting.
    chart_type = models.CharField(
        max_length=10,
        choices=(
            ('None', 'None'), ('line', 'line'),
            ('bar', 'bar'), ('column', 'column'),
            ('area', 'area'), ('country', 'country')
        ),
        default='None'
    )
    pivot_data = models.BooleanField(
        default=False,
        help_text='Pivot data around 1rst&2nd columns. Nulls filled with 0')
    cumulative = models.BooleanField(
        default=False, help_text='Run cumulatie sum')
    log_scale_y = models.BooleanField(
        default=False, help_text='Log scale Y axis')
    stacked = models.BooleanField(default=False, help_text='Stack graph Type')
    create_time = models.DateTimeField(auto_now_add=True, editable=False)
    modified_time = models.DateTimeField(auto_now=True, editable=False)
    # SHOULD INCLUDE default={} in ADMIN!
    # Extra highcharts options as a JSON string; clean() defaults it to '{}'.
    graph_extra = models.TextField(
        blank=True, help_text='JSON form of highcharts formatting')
    image = models.ImageField(
        upload_to=settings.MEDIA_ROOT + '/thumbnails',
        max_length=2048, blank=True)
    cacheable = models.BooleanField(
        default=True, help_text='allows this query result to be cached')
    tags = TaggableManager(blank=True)

    def __unicode__(self):
        return "%s: %s" % (self.id, self.title)

    def __str__(self):
        return "%s: %s" % (self.id, self.title)

    def get_absolute_url(self):
        return '/query/{id}'.format(id=self.id)

    def clean(self):
        """Validate the query: reject write/DDL keywords, check chart
        options, and ensure graph_extra holds valid JSON."""
        # Don't allow queries to contain blacklisted words.
        blacklist = ['delete', 'insert', 'update', 'alter', 'drop']

        def find_whole_word(w):
            # Whole-word, case-insensitive search for w.
            return re.compile(
                r'\b({0})\b'.format(w), flags=re.IGNORECASE
            ).search
        for word in blacklist:
            if find_whole_word(word)(self.query_text) is not None:
                raise ValidationError('Queries can not contain %s' % word)
        if self.chart_type == 'None' and self.stacked == 1:
            raise ValidationError("Can't stack an invisible chart")
        # Default an empty graph_extra to an empty JSON object.
        if self.graph_extra == "" or self.graph_extra is None:
            self.graph_extra = "{}"
        try:
            json.loads(self.graph_extra)
        except:
            raise ValidationError("Graph Extra must be JSON!")
        # Validate that query runs!
        """
        try:
            try:
                Q = query.Manipulate_Data(query_text = 'explain '
                    + self.query_text, db = self.db, user = self.owner)
                Q.run_query()
            except Exception, e:
                # Somethings are un-explainable
                Q = query.Manipulate_Data(query_text = self.query_text,
                    db = self.db, user = self.owner)
                Q.run_query()
        except Exception, e:
            raise ValidationError("Query must run: %s" % (e))"""
class QueryProcessing(models.Model):
    """Key/value processing attribute attached to a Query."""
    class Meta:
        # Exactly one value per (query, attribute) pair.
        unique_together = ['query', 'attribute']
    query = models.ForeignKey(Query)
    attribute = models.CharField(max_length=16)
    value = models.CharField(max_length=128)
class QueryDefault(models.Model):
    """A default search/replace substitution applied to a query."""
    class Meta:
        unique_together = ['query', 'search_for']
    query = models.ForeignKey(Query)
    # Placeholder text to look for in the query.
    search_for = models.CharField(max_length=128)
    replace_with = models.CharField(
        max_length=1024,
        help_text='For today replace with = today and data_type = Date')
    data_type = models.CharField(max_length=10,
                                 choices=(
                                     ('Numeric', 'Numeric'),
                                     ('String', 'String'),
                                     ('Date', 'Date')),
                                 default='String')

    def __str__(self):
        return "%s : %s " % (self.query, self.search_for[0:10])

    def replace_with_cleaned(self):
        """Return the replacement value; the literal 'today' with a Date
        data_type resolves to the current date (YYYY-MM-DD)."""
        if self.data_type == "Date" and self.replace_with.lower() == 'today':
            return time.strftime("%Y-%m-%d")
        else:
            return self.replace_with

    def clean(self):
        def valid_date(datestring):
            # NOTE(review): date validation not implemented; clean()
            # currently always passes.
            pass
            # TODO CHECK
        return True
class QueryPrecedent(models.Model):
    """Declares that one query must precede another."""
    final_query = models.ForeignKey(Query)
    preceding_query = models.ForeignKey(Query, related_name="+")

    def clean(self):
        def cycle_check(self):
            # TODO Check to ensure no cycles
            pass
        # NOTE(review): cycle_check is defined but never invoked, so
        # clean() currently always passes.
        return True
class Dashboard(models.Model):
    """A taggable dashboard page; queries attach via DashboardQuery."""
    title = models.CharField(
        unique=True,
        max_length=124,
        help_text='Primary Short Name Used for URL mappings')
    description = models.TextField(max_length=200)
    description_long = models.TextField(max_length=1024)
    owner = models.ForeignKey(User)
    hide_index = models.BooleanField(
        default=False, help_text='Hide from Main Search')
    create_time = models.DateTimeField(auto_now_add=True, editable=False)
    modified_time = models.DateTimeField(auto_now=True, editable=False)
    tags = TaggableManager(blank=True)

    def __unicode__(self):
        return "%s: %s" % (self.id, self.title)

    def __str__(self):
        return "%s: %s" % (self.id, self.title)

    def get_absolute_url(self):
        return '/dashboard/{id}'.format(id=self.id)
class DashboardQuery(models.Model):
    """Membership of a Query on a Dashboard, with a display order."""
    class Meta:
        # A query appears at most once per dashboard.
        unique_together = ['query', 'dashboard']
    query = models.ForeignKey(Query)
    dashboard = models.ForeignKey(Dashboard)
    # Display order within the dashboard (default 1).
    order = models.IntegerField(default=1)

    def __str__(self):
        return "%s : %s" % (self.query, self.dashboard)

    def __unicode__(self):
        return "%s : %s" % (self.query, self.dashboard)
class QueryCache(models.Model):
    """A cached result for a Query, identified by table name and hash."""
    query = models.ForeignKey(Query)
    # Presumably the name of the table holding the cached rows — confirm
    # against the cache-writing code.
    table_name = models.CharField(unique=True, max_length=128)
    run_time = models.DateTimeField(auto_now=True, editable=False)
    hash = models.CharField(max_length=1024)

    def __str__(self):
        return "%s : %s : %s" % (self.query, self.table_name, self.run_time)

    def is_expired(self, days_back=-1):
        """Return True when run_time < now + relativedelta(days=days_back).

        days_back is expected to be negative (default -1), so by default an
        entry is expired once it is more than one day old.
        """
        if self.run_time < timezone.now() + relativedelta(days=days_back):
            return True
        else:
            return False
class QueryView(models.Model):
    """Record of a user viewing a query, with timing/cache info."""
    user = models.ForeignKey(User)
    query = models.ForeignKey(Query)
    view_time = models.DateTimeField(auto_now_add=True, editable=False)
    # Whether the result came from cache rather than a fresh run.
    used_cache = models.BooleanField(default=False)
    # Execution time — presumably seconds; confirm against the code that
    # populates it.
    execution_time = models.FloatField(default=0.0)

    def __str__(self):
        return "%s : %s : %s" % (self.user, self.query, self.view_time)
def post_save_handler_query(sender, instance, **kwargs):
    """Post-save hook that regenerates a Query's thumbnail image."""
    # POST SAVE TO CREATE IMAGE FOR QUERY
    # Disconnect first so instance.save() below cannot re-trigger this
    # handler recursively.
    post_save.disconnect(post_save_handler_query, sender=Query)
    # 'None' and 'country' chart types get no generated thumbnail.
    if instance.chart_type not in ['None', 'country']:
        lq = query.LoadQuery(query_id=instance.id, user=instance.owner)
        q = lq.prepare_query()
        q.run_query()
        q.run_manipulations()
        image = q.generate_image()
        instance.image = image
        instance.save()
    # Re-attach the handler for subsequent saves.
    post_save.connect(post_save_handler_query, sender=Query)
# Initial registration of the handler at import time.
post_save.connect(post_save_handler_query, sender=Query)
|
{
"content_hash": "af908248cce839d2ba2777f3ed24b137",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 78,
"avg_line_length": 35.64750957854406,
"alnum_prop": 0.6137145313843508,
"repo_name": "sqlviz/sqlviz",
"id": "9926ee341e2f3ebefe7da09df192adc8691b7f18",
"size": "9304",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "website/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ABAP",
"bytes": "962"
},
{
"name": "ActionScript",
"bytes": "1133"
},
{
"name": "Ada",
"bytes": "99"
},
{
"name": "Assembly",
"bytes": "506"
},
{
"name": "AutoHotkey",
"bytes": "720"
},
{
"name": "Batchfile",
"bytes": "260"
},
{
"name": "C#",
"bytes": "83"
},
{
"name": "C++",
"bytes": "1522"
},
{
"name": "COBOL",
"bytes": "4"
},
{
"name": "CSS",
"bytes": "207253"
},
{
"name": "Cirru",
"bytes": "520"
},
{
"name": "Clojure",
"bytes": "794"
},
{
"name": "CoffeeScript",
"bytes": "806"
},
{
"name": "ColdFusion",
"bytes": "86"
},
{
"name": "Common Lisp",
"bytes": "632"
},
{
"name": "Cucumber",
"bytes": "697"
},
{
"name": "D",
"bytes": "324"
},
{
"name": "Dart",
"bytes": "489"
},
{
"name": "Erlang",
"bytes": "487"
},
{
"name": "Forth",
"bytes": "979"
},
{
"name": "GLSL",
"bytes": "512"
},
{
"name": "Go",
"bytes": "641"
},
{
"name": "Groovy",
"bytes": "1080"
},
{
"name": "HTML",
"bytes": "117252"
},
{
"name": "Haskell",
"bytes": "512"
},
{
"name": "Haxe",
"bytes": "447"
},
{
"name": "JSONiq",
"bytes": "4"
},
{
"name": "Java",
"bytes": "1550"
},
{
"name": "JavaScript",
"bytes": "37049866"
},
{
"name": "Julia",
"bytes": "210"
},
{
"name": "LSL",
"bytes": "2080"
},
{
"name": "Liquid",
"bytes": "1883"
},
{
"name": "LiveScript",
"bytes": "5747"
},
{
"name": "Lua",
"bytes": "981"
},
{
"name": "Makefile",
"bytes": "4566"
},
{
"name": "Matlab",
"bytes": "4"
},
{
"name": "Nix",
"bytes": "2212"
},
{
"name": "OCaml",
"bytes": "539"
},
{
"name": "Objective-C",
"bytes": "2672"
},
{
"name": "OpenSCAD",
"bytes": "666"
},
{
"name": "PHP",
"bytes": "351"
},
{
"name": "Pascal",
"bytes": "1412"
},
{
"name": "Perl",
"bytes": "678"
},
{
"name": "PowerShell",
"bytes": "418"
},
{
"name": "Protocol Buffer",
"bytes": "274"
},
{
"name": "Python",
"bytes": "122960"
},
{
"name": "R",
"bytes": "2445"
},
{
"name": "Ruby",
"bytes": "531"
},
{
"name": "Rust",
"bytes": "495"
},
{
"name": "Scala",
"bytes": "1541"
},
{
"name": "Scheme",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "4322"
},
{
"name": "Tcl",
"bytes": "899"
},
{
"name": "TeX",
"bytes": "1345"
},
{
"name": "TypeScript",
"bytes": "1607"
},
{
"name": "VHDL",
"bytes": "830"
},
{
"name": "Vala",
"bytes": "485"
},
{
"name": "Verilog",
"bytes": "274"
},
{
"name": "Visual Basic",
"bytes": "916"
},
{
"name": "XQuery",
"bytes": "114"
}
],
"symlink_target": ""
}
|
"""Factories for compliance app"""
from factory import Faker, SubFactory, Trait
from factory.django import DjangoModelFactory
from factory.fuzzy import FuzzyChoice
from compliance.constants import RESULT_DENIED, RESULT_SUCCESS, RESULT_CHOICES
from compliance.models import ExportsInquiryLog
class ExportsInquiryLogFactory(DjangoModelFactory):
    """Factory for ExportsInquiryLog"""
    user = SubFactory("profiles.factories.UserFactory")
    # Random result drawn from the full set of RESULT_CHOICES.
    computed_result = FuzzyChoice(RESULT_CHOICES)
    reason_code = Faker("pyint")
    # Random three-digit numeric string.
    info_code = Faker("numerify", text="###")
    encrypted_request = Faker("pystr", max_chars=30)
    encrypted_response = Faker("pystr", max_chars=30)

    class Meta:
        model = ExportsInquiryLog

    class Params:
        # Traits forcing a specific compliance result on the built object.
        denied = Trait(computed_result=RESULT_DENIED)
        success = Trait(computed_result=RESULT_SUCCESS)
|
{
"content_hash": "05f91d4db81609076d07bbf84286923c",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 78,
"avg_line_length": 31.88888888888889,
"alnum_prop": 0.7386759581881533,
"repo_name": "mitodl/bootcamp-ecommerce",
"id": "42d6dc22555cdb553ac7cfe36b54d2420da1ce97",
"size": "861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compliance/factories.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "325"
},
{
"name": "Dockerfile",
"bytes": "998"
},
{
"name": "HTML",
"bytes": "70605"
},
{
"name": "JavaScript",
"bytes": "491664"
},
{
"name": "Procfile",
"bytes": "293"
},
{
"name": "Python",
"bytes": "1236492"
},
{
"name": "SCSS",
"bytes": "72463"
},
{
"name": "Shell",
"bytes": "7329"
}
],
"symlink_target": ""
}
|
import matplotlib.pyplot as plt
import pandas as pd
# Load the samples from normal.csv (single unnamed column, no header row).
normal = pd.read_csv('normal.csv', header=None)
# Save a histogram of the data.
normal.hist()
plt.savefig('normal_hist.pdf')
# Five-number summary (min, quartiles, max), exported as a LaTeX table.
q = normal.quantile([0, 0.25, 0.5, 0.75, 1])
q.columns = ['quantiles']
q.to_latex('quantiles.tex')
|
{
"content_hash": "8c782ebeac2524b89b7ffe28a65790f1",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 47,
"avg_line_length": 20.666666666666668,
"alnum_prop": 0.6935483870967742,
"repo_name": "clarkfitzg/templates",
"id": "aba06b43d2a278e449ea98e6d4b204a98b48ccdf",
"size": "248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "latex/python/analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1946"
},
{
"name": "C++",
"bytes": "2527"
},
{
"name": "Julia",
"bytes": "449"
},
{
"name": "Makefile",
"bytes": "4964"
},
{
"name": "Python",
"bytes": "384"
},
{
"name": "R",
"bytes": "2862"
},
{
"name": "Shell",
"bytes": "240"
},
{
"name": "TeX",
"bytes": "7786"
}
],
"symlink_target": ""
}
|
"""
Description here
"""
import logging as log
import argparse
import itertools
import madsenlab.axelrod.utils as utils
import madsenlab.axelrod.data as data
import csv
import random
import ming
from bson import ObjectId
def setup():
    """Parse CLI arguments, configure logging, and connect to the
    database layer via Ming.

    Sets the module globals ``args`` and ``simconfig`` used by main().
    """
    global args, simconfig
    parser = argparse.ArgumentParser()
    parser.add_argument("--experiment", help="provide name for experiment", required=True)
    parser.add_argument("--debug", help="turn on debugging output")
    parser.add_argument("--dbhost", help="database hostname, defaults to localhost", default="localhost")
    parser.add_argument("--dbport", help="database port, defaults to 27017", default="27017")
    parser.add_argument("--configuration", help="Configuration file for experiment", required=True)
    parser.add_argument("--filename", help="path to file for export", required=True)
    args = parser.parse_args()
    simconfig = utils.TreeStructuredConfiguration(args.configuration)
    # --debug must be the literal string '1' to enable DEBUG-level logging.
    if args.debug == '1':
        log.basicConfig(level=log.DEBUG, format='%(asctime)s %(levelname)s: %(message)s')
    else:
        log.basicConfig(level=log.INFO, format='%(asctime)s %(levelname)s: %(message)s')
    log.debug("experiment name: %s", args.experiment)
    #### main program ####
    data.set_experiment_name(args.experiment)
    data.set_database_hostname(args.dbhost)
    data.set_database_port(args.dbport)
    config = data.getMingConfiguration(data.modules)
    ming.configure(**config)
def main():
    """Sample simulation runs per parameter combination and export to CSV.

    For each parameter combination in the configured state space:
      1. Find all simulation_run_ids recorded with that combination.
      2. Sample up to REPLICATIONS_PER_PARAM_SET of them.
      3. Write every record of each sampled run to the CSV (one row per
         trait-graph entry, so row counts per run still vary).
    """
    structure_class_name = simconfig.POPULATION_STRUCTURE_CLASS
    log.info("Configuring TreeStructured Axelrod model with structure class: %s", structure_class_name)
    basic_config = utils.TreeStructuredConfiguration(args.configuration)
    if basic_config.INTERACTION_RULE_CLASS == 'madsenlab.axelrod.rules.MultipleTreePrerequisitesLearningCopyingRule':
        # Parameter axes whose cartesian product defines the state space.
        state_space = [
            basic_config.POPULATION_SIZES_STUDIED,
            basic_config.TRAIT_LEARNING_RATE,
            basic_config.MAXIMUM_INITIAL_TRAITS,
            basic_config.NUM_TRAIT_TREES,
            basic_config.TREE_BRANCHING_FACTOR,
            basic_config.TREE_DEPTH_FACTOR,
            basic_config.TRAIT_LOSS_RATE,
            basic_config.INNOVATION_RATE,
        ]
    else:
        log.error("This analytics class not compatible with rule class: %s", basic_config.INTERACTION_RULE_CLASS)
        exit(1)
    if basic_config.NETWORK_FACTORY_CLASS == 'madsenlab.axelrod.population.WattsStrogatzSmallWorldFactory':
        state_space.append(basic_config.WS_REWIRING_FACTOR)
    num_samples = basic_config.REPLICATIONS_PER_PARAM_SET
    # Build the export writer ONCE.  The original duplicated this whole
    # setup verbatim, opening args.filename a second time (truncating it)
    # and orphaning the first file handle.
    fieldnames = data.axelrod_run_treestructured.columns_to_export_for_analysis()
    orig_fields = fieldnames[:]
    fieldnames.extend(["cultureid", "culture_count", "mean_radii", "sd_radii",
                       "orbit_number", "autgroupsize", "remaining_density",
                       "mean_degree", "sd_degree",
                       "mean_orbit_multiplicity", "sd_orbit_multiplicity",
                       "max_orbit_multiplicity", "order", "msg_lambda", "msg_beta", "mem_beta"])
    # 'with' guarantees the export file is flushed and closed.
    with open(args.filename, "wb") as ofile:
        writer = csv.DictWriter(ofile, fieldnames=fieldnames, quotechar='"', quoting=csv.QUOTE_ALL)
        writer.writerow(dict((n, n) for n in fieldnames))
        for param_combination in itertools.product(*state_space):
            popsize = int(param_combination[0])
            lrate = float(param_combination[1])
            maxtraits = int(param_combination[2])
            num_trees = int(param_combination[3])
            branching_factor = float(param_combination[4])
            depth_factor = float(param_combination[5])
            loss_rate = float(param_combination[6])
            innov_rate = float(param_combination[7])
            # Find all simulation ID's with the combination of params...
            res = data.AxelrodStatsTreestructured.m.find(dict(population_size=popsize,
                                                              learning_rate=lrate,
                                                              max_init_traits=maxtraits,
                                                              num_trait_trees=num_trees,
                                                              branching_factor=branching_factor,
                                                              depth_factor=depth_factor,
                                                              loss_rate=loss_rate,
                                                              innovation_rate=innov_rate),
                                                         dict(simulation_run_id=1)).all()
            simruns = set(run.simulation_run_id for run in res)
            if len(simruns) < num_samples:
                # Not enough runs to sample; take them all and note it.
                sample_simruns = simruns
                log.info("pc only has %s rows: LR: %s NT: %s BF: %s DF: %s IR: %s", len(simruns),
                         lrate, num_trees, branching_factor, depth_factor, innov_rate)
            else:
                # random.sample requires a sequence on Python 3.11+.
                sample_simruns = set(random.sample(list(simruns), num_samples))
            log.debug("num ids for param combo: %s ", len(simruns))
            id_count = 0
            for simid in sample_simruns:
                id_count += 1
                cursor = data.AxelrodStatsTreestructured.m.find(dict(simulation_run_id=simid))
                for sample in cursor:
                    row = dict()
                    for field in sorted(orig_fields):
                        row[field] = sample[field]
                    # Produce one output row per trait-graph entry.
                    tg_stats = sample['trait_graph_stats']
                    for tg in tg_stats:
                        row['cultureid'] = tg['cultureid']
                        row['culture_count'] = tg['culture_count']
                        row['mean_radii'] = tg['mean_radii']
                        row['sd_radii'] = tg['sd_radii']
                        row['mean_degree'] = tg['mean_degree']
                        row['sd_degree'] = tg['sd_degree']
                        row['orbit_number'] = tg['orbit_number']
                        row['autgroupsize'] = tg['autgroupsize']
                        row['remaining_density'] = tg['remaining_density']
                        row['mean_orbit_multiplicity'] = tg['mean_orbit_multiplicity']
                        row['sd_orbit_multiplicity'] = tg['sd_orbit_multiplicity']
                        row['max_orbit_multiplicity'] = tg['max_orbit_multiplicity']
                        row['order'] = tg['order']
                        row['msg_lambda'] = tg['msg_lambda']
                        row['msg_beta'] = tg['msg_beta']
                        row['mem_beta'] = tg['mem_beta']
                        writer.writerow(row)
            log.debug("sampled %s rows from param combo", id_count)
# Script entry point: configure globals and database, then export.
if __name__ == "__main__":
    setup()
    main()
|
{
"content_hash": "9da31fe3455748dafa1fca676345b264",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 117,
"avg_line_length": 43.085427135678394,
"alnum_prop": 0.5779099603452298,
"repo_name": "mmadsen/axelrod-ct",
"id": "4625bb7f76e87801a081542b51a4bee1567f6863",
"size": "8778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analytics/treestructured-uniform-sampler.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "249748"
},
{
"name": "Shell",
"bytes": "713"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    """Template factory: build the Tusken Raider lair NPC tangible."""
    result = Tangible()
    result.template = "object/tangible/lair/tusken_raider/shared_lair_tusken_raider_npc.iff"
    result.attribute_template_id = 9
    result.stfName("lair_n","tusken_raider")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
|
{
"content_hash": "ff5518e6c6897f6bc38ea8351cf36f6a",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 89,
"avg_line_length": 24.53846153846154,
"alnum_prop": 0.7021943573667712,
"repo_name": "obi-two/Rebelion",
"id": "bb2fb5f08f037646ea89a98a5be62f0ab65ae459",
"size": "464",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/tangible/lair/tusken_raider/shared_lair_tusken_raider_npc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""
MoinMoin - AttachList Macro
A macro to produce a list of attached files
Usage: <<AttachList([pagename,mime_type])>>
If pagename isn't set, the current pagename is used.
If mime_type isn't given, all files are listed.
@copyright: 2004 Jacob Cohen, Nigel Metheringham,
2006-2013 MoinMoin:ReimarBauer
@license: GNU GPL, see COPYING for details.
"""
import re
from MoinMoin.action.AttachFile import _build_filelist
def macro_AttachList(macro, pagename=None, mime_type=u'*', search_term=u'.+'):
    """Render a listing of files attached to a wiki page.

    When *pagename* is falsy the page the macro appears on is used.
    *mime_type* restricts the listing to matching files, and *search_term*
    is a (Unicode-aware) regular expression filter on the file names.
    """
    target_page = pagename or macro.formatter.page.page_name
    name_filter = re.compile(search_term, re.U).search
    return _build_filelist(macro.request, target_page, 0, 1,
                           mime_type=mime_type, filterfn=name_filter)
|
{
"content_hash": "c360abc16ff1114bdaaccd82d3f2cf7f",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 97,
"avg_line_length": 31.185185185185187,
"alnum_prop": 0.671021377672209,
"repo_name": "Glottotopia/aagd",
"id": "f367737060ca0d679e4dd8002da6fbce2e5f1f3d",
"size": "842",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "moin/local/moin/MoinMoin/macro/AttachList.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "152885"
},
{
"name": "CSS",
"bytes": "454208"
},
{
"name": "ColdFusion",
"bytes": "438820"
},
{
"name": "HTML",
"bytes": "1998354"
},
{
"name": "Java",
"bytes": "510468"
},
{
"name": "JavaScript",
"bytes": "6505329"
},
{
"name": "Lasso",
"bytes": "72399"
},
{
"name": "Makefile",
"bytes": "10216"
},
{
"name": "PHP",
"bytes": "259528"
},
{
"name": "Perl",
"bytes": "137186"
},
{
"name": "Python",
"bytes": "13713475"
},
{
"name": "Shell",
"bytes": "346"
},
{
"name": "XSLT",
"bytes": "15970"
}
],
"symlink_target": ""
}
|
from setuptools import setup
# Package metadata for awsclpy; see https://pypi.org/ for classifier values.
setup(
    name='awsclpy',
    version='0.4',
    description='Chain AWSCLI commands in Python.',
    long_description=('Run AWSCLI commands and use their outputs in next '
                      'commands.'),
    author='Hamid Nazari',
    author_email='hn@linux.com',
    maintainer='Hamid Nazari',
    maintainer_email='hn@linux.com',
    url='http://github.com/hamidnazari/awsclpy',
    license='MIT',
    packages=['awsclpy'],
    install_requires=[
        'awscli==1.7.36',
        'six==1.9.0'
    ],
    zip_safe=False,
    keywords=[
        'awsclpy',
        'awscli',
        'aws',
        'amazon web services',
        'command line interface',
    ],
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Topic :: Software Development',
        'Topic :: Software Development :: Libraries',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Development Status :: 4 - Beta',
    ])
|
{
"content_hash": "48101a5ea8d6581660e94ffbd4d57230",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 77,
"avg_line_length": 33.078947368421055,
"alnum_prop": 0.5536992840095465,
"repo_name": "hamidnazari/awsclpy",
"id": "e728a58d5489e7c87edfdd8dfd4903ba7a9b063f",
"size": "1257",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13116"
}
],
"symlink_target": ""
}
|
from django import forms
# (value, label) choices for the "tool" radio buttons on FeedbackForm.
TOOLS = [
    ('all', 'All the tools'),
    ('travel', 'Where you could travel for work'),
    ('sectors', 'Discover jobs you could do'),
    ('discovery', 'Find jobs to apply for'),
]
# (value, label) choices for the "feedback_type" radio buttons on FeedbackForm.
FEEDBACK_TYPES = [
    ('not_working', 'Something isn\'t working'),
    ('new_idea', 'I would like to suggest a new idea'),
    ('confusing_coach', 'I found something confusing'),
    ('confusing_claimant', 'A claimant found something confusing')
]
class FeedbackForm(forms.Form):
    """Site feedback form: who is reporting, what kind of feedback, and
    which tool it concerns.  ``referring_url`` is filled in by the page
    template, not by the user."""
    name = forms.CharField(
        required=True,
        widget=forms.TextInput(attrs={"class": "form-control"}),
        error_messages={
            'required': 'Please provide your name.'
        },
    )
    email = forms.EmailField(
        required=True,
        widget=forms.TextInput(attrs={"class": "form-control"}),
        error_messages={
            'required': 'Please provide an email address.',
            'invalid': 'Please provide a valid email address.'
        },
    )
    message = forms.CharField(
        required=True,
        widget=forms.Textarea(attrs={"class": "form-control"}),
        error_messages={
            'required': 'Please provide more detail.'
        },
    )
    tool = forms.ChoiceField(
        choices=TOOLS,
        widget=forms.RadioSelect,
        error_messages={
            'required': 'Please select one of the tools.'
        },
    )
    feedback_type = forms.ChoiceField(
        choices=FEEDBACK_TYPES,
        widget=forms.RadioSelect,
        error_messages={
            'required': 'Please select the kind of feedback you are providing.'
        },
    )
    referring_url = forms.CharField(required=False, widget=forms.HiddenInput)
class EmailForm(forms.Form):
    """Minimal form that captures just an email address."""
    email = forms.EmailField(
        required=True,
        widget=forms.TextInput(attrs={"class": "form-control"}),
        error_messages={
            'required': 'Please provide an email address.',
            'invalid': 'Please provide a valid email address.'
        },
    )
|
{
"content_hash": "01a41f88f5b9e0c02e19eaa7c9f2d402",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 79,
"avg_line_length": 30.753846153846155,
"alnum_prop": 0.5927963981990996,
"repo_name": "lm-tools/situational",
"id": "76d5b6723eaf63c2d75cf1ed92cc63b8593951d6",
"size": "1999",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "situational/apps/home_page/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "839823"
},
{
"name": "HTML",
"bytes": "61976"
},
{
"name": "JavaScript",
"bytes": "265408"
},
{
"name": "Python",
"bytes": "152160"
},
{
"name": "Ruby",
"bytes": "551"
},
{
"name": "Shell",
"bytes": "522"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
from unittest import TestCase
import six
# Public API of this module.
__all__ = ['call', 'parametrize', 'ParametrizedTestCase']
try:
    # Python 3.4+ ships this in the standard library.
    from functools import partialmethod
except ImportError:
    # Python 2 fallback: emulate functools.partialmethod with a closure.
    # Keyword arguments given at call time override the pre-bound ones,
    # matching the stdlib behaviour.
    def partialmethod(func, *bound_args, **bound_kwargs):
        def method(self, *call_args, **call_kwargs):
            merged = dict(bound_kwargs, **call_kwargs)
            return func(self, *(bound_args + call_args), **merged)
        return method
def call(*args, **kwargs):
    """Capture the given arguments as an ``(args, kwargs)`` pair.

    Used to describe one parametrization case for :func:`parametrize`.
    """
    captured = (args, kwargs)
    return captured
def parametrize(*unnamed_cases, **named_cases):
    """Mark a test method with parametrization cases.

    The cases are simply attached to the function as a ``parameters``
    attribute; :class:`ParametrizedMeta` expands them at class creation.
    """
    def attach_cases(func):
        func.parameters = unnamed_cases, named_cases
        return func
    return attach_cases
class ParametrizedMeta(type):
    """Metaclass that expands ``@parametrize``-decorated methods.

    Every attribute carrying a ``parameters`` marker is removed from the
    class dict and replaced with one concrete method per case, named
    ``<method>_case_<i>`` for unnamed cases and ``<method>_<case>`` for
    named ones.
    """
    def __new__(mcs, name, bases, class_dict):
        # Iterate over a snapshot of the keys because we mutate class_dict
        # while walking it.  NOTE: the loop variable must NOT be called
        # ``name`` — the original code shadowed the class-name parameter
        # here, so type.__new__ received the last attribute key as the
        # class name.
        for attr_name in list(class_dict):
            attribute = class_dict[attr_name]
            if hasattr(attribute, 'parameters'):
                # This is a parametrized method, create new versions and remove original one
                del class_dict[attr_name]
                unnamed_cases, named_cases = attribute.parameters
                for i, (args, kwargs) in enumerate(unnamed_cases, start=1):
                    new_name = '{0}_case_{1:d}'.format(attr_name, i)
                    class_dict[new_name] = partialmethod(attribute, *args, **kwargs)
                for case_name, (args, kwargs) in six.iteritems(named_cases):
                    new_name = '{0}_{1}'.format(attr_name, case_name)
                    class_dict[new_name] = partialmethod(attribute, *args, **kwargs)
        return super(ParametrizedMeta, mcs).__new__(mcs, name, bases, class_dict)
# Convenience base class: a unittest.TestCase whose @parametrize-marked
# methods are expanded by ParametrizedMeta (six.with_metaclass keeps this
# working on both Python 2 and 3).
class ParametrizedTestCase(six.with_metaclass(ParametrizedMeta, TestCase)):
    pass
|
{
"content_hash": "e6ad9e2ef1760bb191510f69119ed064",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 111,
"avg_line_length": 34.30357142857143,
"alnum_prop": 0.6272774596564289,
"repo_name": "canni/ptestcase",
"id": "53233da1396f8c07b770ea637986c177bf7a2483",
"size": "1945",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ptestcase/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "4896"
}
],
"symlink_target": ""
}
|
# Source subreddit and destination Telegram channel for this mirror bot.
subreddit = 'DunderMifflin'
t_channel = '@DunderMiff'
def send_post(submission, r2t):
    # Forward the submission unchanged; r2t is the reddit2telegram sender.
    # min_upvotes_limit=1 presumably filters out zero-score posts — confirm
    # against r2t.send_simple's implementation.
    return r2t.send_simple(submission, min_upvotes_limit=1)
|
{
"content_hash": "fbfce5e894c6b493659036d2082bdbfa",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 59,
"avg_line_length": 24.666666666666668,
"alnum_prop": 0.7364864864864865,
"repo_name": "Fillll/reddit2telegram",
"id": "9d58ca61dd8729fc1557c161a67ee77b1f70f137",
"size": "165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reddit2telegram/channels/~inactive/dundermiff/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "301463"
},
{
"name": "Shell",
"bytes": "153"
}
],
"symlink_target": ""
}
|
"""
babel.core
~~~~~~~~~~
Core locale representation and locale data access.
:copyright: (c) 2013 by the Babel Team.
:license: BSD, see LICENSE for more details.
"""
import os
from babel import localedata
from babel._compat import pickle, string_types
__all__ = ['UnknownLocaleError', 'Locale', 'default_locale', 'negotiate_locale',
           'parse_locale']
# Lazily-loaded contents of babel/global.dat; populated by get_global().
_global_data = None
def _raise_no_data_error():
raise RuntimeError('The babel data files are not available. '
'This usually happens because you are using '
'a source checkout from Babel and you did '
'not build the data files. Just make sure '
'to run "python setup.py import_cldr" before '
'installing the library.')
def get_global(key):
    """Return the dictionary for the given key in the global data.

    The global data is stored in the ``babel/global.dat`` file and contains
    information independent of individual locales.

    >>> get_global('zone_aliases')['UTC']
    u'Etc/GMT'
    >>> get_global('zone_territories')['Europe/Berlin']
    u'DE'

    .. versionadded:: 0.9

    :param key: the data key
    """
    global _global_data
    if _global_data is None:
        # global.dat lives next to this module inside the babel package.
        dirname = os.path.dirname(__file__)
        filename = os.path.join(dirname, 'global.dat')
        if not os.path.isfile(filename):
            _raise_no_data_error()
        # ``with`` guarantees the file is closed even if unpickling fails
        # (the original used an explicit try/finally for the same purpose).
        with open(filename, 'rb') as fileobj:
            _global_data = pickle.load(fileobj)
    # Unknown keys yield an empty dict rather than raising.
    return _global_data.get(key, {})
#: Mapping of language-only locale identifiers to the fully qualified
#: identifier most commonly meant by them; used as the default alias table
#: for locale negotiation (see negotiate_locale / default_locale).
LOCALE_ALIASES = {
    'ar': 'ar_SY', 'bg': 'bg_BG', 'bs': 'bs_BA', 'ca': 'ca_ES', 'cs': 'cs_CZ',
    'da': 'da_DK', 'de': 'de_DE', 'el': 'el_GR', 'en': 'en_US', 'es': 'es_ES',
    'et': 'et_EE', 'fa': 'fa_IR', 'fi': 'fi_FI', 'fr': 'fr_FR', 'gl': 'gl_ES',
    'he': 'he_IL', 'hu': 'hu_HU', 'id': 'id_ID', 'is': 'is_IS', 'it': 'it_IT',
    'ja': 'ja_JP', 'km': 'km_KH', 'ko': 'ko_KR', 'lt': 'lt_LT', 'lv': 'lv_LV',
    'mk': 'mk_MK', 'nl': 'nl_NL', 'nn': 'nn_NO', 'no': 'nb_NO', 'pl': 'pl_PL',
    'pt': 'pt_PT', 'ro': 'ro_RO', 'ru': 'ru_RU', 'sk': 'sk_SK', 'sl': 'sl_SI',
    'sv': 'sv_SE', 'th': 'th_TH', 'tr': 'tr_TR', 'uk': 'uk_UA'
}
class UnknownLocaleError(Exception):
    """Raised when a locale is requested for which no locale data exists."""
    def __init__(self, identifier):
        """Create the exception.

        :param identifier: the identifier string of the unsupported locale
        """
        super(UnknownLocaleError, self).__init__(
            'unknown locale %r' % identifier)
        #: The identifier of the locale that could not be found.
        self.identifier = identifier
class Locale(object):
    """Representation of a specific locale.
    >>> locale = Locale('en', 'US')
    >>> repr(locale)
    "Locale('en', territory='US')"
    >>> locale.display_name
    u'English (United States)'
    A `Locale` object can also be instantiated from a raw locale string:
    >>> locale = Locale.parse('en-US', sep='-')
    >>> repr(locale)
    "Locale('en', territory='US')"
    `Locale` objects provide access to a collection of locale data, such as
    territory and language names, number and date format patterns, and more:
    >>> locale.number_symbols['decimal']
    u'.'
    If a locale is requested for which no locale data is available, an
    `UnknownLocaleError` is raised:
    >>> Locale.parse('en_DE')
    Traceback (most recent call last):
        ...
    UnknownLocaleError: unknown locale 'en_DE'
    For more information see :rfc:`3066`.
    """
    def __init__(self, language, territory=None, script=None, variant=None):
        """Initialize the locale object from the given identifier components.
        >>> locale = Locale('en', 'US')
        >>> locale.language
        'en'
        >>> locale.territory
        'US'
        :param language: the language code
        :param territory: the territory (country or region) code
        :param script: the script code
        :param variant: the variant code
        :raise `UnknownLocaleError`: if no locale data is available for the
                                     requested locale
        """
        #: the language code
        self.language = language
        #: the territory (country or region) code
        self.territory = territory
        #: the script code
        self.script = script
        #: the variant code
        self.variant = variant
        # Locale data is loaded lazily on first access (see the _data
        # property below).
        self.__data = None
        # str(self) yields the canonical identifier; validate it eagerly so
        # an unsupported locale fails at construction time.
        identifier = str(self)
        if not localedata.exists(identifier):
            raise UnknownLocaleError(identifier)
    @classmethod
    def default(cls, category=None, aliases=LOCALE_ALIASES):
        """Return the system default locale for the specified category.
        >>> for name in ['LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LC_MESSAGES']:
        ...     os.environ[name] = ''
        >>> os.environ['LANG'] = 'fr_FR.UTF-8'
        >>> Locale.default('LC_MESSAGES')
        Locale('fr', territory='FR')
        The following fallbacks to the variable are always considered:
        - ``LANGUAGE``
        - ``LC_ALL``
        - ``LC_CTYPE``
        - ``LANG``
        :param category: one of the ``LC_XXX`` environment variable names
        :param aliases: a dictionary of aliases for locale identifiers
        """
        # XXX: use likely subtag expansion here instead of the
        # aliases dictionary.
        locale_string = default_locale(category, aliases=aliases)
        return cls.parse(locale_string)
    @classmethod
    def negotiate(cls, preferred, available, sep='_', aliases=LOCALE_ALIASES):
        """Find the best match between available and requested locale strings.
        >>> Locale.negotiate(['de_DE', 'en_US'], ['de_DE', 'de_AT'])
        Locale('de', territory='DE')
        >>> Locale.negotiate(['de_DE', 'en_US'], ['en', 'de'])
        Locale('de')
        >>> Locale.negotiate(['de_DE', 'de'], ['en_US'])
        You can specify the character used in the locale identifiers to separate
        the differnet components. This separator is applied to both lists. Also,
        case is ignored in the comparison:
        >>> Locale.negotiate(['de-DE', 'de'], ['en-us', 'de-de'], sep='-')
        Locale('de', territory='DE')
        :param preferred: the list of locale identifers preferred by the user
        :param available: the list of locale identifiers available
        :param aliases: a dictionary of aliases for locale identifiers
        """
        # Returns None implicitly when no identifier matches.
        identifier = negotiate_locale(preferred, available, sep=sep,
                                      aliases=aliases)
        if identifier:
            return Locale.parse(identifier, sep=sep)
    @classmethod
    def parse(cls, identifier, sep='_', resolve_likely_subtags=True):
        """Create a `Locale` instance for the given locale identifier.
        >>> l = Locale.parse('de-DE', sep='-')
        >>> l.display_name
        u'Deutsch (Deutschland)'
        If the `identifier` parameter is not a string, but actually a `Locale`
        object, that object is returned:
        >>> Locale.parse(l)
        Locale('de', territory='DE')
        This also can perform resolving of likely subtags which it does
        by default. This is for instance useful to figure out the most
        likely locale for a territory you can use ``'und'`` as the
        language tag:
        >>> Locale.parse('und_AT')
        Locale('de', territory='AT')
        :param identifier: the locale identifier string
        :param sep: optional component separator
        :param resolve_likely_subtags: if this is specified then a locale will
                                       have its likely subtag resolved if the
                                       locale otherwise does not exist. For
                                       instance ``zh_TW`` by itself is not a
                                       locale that exists but Babel can
                                       automatically expand it to the full
                                       form of ``zh_hant_TW``. Note that this
                                       expansion is only taking place if no
                                       locale exists otherwise. For instance
                                       there is a locale ``en`` that can exist
                                       by itself.
        :raise `ValueError`: if the string does not appear to be a valid locale
                             identifier
        :raise `UnknownLocaleError`: if no locale data is available for the
                                     requested locale
        """
        if identifier is None:
            return None
        elif isinstance(identifier, Locale):
            return identifier
        elif not isinstance(identifier, string_types):
            raise TypeError('Unxpected value for identifier: %r' % (identifier,))
        parts = parse_locale(identifier, sep=sep)
        input_id = get_locale_identifier(parts)
        # Helper: attempt to construct the locale, None on failure.
        def _try_load(parts):
            try:
                return cls(*parts)
            except UnknownLocaleError:
                return None
        # Helper: attempt full parts first, then language+territory only.
        def _try_load_reducing(parts):
            # Success on first hit, return it.
            locale = _try_load(parts)
            if locale is not None:
                return locale
            # Now try without script and variant
            locale = _try_load(parts[:2])
            if locale is not None:
                return locale
        locale = _try_load(parts)
        if locale is not None:
            return locale
        if not resolve_likely_subtags:
            raise UnknownLocaleError(input_id)
        # From here onwards is some very bad likely subtag resolving. This
        # whole logic is not entirely correct but good enough (tm) for the
        # time being. This has been added so that zh_TW does not cause
        # errors for people when they upgrade. Later we should properly
        # implement ICU like fuzzy locale objects and provide a way to
        # maximize and minimize locale tags.
        language, territory, script, variant = parts
        language = get_global('language_aliases').get(language, language)
        territory = get_global('territory_aliases').get(territory, (territory,))[0]
        script = get_global('script_aliases').get(script, script)
        variant = get_global('variant_aliases').get(variant, variant)
        # 'ZZ' and 'Zzzz' are the CLDR "unknown" codes; drop them.
        if territory == 'ZZ':
            territory = None
        if script == 'Zzzz':
            script = None
        parts = language, territory, script, variant
        # First match: try the whole identifier
        new_id = get_locale_identifier(parts)
        likely_subtag = get_global('likely_subtags').get(new_id)
        if likely_subtag is not None:
            locale = _try_load_reducing(parse_locale(likely_subtag))
            if locale is not None:
                return locale
        # If we did not find anything so far, try again with a
        # simplified identifier that is just the language
        likely_subtag = get_global('likely_subtags').get(language)
        if likely_subtag is not None:
            language2, _, script2, variant2 = parse_locale(likely_subtag)
            locale = _try_load_reducing((language2, territory, script2, variant2))
            if locale is not None:
                return locale
        raise UnknownLocaleError(input_id)
    def __eq__(self, other):
        # Objects lacking any of the four components compare unequal.
        for key in ('language', 'territory', 'script', 'variant'):
            if not hasattr(other, key):
                return False
        return (self.language == other.language) and \
            (self.territory == other.territory) and \
            (self.script == other.script) and \
            (self.variant == other.variant)
    def __ne__(self, other):
        return not self.__eq__(other)
    def __repr__(self):
        # Only non-None components are shown, e.g. "Locale('de', territory='DE')".
        parameters = ['']
        for key in ('territory', 'script', 'variant'):
            value = getattr(self, key)
            if value is not None:
                parameters.append('%s=%r' % (key, value))
        parameter_string = '%r' % self.language + ', '.join(parameters)
        return 'Locale(%s)' % parameter_string
    def __str__(self):
        return get_locale_identifier((self.language, self.territory,
                                      self.script, self.variant))
    @property
    def _data(self):
        """Lazily loaded locale data mapping for this locale (cached)."""
        if self.__data is None:
            self.__data = localedata.LocaleDataDict(localedata.load(str(self)))
        return self.__data
    def get_display_name(self, locale=None):
        """Return the display name of the locale using the given locale.
        The display name will include the language, territory, script, and
        variant, if those are specified.
        >>> Locale('zh', 'CN', script='Hans').get_display_name('en')
        u'Chinese (Simplified, China)'
        :param locale: the locale to use
        """
        if locale is None:
            locale = self
        locale = Locale.parse(locale)
        retval = locale.languages.get(self.language)
        if self.territory or self.script or self.variant:
            details = []
            if self.script:
                details.append(locale.scripts.get(self.script))
            if self.territory:
                details.append(locale.territories.get(self.territory))
            if self.variant:
                details.append(locale.variants.get(self.variant))
            # NOTE(review): on Python 3 ``filter`` returns an iterator which
            # is always truthy, so ``if details`` no longer guards against an
            # all-None list as it did on Python 2 — verify intended semantics.
            details = filter(None, details)
            if details:
                retval += ' (%s)' % u', '.join(details)
        return retval
    display_name = property(get_display_name, doc="""\
        The localized display name of the locale.
        >>> Locale('en').display_name
        u'English'
        >>> Locale('en', 'US').display_name
        u'English (United States)'
        >>> Locale('sv').display_name
        u'svenska'
        :type: `unicode`
        """)
    def get_language_name(self, locale=None):
        """Return the language of this locale in the given locale.
        >>> Locale('zh', 'CN', script='Hans').get_language_name('de')
        u'Chinesisch'
        .. versionadded:: 1.0
        :param locale: the locale to use
        """
        if locale is None:
            locale = self
        locale = Locale.parse(locale)
        return locale.languages.get(self.language)
    language_name = property(get_language_name, doc="""\
        The localized language name of the locale.
        >>> Locale('en', 'US').language_name
        u'English'
    """)
    def get_territory_name(self, locale=None):
        """Return the territory name in the given locale."""
        if locale is None:
            locale = self
        locale = Locale.parse(locale)
        return locale.territories.get(self.territory)
    territory_name = property(get_territory_name, doc="""\
        The localized territory name of the locale if available.
        >>> Locale('de', 'DE').territory_name
        u'Deutschland'
    """)
    def get_script_name(self, locale=None):
        """Return the script name in the given locale."""
        if locale is None:
            locale = self
        locale = Locale.parse(locale)
        return locale.scripts.get(self.script)
    script_name = property(get_script_name, doc="""\
        The localized script name of the locale if available.
        >>> Locale('ms', 'SG', script='Latn').script_name
        u'Latin'
    """)
    @property
    def english_name(self):
        """The english display name of the locale.
        >>> Locale('de').english_name
        u'German'
        >>> Locale('de', 'DE').english_name
        u'German (Germany)'
        :type: `unicode`"""
        return self.get_display_name(Locale('en'))
    #{ General Locale Display Names
    @property
    def languages(self):
        """Mapping of language codes to translated language names.
        >>> Locale('de', 'DE').languages['ja']
        u'Japanisch'
        See `ISO 639 <http://www.loc.gov/standards/iso639-2/>`_ for
        more information.
        """
        return self._data['languages']
    @property
    def scripts(self):
        """Mapping of script codes to translated script names.
        >>> Locale('en', 'US').scripts['Hira']
        u'Hiragana'
        See `ISO 15924 <http://www.evertype.com/standards/iso15924/>`_
        for more information.
        """
        return self._data['scripts']
    @property
    def territories(self):
        """Mapping of script codes to translated script names.
        >>> Locale('es', 'CO').territories['DE']
        u'Alemania'
        See `ISO 3166 <http://www.iso.org/iso/en/prods-services/iso3166ma/>`_
        for more information.
        """
        return self._data['territories']
    @property
    def variants(self):
        """Mapping of script codes to translated script names.
        >>> Locale('de', 'DE').variants['1901']
        u'Alte deutsche Rechtschreibung'
        """
        return self._data['variants']
    #{ Number Formatting
    @property
    def currencies(self):
        """Mapping of currency codes to translated currency names. This
        only returns the generic form of the currency name, not the count
        specific one. If an actual number is requested use the
        :func:`babel.numbers.get_currency_name` function.
        >>> Locale('en').currencies['COP']
        u'Colombian Peso'
        >>> Locale('de', 'DE').currencies['COP']
        u'Kolumbianischer Peso'
        """
        return self._data['currency_names']
    @property
    def currency_symbols(self):
        """Mapping of currency codes to symbols.
        >>> Locale('en', 'US').currency_symbols['USD']
        u'$'
        >>> Locale('es', 'CO').currency_symbols['USD']
        u'US$'
        """
        return self._data['currency_symbols']
    @property
    def number_symbols(self):
        """Symbols used in number formatting.
        >>> Locale('fr', 'FR').number_symbols['decimal']
        u','
        """
        return self._data['number_symbols']
    @property
    def decimal_formats(self):
        """Locale patterns for decimal number formatting.
        >>> Locale('en', 'US').decimal_formats[None]
        <NumberPattern u'#,##0.###'>
        """
        return self._data['decimal_formats']
    @property
    def currency_formats(self):
        """Locale patterns for currency number formatting.
        >>> print Locale('en', 'US').currency_formats[None]
        <NumberPattern u'\\xa4#,##0.00'>
        """
        return self._data['currency_formats']
    @property
    def percent_formats(self):
        """Locale patterns for percent number formatting.
        >>> Locale('en', 'US').percent_formats[None]
        <NumberPattern u'#,##0%'>
        """
        return self._data['percent_formats']
    @property
    def scientific_formats(self):
        """Locale patterns for scientific number formatting.
        >>> Locale('en', 'US').scientific_formats[None]
        <NumberPattern u'#E0'>
        """
        return self._data['scientific_formats']
    #{ Calendar Information and Date Formatting
    @property
    def periods(self):
        """Locale display names for day periods (AM/PM).
        >>> Locale('en', 'US').periods['am']
        u'AM'
        """
        return self._data['periods']
    @property
    def days(self):
        """Locale display names for weekdays.
        >>> Locale('de', 'DE').days['format']['wide'][3]
        u'Donnerstag'
        """
        return self._data['days']
    @property
    def months(self):
        """Locale display names for months.
        >>> Locale('de', 'DE').months['format']['wide'][10]
        u'Oktober'
        """
        return self._data['months']
    @property
    def quarters(self):
        """Locale display names for quarters.
        >>> Locale('de', 'DE').quarters['format']['wide'][1]
        u'1. Quartal'
        """
        return self._data['quarters']
    @property
    def eras(self):
        """Locale display names for eras.
        >>> Locale('en', 'US').eras['wide'][1]
        u'Anno Domini'
        >>> Locale('en', 'US').eras['abbreviated'][0]
        u'BC'
        """
        return self._data['eras']
    @property
    def time_zones(self):
        """Locale display names for time zones.
        >>> Locale('en', 'US').time_zones['Europe/London']['long']['daylight']
        u'British Summer Time'
        >>> Locale('en', 'US').time_zones['America/St_Johns']['city']
        u'St. John\u2019s'
        """
        return self._data['time_zones']
    @property
    def meta_zones(self):
        """Locale display names for meta time zones.
        Meta time zones are basically groups of different Olson time zones that
        have the same GMT offset and daylight savings time.
        >>> Locale('en', 'US').meta_zones['Europe_Central']['long']['daylight']
        u'Central European Summer Time'
        .. versionadded:: 0.9
        """
        return self._data['meta_zones']
    @property
    def zone_formats(self):
        """Patterns related to the formatting of time zones.
        >>> Locale('en', 'US').zone_formats['fallback']
        u'%(1)s (%(0)s)'
        >>> Locale('pt', 'BR').zone_formats['region']
        u'Hor\\xe1rio %s'
        .. versionadded:: 0.9
        """
        return self._data['zone_formats']
    @property
    def first_week_day(self):
        """The first day of a week, with 0 being Monday.
        >>> Locale('de', 'DE').first_week_day
        0
        >>> Locale('en', 'US').first_week_day
        6
        """
        return self._data['week_data']['first_day']
    @property
    def weekend_start(self):
        """The day the weekend starts, with 0 being Monday.
        >>> Locale('de', 'DE').weekend_start
        5
        """
        return self._data['week_data']['weekend_start']
    @property
    def weekend_end(self):
        """The day the weekend ends, with 0 being Monday.
        >>> Locale('de', 'DE').weekend_end
        6
        """
        return self._data['week_data']['weekend_end']
    @property
    def min_week_days(self):
        """The minimum number of days in a week so that the week is counted as
        the first week of a year or month.
        >>> Locale('de', 'DE').min_week_days
        4
        """
        return self._data['week_data']['min_days']
    @property
    def date_formats(self):
        """Locale patterns for date formatting.
        >>> Locale('en', 'US').date_formats['short']
        <DateTimePattern u'M/d/yy'>
        >>> Locale('fr', 'FR').date_formats['long']
        <DateTimePattern u'd MMMM y'>
        """
        return self._data['date_formats']
    @property
    def time_formats(self):
        """Locale patterns for time formatting.
        >>> Locale('en', 'US').time_formats['short']
        <DateTimePattern u'h:mm a'>
        >>> Locale('fr', 'FR').time_formats['long']
        <DateTimePattern u'HH:mm:ss z'>
        """
        return self._data['time_formats']
    @property
    def datetime_formats(self):
        """Locale patterns for datetime formatting.
        >>> Locale('en').datetime_formats['full']
        u"{1} 'at' {0}"
        >>> Locale('th').datetime_formats['medium']
        u'{1}, {0}'
        """
        return self._data['datetime_formats']
    @property
    def plural_form(self):
        """Plural rules for the locale.
        >>> Locale('en').plural_form(1)
        'one'
        >>> Locale('en').plural_form(0)
        'other'
        >>> Locale('fr').plural_form(0)
        'one'
        >>> Locale('ru').plural_form(100)
        'many'
        """
        return self._data['plural_form']
def default_locale(category=None, aliases=LOCALE_ALIASES):
    """Return the system default locale identifier for *category*.

    The value is taken from the first non-empty environment variable among
    *category* itself, ``LANGUAGE``, ``LC_ALL``, ``LC_CTYPE`` and ``LANG``.
    The ``C``/``POSIX`` pseudo-locales are mapped to ``'en_US_POSIX'``, and
    language-only values are expanded through *aliases*.  Returns ``None``
    when nothing usable is found.

    :param category: one of the ``LC_XXX`` environment variable names
    :param aliases: a dictionary of aliases for locale identifiers
    """
    candidates = (category, 'LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG')
    for varname in candidates:
        if not varname:
            continue
        value = os.getenv(varname)
        if not value:
            continue
        if varname == 'LANGUAGE' and ':' in value:
            # the LANGUAGE variable may contain a colon-separated list of
            # language codes; we just pick the language on the list
            value = value.split(':')[0]
        if value in ('C', 'POSIX'):
            value = 'en_US_POSIX'
        elif aliases and value in aliases:
            value = aliases[value]
        try:
            return get_locale_identifier(parse_locale(value))
        except ValueError:
            # Unparseable value; fall through to the next variable.
            pass
def negotiate_locale(preferred, available, sep='_', aliases=LOCALE_ALIASES):
    """Find the best match between available and requested locale strings.

    Comparison is case-insensitive; the returned string keeps the casing of
    the matched *preferred* (or alias) entry.  For each preferred locale,
    three matches are tried in order: the identifier itself, its alias from
    *aliases* (e.g. ``'no'`` -> ``'nb_NO'``), and finally its bare language
    part (so ``'de_DE'`` can still match an available ``'de'``).  Returns
    ``None`` when nothing matches.

    :param preferred: the list of locale strings preferred by the user
    :param available: the list of locale strings available
    :param sep: character that separates the different parts of the locale
                strings
    :param aliases: a dictionary of aliases for locale identifiers
    """
    candidates = [a.lower() for a in available if a]
    for wanted in preferred:
        lowered = wanted.lower()
        if lowered in candidates:
            return wanted
        # Second chance: the commonly-used qualified form of a bare
        # language code, taken from the alias table.
        alias = aliases.get(lowered) if aliases else None
        if alias:
            alias = alias.replace('_', sep)
            if alias.lower() in candidates:
                return alias
        # Last chance: the language component of a qualified identifier.
        language = wanted.split(sep)[0]
        if language != wanted and language.lower() in candidates:
            return language
    return None
def parse_locale(identifier, sep='_'):
    """Parse a locale identifier into a ``(language, territory, script,
    variant)`` tuple.

    >>> parse_locale('zh_CN')
    ('zh', 'CN', None, None)
    >>> parse_locale('zh_Hans_CN')
    ('zh', 'CN', 'Hans', None)
    >>> parse_locale('zh-CN', sep='-')
    ('zh', 'CN', None, None)

    Encoding suffixes (``en_US.UTF-8``) and modifiers (``it_IT@euro``) are
    stripped before parsing.  See :rfc:`4646` for more information.

    :param identifier: the locale identifier string
    :param sep: character that separates the different components of the
                locale identifier
    :raise `ValueError`: if the string does not appear to be a valid locale
                         identifier
    """
    # Drop the charset/encoding part and any locale modifier — neither
    # carries information we represent.
    identifier = identifier.partition('.')[0]
    identifier = identifier.partition('@')[0]
    parts = identifier.split(sep)
    lang = parts.pop(0).lower()
    if not lang.isalpha():
        raise ValueError('expected only letters, got %r' % lang)
    script = territory = variant = None
    # A 4-letter component right after the language is the script.
    if parts and len(parts[0]) == 4 and parts[0].isalpha():
        script = parts.pop(0).title()
    # Territory: 2-letter country code or 3-digit region code.
    if parts:
        head = parts[0]
        if len(head) == 2 and head.isalpha():
            territory = parts.pop(0).upper()
        elif len(head) == 3 and head.isdigit():
            territory = parts.pop(0)
    # Variant: 4 chars starting with a digit, or 5+ chars starting with a
    # letter (e.g. '1999', 'VALENCIA').
    if parts:
        head = parts[0]
        if (len(head) == 4 and head[0].isdigit()) or \
                (len(head) >= 5 and head[0].isalpha()):
            variant = parts.pop()
    if parts:
        raise ValueError('%r is not a valid locale identifier' % identifier)
    return lang, territory, script, variant
def get_locale_identifier(tup, sep='_'):
    """The reverse of :func:`parse_locale`.  It creates a locale identifier out
    of a ``(language, territory, script, variant)`` tuple.  Items can be set to
    ``None`` and trailing ``None`` values can also be left out of the tuple.

    >>> get_locale_identifier(('de', 'DE', None, '1999'))
    'de_DE_1999'

    .. versionadded:: 1.0

    :param tup: the tuple as returned by :func:`parse_locale`.
    :param sep: the separator for the identifier.
    """
    components = list(tup[:4])
    # Pad with None so the unpacking below always sees four items.
    while len(components) < 4:
        components.append(None)
    lang, territory, script, variant = components
    # Note the output order interleaves the script between language and
    # territory, mirroring parse_locale's input format ('zh_Hans_CN').
    pieces = [piece for piece in (lang, script, territory, variant) if piece]
    return sep.join(pieces)
|
{
"content_hash": "a874af57a5149beb5ca81bdb6ccd87c7",
"timestamp": "",
"source": "github",
"line_count": 940,
"max_line_length": 83,
"avg_line_length": 32.95744680851064,
"alnum_prop": 0.5711426726920594,
"repo_name": "nandoflorestan/babel",
"id": "56dbf559334ee7e086c1b3735ae40631c439fcac",
"size": "31005",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "babel/core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "976"
},
{
"name": "JavaScript",
"bytes": "4753"
},
{
"name": "Python",
"bytes": "532145"
},
{
"name": "Shell",
"bytes": "5094"
}
],
"symlink_target": ""
}
|
from antlr4 import *
if __name__ is not None and "." in __name__:
from .FullFormParser import FullFormParser
else:
from FullFormParser import FullFormParser
# This class defines a complete generic visitor for a parse tree produced by FullFormParser.
class FullFormVisitor(ParseTreeVisitor):
    """Generic visitor for FullFormParser parse trees.
    Every visit method below simply delegates to visitChildren; subclasses
    override individual visit methods to attach real behavior.  The stock
    header comments suggest this file is ANTLR-generated scaffolding, so
    prefer regenerating from the grammar over hand-editing it.
    """
    # Visit a parse tree produced by FullFormParser#prog.
    def visitProg(self, ctx:FullFormParser.ProgContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by FullFormParser#Number.
    def visitNumber(self, ctx:FullFormParser.NumberContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by FullFormParser#StringLiteral.
    def visitStringLiteral(self, ctx:FullFormParser.StringLiteralContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by FullFormParser#SymbolLiteral.
    def visitSymbolLiteral(self, ctx:FullFormParser.SymbolLiteralContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by FullFormParser#HeadExpression.
    def visitHeadExpression(self, ctx:FullFormParser.HeadExpressionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by FullFormParser#ExpressionListed.
    def visitExpressionListed(self, ctx:FullFormParser.ExpressionListedContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by FullFormParser#ContextName.
    def visitContextName(self, ctx:FullFormParser.ContextNameContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by FullFormParser#SimpleContext.
    def visitSimpleContext(self, ctx:FullFormParser.SimpleContextContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by FullFormParser#CompoundContext.
    def visitCompoundContext(self, ctx:FullFormParser.CompoundContextContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by FullFormParser#NumberBaseN.
    def visitNumberBaseN(self, ctx:FullFormParser.NumberBaseNContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by FullFormParser#NumberBaseTen.
    def visitNumberBaseTen(self, ctx:FullFormParser.NumberBaseTenContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by FullFormParser#numberLiteralPrecision.
    def visitNumberLiteralPrecision(self, ctx:FullFormParser.NumberLiteralPrecisionContext):
        return self.visitChildren(ctx)
    # Visit a parse tree produced by FullFormParser#numberLiteralExponent.
    def visitNumberLiteralExponent(self, ctx:FullFormParser.NumberLiteralExponentContext):
        return self.visitChildren(ctx)
# The parameter annotations above were evaluated at class-definition time, so
# the parser class is no longer needed; remove it from the module namespace.
del FullFormParser
|
{
"content_hash": "75ef74b1230fe03a954f7804d5a45d0e",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 92,
"avg_line_length": 35.18181818181818,
"alnum_prop": 0.7670727205610927,
"repo_name": "rljacobson/FoxySheep",
"id": "b446d8551ef6be9313459bcc55c1f5266253aa04",
"size": "2754",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_target/FoxySheep/generated/FullFormVisitor.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "ANTLR",
"bytes": "24295"
},
{
"name": "Java",
"bytes": "72986"
},
{
"name": "Jupyter Notebook",
"bytes": "8769"
},
{
"name": "Makefile",
"bytes": "2878"
},
{
"name": "Mathematica",
"bytes": "710"
},
{
"name": "Matlab",
"bytes": "8"
},
{
"name": "Python",
"bytes": "173842"
}
],
"symlink_target": ""
}
|
from openshift_checks import OpenShiftCheck, get_var
class DockerImageAvailability(OpenShiftCheck):
    """Check that required Docker images are available.
    This check attempts to ensure that required docker images are
    either present locally, or able to be pulled down from available
    registries defined in a host machine.
    """
    name = "docker_image_availability"
    tags = ["preflight"]
    # Host packages needed to run the check itself (installed via yum below).
    dependencies = ["skopeo", "python-docker-py"]
    # Maps supported deployment types to the registry namespace and base image
    # name used when composing required image references.
    deployment_image_info = {
        "origin": {
            "namespace": "openshift",
            "name": "origin",
        },
        "openshift-enterprise": {
            "namespace": "openshift3",
            "name": "ose",
        },
    }
    @classmethod
    def is_active(cls, task_vars):
        """Skip hosts with unsupported deployment types."""
        deployment_type = get_var(task_vars, "openshift_deployment_type")
        has_valid_deployment_type = deployment_type in cls.deployment_image_info
        return super(DockerImageAvailability, cls).is_active(task_vars) and has_valid_deployment_type
    def run(self, tmp, task_vars):
        """Fail unless every required image is local or pullable from a registry."""
        msg, failed, changed = self.ensure_dependencies(task_vars)
        # exit early if Skopeo update fails
        if failed:
            if "No package matching" in msg:
                msg = "Ensure that all required dependencies can be installed via `yum`.\n"
            return {
                "failed": True,
                "changed": changed,
                "msg": (
                    "Unable to update or install required dependency packages on this host;\n"
                    "These are required in order to check Docker image availability:"
                    "\n {deps}\n{msg}"
                ).format(deps=',\n '.join(self.dependencies), msg=msg),
            }
        required_images = self.required_images(task_vars)
        missing_images = set(required_images) - set(self.local_images(required_images, task_vars))
        # exit early if all images were found locally
        if not missing_images:
            return {"changed": changed}
        registries = self.known_docker_registries(task_vars)
        if not registries:
            return {"failed": True, "msg": "Unable to retrieve any docker registries.", "changed": changed}
        # Of the images missing locally, keep only those pullable from a registry.
        available_images = self.available_images(missing_images, registries, task_vars)
        unavailable_images = set(missing_images) - set(available_images)
        if unavailable_images:
            return {
                "failed": True,
                "msg": (
                    "One or more required Docker images are not available:\n {}\n"
                    "Configured registries: {}"
                ).format(",\n ".join(sorted(unavailable_images)), ", ".join(registries)),
                "changed": changed,
            }
        return {"changed": changed}
    def required_images(self, task_vars):
        """Return the set of image references this deployment requires."""
        deployment_type = get_var(task_vars, "openshift_deployment_type")
        image_info = self.deployment_image_info[deployment_type]
        openshift_release = get_var(task_vars, "openshift_release", default="latest")
        openshift_image_tag = get_var(task_vars, "openshift_image_tag")
        is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
        images = set(self.required_docker_images(
            image_info["namespace"],
            image_info["name"],
            ["registry-console"] if "enterprise" in deployment_type else [], # include enterprise-only image names
            openshift_release,
            is_containerized,
        ))
        # append images with qualified image tags to our list of required images.
        # these are images with a (v0.0.0.0) tag, rather than a standard release
        # format tag (v0.0). We want to check this set in both containerized and
        # non-containerized installations.
        images.update(
            self.required_qualified_docker_images(
                image_info["namespace"],
                image_info["name"],
                openshift_image_tag,
            ),
        )
        return images
    @staticmethod
    def required_docker_images(namespace, name, additional_image_names, version, is_containerized):
        # Containerized installs need the main image; non-containerized installs
        # need only the deployment-type-specific extras.
        if is_containerized:
            return ["{}/{}:{}".format(namespace, name, version)] if name else []
        # include additional non-containerized images specific to the current deployment type
        return ["{}/{}:{}".format(namespace, img_name, version) for img_name in additional_image_names]
    @staticmethod
    def required_qualified_docker_images(namespace, name, version):
        # pylint: disable=invalid-name
        # Infrastructure images tagged with the fully qualified image tag.
        return [
            "{}/{}-{}:{}".format(namespace, name, suffix, version)
            for suffix in ["haproxy-router", "docker-registry", "deployer", "pod"]
        ]
    def local_images(self, images, task_vars):
        """Filter a list of images and return those available locally."""
        return [
            image for image in images
            if self.is_image_local(image, task_vars)
        ]
    def is_image_local(self, image, task_vars):
        """Return True when Docker reports the image present on this host."""
        result = self.module_executor("docker_image_facts", {"name": image}, task_vars)
        if result.get("failed", False):
            return False
        return bool(result.get("images", []))
    @staticmethod
    def known_docker_registries(task_vars):
        """Return the configured additional registries plus the default for the deployment."""
        docker_facts = get_var(task_vars, "openshift", "docker")
        # NOTE(review): assumes 'additional_registries' is always present in the
        # docker facts -- a missing key would raise KeyError here; confirm upstream.
        regs = set(docker_facts["additional_registries"])
        deployment_type = get_var(task_vars, "openshift_deployment_type")
        if deployment_type == "origin":
            regs.update(["docker.io"])
        elif "enterprise" in deployment_type:
            regs.update(["registry.access.redhat.com"])
        return list(regs)
    def available_images(self, images, registries, task_vars):
        """Inspect existing images using Skopeo and return all images successfully inspected."""
        return [
            image for image in images
            if any(self.is_available_skopeo_image(image, registry, task_vars) for registry in registries)
        ]
    def is_available_skopeo_image(self, image, registry, task_vars):
        """Uses Skopeo to determine if required image exists in a given registry."""
        cmd_str = "skopeo inspect docker://{registry}/{image}".format(
            registry=registry,
            image=image,
        )
        args = {"_raw_params": cmd_str}
        result = self.module_executor("command", args, task_vars)
        return not result.get("failed", False) and result.get("rc", 0) == 0
    # ensures that the skopeo and python-docker-py packages exist
    # check is skipped on atomic installations
    def ensure_dependencies(self, task_vars):
        """Install dependency packages; returns (msg, failed, changed)."""
        if get_var(task_vars, "openshift", "common", "is_atomic"):
            return "", False, False
        result = self.module_executor("yum", {"name": self.dependencies, "state": "latest"}, task_vars)
        return result.get("msg", ""), result.get("failed", False) or result.get("rc", 0) != 0, result.get("changed")
|
{
"content_hash": "253dbe3ccd54b7b15a37408dd23a5ead",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 116,
"avg_line_length": 39.99438202247191,
"alnum_prop": 0.6040174181767102,
"repo_name": "DG-i/openshift-ansible",
"id": "4588ed634e6715a5a21c991da0b9533c0e9126d9",
"size": "7155",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "roles/openshift_health_checker/openshift_checks/docker_image_availability.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "4999"
},
{
"name": "HTML",
"bytes": "14650"
},
{
"name": "Python",
"bytes": "3197455"
},
{
"name": "Roff",
"bytes": "5645"
},
{
"name": "Shell",
"bytes": "80962"
}
],
"symlink_target": ""
}
|
import sublime
import re
class FileInfo:
    """Snapshot of a Sublime Text view: its file extension and document body."""

    def __init__(self, view):
        self.view = view
        # Current file extension (text after the last dot); '' for unsaved views
        self.file_type = self.view.file_name().split('.')[-1] if self.view.file_name() else ''
        # Full document body captured at construction time
        self.contents = view.substr(sublime.Region(0, view.size()))

    def get_file_type(self):
        """Return the file extension lower-cased."""
        return self.file_type.lower()

    def is_type_defined(self):
        """Return True if the view has a file extension."""
        return len(self.file_type) > 0

    def is_markdown(self):
        # BUG FIX: the alternation must be grouped.  r'^md|markdown$' matched
        # '^md' OR 'markdown$', so e.g. 'mdx' was wrongly treated as markdown.
        return re.search(r'^(md|markdown)$', self.file_type)

    def is_txt_or_md(self):
        # Grouped for the same reason: the ungrouped pattern matched a bare
        # 'md' anywhere in the extension (e.g. 'cmd').
        return re.search(r'^(txt|md|markdown)$', self.file_type)

    def is_stylesheet(self):
        # Grouped so that every alternative is anchored at both ends.
        return re.search(r'^(css|less|scss|sass)$', self.file_type)

    def get_contents(self):
        """Return the document body captured at construction."""
        return self.contents

    def get_contents_before(self):
        # Content that precedes the start of the first selection/cursor
        return self.contents[:self.view.sel()[0].begin()]

    def line_count(self):
        """Return the number of lines in the document."""
        return len(self.contents.splitlines())
class Preferences:
    """Read plugin-scoped options from a view's settings store."""

    def __init__(self, view):
        # Hold on to the view's settings object for later lookups.
        self.settings = view.settings()

    def get_option(self, key):
        """Return the value of the namespaced 'super_awesome_paste.<key>' setting."""
        namespaced_key = 'super_awesome_paste.' + key
        return self.settings.get(namespaced_key)
class RegexPatterns:
    """Namespace for pre-compiled regular expression patterns."""
    # Match a hex colour code of the form #aaaaaa or #aaa
    # (leading '#' optional; the 6-digit form is captured as three byte pairs)
    hex_color = re.compile(r'^#?((?:[A-Fa-f0-9]{2}){3}|[A-Fa-f0-9]{3})$')
    # Match a preceding section back to and including an opening HTML inline content tag such as
    # <h1> or <strong>
    html_opening_content_tag = re.compile(r'<(p|h[1-6]|span|em|strong|a|small|td)[^<>]*?>[^<>]*$')
|
{
"content_hash": "5d65238bcdf3599357b5646504a098d5",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 98,
"avg_line_length": 31.8,
"alnum_prop": 0.6213836477987421,
"repo_name": "huntie/super-awesome-paste",
"id": "b0551aa93e5d410aac21ffbe95ccf4cad3145859",
"size": "1590",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "util.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "766"
},
{
"name": "Python",
"bytes": "7906"
}
],
"symlink_target": ""
}
|
from django.db import models
# Create your models here.
class Musician(models.Model):
    """A performing artist; referenced by Album via a foreign key."""
    first_name = models.CharField(max_length=50)
    last_name = models.CharField(max_length=50)
    # Free-text instrument name, capped at 100 characters.
    instrument = models.CharField(max_length=100)
class Album(models.Model):
    """A release by a single Musician."""
    # NOTE(review): no on_delete argument -- this only works on Django < 2.0,
    # where on_delete defaulted to CASCADE; confirm the project's Django version.
    artist = models.ForeignKey(Musician, verbose_name="Musician id")
    name = models.CharField(max_length=100)
    release_date = models.DateField()
    # Integer star rating; no bounds are enforced at the model level.
    num_stars = models.IntegerField()
class Person(models.Model):
    """A person with a display name and a shirt size chosen from SHIRT_SIZE."""

    # (stored value, human-readable label) pairs for the shirt_size field.
    SHIRT_SIZE = (
        ('S', 'Small'),
        ('M', 'Medium'),  # BUG FIX: label was misspelled 'Median'
        ('L', 'Large'),
    )

    name = models.CharField("person's name", max_length=100)
    # Stores the one-character code ('S'/'M'/'L'); forms render the label.
    shirt_size = models.CharField(max_length=1, choices=SHIRT_SIZE)
|
{
"content_hash": "4aca5e93a3e89c897a3d128875fc3c49",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 65,
"avg_line_length": 25.307692307692307,
"alnum_prop": 0.71580547112462,
"repo_name": "yqian1991/Django",
"id": "5c847f809b9a3f6a16c078caa8381782250b32d4",
"size": "658",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test1/music/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "890"
},
{
"name": "C",
"bytes": "447223"
},
{
"name": "C++",
"bytes": "2005"
},
{
"name": "CSS",
"bytes": "24544"
},
{
"name": "HTML",
"bytes": "77630"
},
{
"name": "JavaScript",
"bytes": "374"
},
{
"name": "Nginx",
"bytes": "787"
},
{
"name": "PowerShell",
"bytes": "8175"
},
{
"name": "Python",
"bytes": "209145"
}
],
"symlink_target": ""
}
|
"""Simple, schema-based database abstraction layer for the datastore.
Modeled after Django's abstraction layer on top of SQL databases,
http://www.djangoproject.com/documentation/model-api/. Ours is a little simpler
and a lot less code because the datastore is so much simpler than SQL
databases.
The programming model is to declare Python subclasses of the Model class,
declaring datastore properties as class members of that class. So if you want to
publish a story with title, body, and created date, you would do it like this:
class Story(db.Model):
title = db.StringProperty()
body = db.TextProperty()
created = db.DateTimeProperty(auto_now_add=True)
You can create a new Story in the datastore with this usage pattern:
story = Story(title='My title')
story.body = 'My body'
story.put()
You query for Story entities using built in query interfaces that map directly
to the syntax and semantics of the datastore:
stories = Story.all().filter('date >=', yesterday).order('-date')
for story in stories:
print story.title
The Property declarations enforce types by performing validation on assignment.
For example, the DateTimeProperty enforces that you assign valid datetime
objects, and if you supply the "required" option for a property, you will not
be able to assign None to that property.
We also support references between models, so if a story has comments, you
would represent it like this:
class Comment(db.Model):
story = db.ReferenceProperty(Story)
body = db.TextProperty()
When you get a story out of the datastore, the story reference is resolved
automatically the first time it is referenced, which makes it easy to use
model instances without performing additional queries by hand:
comment = Comment.get(key)
print comment.story.title
Likewise, you can access the set of comments that refer to each story through
this property through a reverse reference called comment_set, which is a Query
preconfigured to return all matching comments:
story = Story.get(key)
for comment in story.comment_set:
print comment.body
"""
import copy
import datetime
import logging
import re
import time
import urlparse
import warnings
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.api import datastore_types
from google.appengine.api import namespace_manager
from google.appengine.api import users
from google.appengine.datastore import datastore_query
# Re-export the low-level datastore error types at module level so callers can
# catch db.BadValueError etc. without importing datastore_errors directly.
Error = datastore_errors.Error
BadValueError = datastore_errors.BadValueError
BadPropertyError = datastore_errors.BadPropertyError
BadRequestError = datastore_errors.BadRequestError
EntityNotFoundError = datastore_errors.EntityNotFoundError
BadArgumentError = datastore_errors.BadArgumentError
QueryNotFoundError = datastore_errors.QueryNotFoundError
TransactionNotFoundError = datastore_errors.TransactionNotFoundError
Rollback = datastore_errors.Rollback
TransactionFailedError = datastore_errors.TransactionFailedError
BadFilterError = datastore_errors.BadFilterError
BadQueryError = datastore_errors.BadQueryError
BadKeyError = datastore_errors.BadKeyError
InternalError = datastore_errors.InternalError
NeedIndexError = datastore_errors.NeedIndexError
Timeout = datastore_errors.Timeout
CommittedButStillApplying = datastore_errors.CommittedButStillApplying
# Alias: property validation failures raise BadValueError.
ValidationError = BadValueError
# Re-export the datastore value types usable as property values.
Key = datastore_types.Key
Category = datastore_types.Category
Link = datastore_types.Link
Email = datastore_types.Email
GeoPt = datastore_types.GeoPt
IM = datastore_types.IM
PhoneNumber = datastore_types.PhoneNumber
PostalAddress = datastore_types.PostalAddress
Rating = datastore_types.Rating
Text = datastore_types.Text
Blob = datastore_types.Blob
ByteString = datastore_types.ByteString
BlobKey = datastore_types.BlobKey
# Capability and consistency constants forwarded from the datastore API.
READ_CAPABILITY = datastore.READ_CAPABILITY
WRITE_CAPABILITY = datastore.WRITE_CAPABILITY
STRONG_CONSISTENCY = datastore.STRONG_CONSISTENCY
EVENTUAL_CONSISTENCY = datastore.EVENTUAL_CONSISTENCY
# States describing a datastore key range (see the strings' docstrings below).
KEY_RANGE_EMPTY = "Empty"
"""Indicates the given key range is empty and the datastore's
automatic ID allocator will not assign keys in this range to new
entities.
"""
KEY_RANGE_CONTENTION = "Contention"
"""Indicates the given key range is empty but the datastore's
automatic ID allocator may assign new entities keys in this range.
However it is safe to manually assign keys in this range
if either of the following is true:
- No other request will insert entities with the same kind and parent
as the given key range until all entities with manually assigned
keys from this range have been written.
- Overwriting entities written by other requests with the same kind
and parent as the given key range is acceptable.
The datastore's automatic ID allocator will not assign a key to a new
entity that will overwrite an existing entity, so once the range is
populated there will no longer be any contention.
"""
KEY_RANGE_COLLISION = "Collision"
"""Indicates that entities with keys inside the given key range
already exist and writing to this range will overwrite those entities.
Additionally the implications of KEY_RANGE_COLLISION apply. If
overwriting entities that exist in this range is acceptable it is safe
to use the given range.
The datastore's automatic ID allocator will never assign a key to
a new entity that will overwrite an existing entity so entities
written by the user to this range will never be overwritten by
an entity with an automatically assigned key.
"""
# Global registry mapping entity kind name -> Model subclass; populated by the
# PropertiedClass metaclass and consulted by class_for_kind().
_kind_map = {}
# Module-private sentinel object; presumably marks self-referencing models
# later in the file -- confirm against the full source.
_SELF_REFERENCE = object()
# Attribute names that may never be used as property names.
_RESERVED_WORDS = set(['key_name'])
# Fine-grained exception types raised by this model layer; all derive from the
# re-exported datastore Error (or BadValueError for KindError).
class NotSavedError(Error):
  """Raised when a saved-object action is performed on a non-saved object."""
class KindError(BadValueError):
  """Raised when an entity is used with incorrect Model."""
class PropertyError(Error):
  """Raised when non-existent property is referenced."""
class DuplicatePropertyError(Error):
  """Raised when a property is duplicated in a model definition."""
class ConfigurationError(Error):
  """Raised when a property or model is improperly configured."""
class ReservedWordError(Error):
  """Raised when a property is defined for a reserved word."""
class DerivedPropertyError(Error):
  """Raised when attempting to assign a value to a derived property."""
# Python value types that may be stored in declared (static) model properties.
_ALLOWED_PROPERTY_TYPES = set([
    basestring,
    str,
    unicode,
    bool,
    int,
    long,
    float,
    Key,
    datetime.datetime,
    datetime.date,
    datetime.time,
    Blob,
    ByteString,
    Text,
    users.User,
    Category,
    Link,
    Email,
    GeoPt,
    IM,
    PhoneNumber,
    PostalAddress,
    Rating,
    BlobKey,
])
# Dynamic (expando) properties additionally allow lists, tuples and None.
_ALLOWED_EXPANDO_PROPERTY_TYPES = set(_ALLOWED_PROPERTY_TYPES)
_ALLOWED_EXPANDO_PROPERTY_TYPES.update((list, tuple, type(None)))
# Comparison operators accepted in query filter strings such as "age >=".
_OPERATORS = ['<', '<=', '>', '>=', '=', '==', '!=', 'in']
# Parses "<property> [<operator>]"; the operator part is optional.
# NOTE(review): the pattern is a non-raw string; '\s' only survives because it
# is not a recognized Python escape sequence -- keep that in mind when editing.
_FILTER_REGEX = re.compile(
    '^\s*([^\s]+)(\s+(%s)\s*)?$' % '|'.join(_OPERATORS),
    re.IGNORECASE | re.UNICODE)
def class_for_kind(kind):
  """Look up the Model subclass registered for a datastore kind.
  Args:
    kind: Entity kind string.
  Returns:
    The class registered in the module-level kind map.
  Raises:
    KindError: when no class has been registered for the kind.
  """
  # The kind map is filled in by the PropertiedClass metaclass as model
  # classes are defined.
  if kind not in _kind_map:
    raise KindError('No implementation for kind \'%s\'' % kind)
  return _kind_map[kind]
def check_reserved_word(attr_name):
  """Raise an exception if attribute name is a reserved word.
  Args:
    attr_name: Name to check to see if it is a reserved word.
  Raises:
    ReservedWordError when attr_name is determined to be a reserved word.
  """
  # Names both starting and ending with '__' are reserved by the datastore.
  if datastore_types.RESERVED_PROPERTY_NAME.match(attr_name):
    raise ReservedWordError(
        "Cannot define property. All names both beginning and "
        "ending with '__' are reserved.")
  # 'key_name' and any existing attribute of Model would shadow framework
  # machinery if used as a property name.
  if attr_name in _RESERVED_WORDS or attr_name in dir(Model):
    # The message interpolates attr_name three times via the locals() dict.
    raise ReservedWordError(
        "Cannot define property using reserved word '%(attr_name)s'. "
        "If you would like to use this name in the datastore consider "
        "using a different name like %(attr_name)s_ and adding "
        "name='%(attr_name)s' to the parameter list of the property "
        "definition." % locals())
def query_descendants(model_instance):
  """Returns a query for all the descendants of a model instance.
  Args:
    model_instance: Model instance to find the descendants of.
  Returns:
    Query that will retrieve all entities that have the given model instance
    as an ancestor. Unlike normal ancestor queries, this does not include the
    ancestor itself.
  """
  # Idiom fix: dropped the stray C-style trailing semicolons.
  result = Query().ancestor(model_instance)
  # Excludes the ancestor itself: only keys strictly greater than the
  # ancestor's own key can belong to descendants.
  result.filter(datastore_types._KEY_SPECIAL_PROPERTY + ' >',
                model_instance.key())
  return result
def model_to_protobuf(model_instance, _entity_class=datastore.Entity):
  """Encodes a model instance as a protocol buffer.
  Args:
    model_instance: Model instance to encode.
  Returns:
    entity_pb.EntityProto representation of the model instance
  """
  # Populate a low-level datastore Entity from the model, then serialize it.
  return model_instance._populate_entity(_entity_class).ToPb()
def model_from_protobuf(pb, _entity_class=datastore.Entity):
  """Decodes a model instance from a protocol buffer.
  Args:
    pb: The protocol buffer representation of the model instance.  Can be an
      entity_pb.EntityProto or str encoding of an entity_bp.EntityProto
  Returns:
    Model instance resulting from decoding the protocol buffer
  """
  # First rebuild the low-level entity, then dispatch to the model class
  # registered for the entity's kind.
  decoded_entity = _entity_class.FromPb(pb)
  model_class = class_for_kind(decoded_entity.kind())
  return model_class.from_entity(decoded_entity)
def _initialize_properties(model_class, name, bases, dct):
  """Initialize Property attributes for Model-class.
  Args:
    model_class: Model class to initialize properties for.
  """
  model_class._properties = {}
  # Tracks, per inherited property name, which base class supplied it.
  property_source = {}
  def get_attr_source(name, cls):
    # Walk the MRO to find the class that actually defines the attribute.
    for src_cls in cls.mro():
      if name in src_cls.__dict__:
        return src_cls
  defined = set()
  for base in bases:
    if hasattr(base, '_properties'):
      property_keys = set(base._properties.keys())
      duplicate_property_keys = defined & property_keys
      for dupe_prop_name in duplicate_property_keys:
        # A name seen in two bases is only an error when it originates from
        # two genuinely different defining classes (diamond inheritance of
        # the same property is fine).
        old_source = property_source[dupe_prop_name] = get_attr_source(
            dupe_prop_name, property_source[dupe_prop_name])
        new_source = get_attr_source(dupe_prop_name, base)
        if old_source != new_source:
          raise DuplicatePropertyError(
              'Duplicate property, %s, is inherited from both %s and %s.' %
              (dupe_prop_name, old_source.__name__, new_source.__name__))
      property_keys -= duplicate_property_keys
      if property_keys:
        defined |= property_keys
        property_source.update(dict.fromkeys(property_keys, base))
        model_class._properties.update(base._properties)
  # Now register the properties declared directly on the new class.
  for attr_name in dct.keys():
    attr = dct[attr_name]
    if isinstance(attr, Property):
      check_reserved_word(attr_name)
      if attr_name in defined:
        raise DuplicatePropertyError('Duplicate property: %s' % attr_name)
      defined.add(attr_name)
      model_class._properties[attr_name] = attr
      # Tell the descriptor which class and attribute it is bound to.
      attr.__property_config__(model_class, attr_name)
  # Cache the datastore-facing names of all / unindexed properties.
  model_class._all_properties = frozenset(
      prop.name for name, prop in model_class._properties.items())
  model_class._unindexed_properties = frozenset(
      prop.name for name, prop in model_class._properties.items()
      if not prop.indexed)
def _coerce_to_key(value):
  """Returns the value's key.
  Args:
    value: a Model or Key instance or string encoded key or None
  Returns:
    The corresponding key, or None if value is None.
  """
  if value is None:
    return None
  # NormalizeAndTypeCheck wraps single values into a sequence and rejects
  # anything that is not a Model, Key or string-encoded key.
  normalized, _ = datastore.NormalizeAndTypeCheck(
      value, (Model, Key, basestring))
  if len(normalized) > 1:
    raise datastore_errors.BadArgumentError('Expected only one model or key')
  candidate = normalized[0]
  if isinstance(candidate, basestring):
    # A string is taken to be an encoded key and is decoded.
    return Key(candidate)
  if isinstance(candidate, Model):
    return candidate.key()
  return candidate
class PropertiedClass(type):
  """Meta-class for initializing Model classes properties.
  Used for initializing Properties defined in the context of a model.
  By using a meta-class much of the configuration of a Property
  descriptor becomes implicit. By using this meta-class, descriptors
  that are of class Model are notified about which class they
  belong to and what attribute they are associated with and can
  do appropriate initialization via __property_config__.
  Duplicate properties are not permitted.
  """
  def __init__(cls, name, bases, dct, map_kind=True):
    """Initializes a class that might have property definitions.
    This method is called when a class is created with the PropertiedClass
    meta-class.
    Loads all properties for this model and its base classes in to a dictionary
    for easy reflection via the 'properties' method.
    Configures each property defined in the new class.
    Duplicate properties, either defined in the new class or defined separately
    in two base classes are not permitted.
    Properties may not assigned to names which are in the list of
    _RESERVED_WORDS. It is still possible to store a property using a reserved
    word in the datastore by using the 'name' keyword argument to the Property
    constructor.
    Args:
      cls: Class being initialized.
      name: Name of new class.
      bases: Base classes of new class.
      dct: Dictionary of new definitions for class.
    Raises:
      DuplicatePropertyError when a property is duplicated either in the new
        class or separately in two base classes.
      ReservedWordError when a property is given a name that is in the list of
        reserved words, attributes of Model and names of the form '__.*__'.
    """
    super(PropertiedClass, cls).__init__(name, bases, dct)
    _initialize_properties(cls, name, bases, dct)
    if map_kind:
      # Register in the global kind map so class_for_kind() can decode
      # entities of this kind back into model instances.
      _kind_map[cls.kind()] = cls
class Property(object):
"""A Property is an attribute of a Model.
It defines the type of the attribute, which determines how it is stored
in the datastore and how the property values are validated. Different property
types support different options, which change validation rules, default
values, etc. The simplest example of a property is a StringProperty:
class Story(db.Model):
title = db.StringProperty()
"""
creation_counter = 0
def __init__(self,
verbose_name=None,
name=None,
default=None,
required=False,
validator=None,
choices=None,
indexed=True):
"""Initializes this Property with the given options.
Args:
verbose_name: User friendly name of property.
name: Storage name for property. By default, uses attribute name
as it is assigned in the Model sub-class.
default: Default value for property if none is assigned.
required: Whether property is required.
validator: User provided method used for validation.
choices: User provided set of valid property values.
indexed: Whether property is indexed.
"""
self.verbose_name = verbose_name
self.name = name
self.default = default
self.required = required
self.validator = validator
self.choices = choices
self.indexed = indexed
self.creation_counter = Property.creation_counter
Property.creation_counter += 1
def __property_config__(self, model_class, property_name):
"""Configure property, connecting it to its model.
Configure the property so that it knows its property name and what class
it belongs to.
Args:
model_class: Model class which Property will belong to.
property_name: Name of property within Model instance to store property
values in. By default this will be the property name preceded by
an underscore, but may change for different subclasses.
"""
self.model_class = model_class
if self.name is None:
self.name = property_name
def __get__(self, model_instance, model_class):
"""Returns the value for this property on the given model instance.
See http://docs.python.org/ref/descriptors.html for a description of
the arguments to this class and what they mean."""
if model_instance is None:
return self
try:
return getattr(model_instance, self._attr_name())
except AttributeError:
return None
def __set__(self, model_instance, value):
"""Sets the value for this property on the given model instance.
See http://docs.python.org/ref/descriptors.html for a description of
the arguments to this class and what they mean.
"""
value = self.validate(value)
setattr(model_instance, self._attr_name(), value)
def default_value(self):
"""Default value for unassigned values.
Returns:
Default value as provided by __init__(default).
"""
return self.default
def validate(self, value):
"""Assert that provided value is compatible with this property.
Args:
value: Value to validate against this Property.
Returns:
A valid value, either the input unchanged or adapted to the
required type.
Raises:
BadValueError if the value is not appropriate for this
property in any way.
"""
if self.empty(value):
if self.required:
raise BadValueError('Property %s is required' % self.name)
else:
if self.choices:
match = False
for choice in self.choices:
if choice == value:
match = True
if not match:
raise BadValueError('Property %s is %r; must be one of %r' %
(self.name, value, self.choices))
if self.validator is not None:
self.validator(value)
return value
def empty(self, value):
"""Determine if value is empty in the context of this property.
For most kinds, this is equivalent to "not value", but for kinds like
bool, the test is more subtle, so subclasses can override this method
if necessary.
Args:
value: Value to validate against this Property.
Returns:
True if this value is considered empty in the context of this Property
type, otherwise False.
"""
return not value
def get_value_for_datastore(self, model_instance):
"""Datastore representation of this property.
Looks for this property in the given model instance, and returns the proper
datastore representation of the value that can be stored in a datastore
entity. Most critically, it will fetch the datastore key value for
reference properties.
Args:
model_instance: Instance to fetch datastore value from.
Returns:
Datastore representation of the model value in a form that is
appropriate for storing in the datastore.
"""
return self.__get__(model_instance, model_instance.__class__)
def make_value_from_datastore(self, value):
"""Native representation of this property.
Given a value retrieved from a datastore entity, return a value,
possibly converted, to be stored on the model instance. Usually
this returns the value unchanged, but a property class may
override this when it uses a different datatype on the model
instance than on the entity.
This API is not quite symmetric with get_value_for_datastore(),
because the model instance on which to store the converted value
may not exist yet -- we may be collecting values to be passed to a
model constructor.
Args:
value: value retrieved from the datastore entity.
Returns:
The value converted for use as a model instance attribute.
"""
return value
def _require_parameter(self, kwds, parameter, value):
"""Sets kwds[parameter] to value.
If kwds[parameter] exists and is not value, raises ConfigurationError.
Args:
kwds: The parameter dict, which maps parameter names (strings) to values.
parameter: The name of the parameter to set.
value: The value to set it to.
"""
if parameter in kwds and kwds[parameter] != value:
raise ConfigurationError('%s must be %s.' % (parameter, value))
kwds[parameter] = value
def _attr_name(self):
"""Attribute name we use for this property in model instances.
DO NOT USE THIS METHOD.
"""
return '_' + self.name
  # Python type this property stores; subclasses override as appropriate.
  data_type = str
  def datastore_type(self):
    """Deprecated backwards-compatible accessor method for self.data_type."""
    return self.data_type
class Model(object):
  """Model is the superclass of all object entities in the datastore.
  The programming model is to declare Python subclasses of the Model class,
  declaring datastore properties as class members of that class. So if you want
  to publish a story with title, body, and created date, you would do it like
  this:
    class Story(db.Model):
      title = db.StringProperty()
      body = db.TextProperty()
      created = db.DateTimeProperty(auto_now_add=True)
  A model instance can have a single parent.  Model instances without any
  parent are root entities.  It is possible to efficiently query for
  instances by their shared parent.  All descendents of a single root
  instance also behave as a transaction group.  This means that when you
  work one member of the group within a transaction all descendents of that
  root join the transaction.  All operations within a transaction on this
  group are ACID.
  """
  # NOTE(review): PropertiedClass (defined elsewhere in this module)
  # presumably collects the Property descriptors into cls._properties and
  # cls._all_properties -- confirm against its definition.
  __metaclass__ = PropertiedClass
  def __init__(self,
               parent=None,
               key_name=None,
               _app=None,
               _from_entity=False,
               **kwds):
    """Creates a new instance of this model.
    To create a new entity, you instantiate a model and then call put(),
    which saves the entity to the datastore:
      person = Person()
      person.name = 'Bret'
      person.put()
    You can initialize properties in the model in the constructor with keyword
    arguments:
      person = Person(name='Bret')
    We initialize all other properties to the default value (as defined by the
    properties in the model definition) if they are not provided in the
    constructor.
    Args:
      parent: Parent instance for this instance or None, indicating a top-
        level instance.
      key_name: Name for new model instance.
      _from_entity: Intentionally undocumented.
      kwds: Keyword arguments mapping to properties of model.  Also:
        key: Key instance for this instance, if provided makes parent and
             key_name redundant (they do not need to be set but if they are
             they must match the key).
    """
    # _app may be packed as an (app_id, namespace) pair; unpack it first.
    namespace = None
    if isinstance(_app, tuple):
      if len(_app) != 2:
        raise BadArgumentError('_app must have 2 values if type is tuple.')
      _app, namespace = _app
    key = kwds.get('key', None)
    if key is not None:
      # Accept a path tuple/list or an encoded string and coerce to Key,
      # then cross-check it against any redundant constructor arguments.
      if isinstance(key, (tuple, list)):
        key = Key.from_path(*key)
      if isinstance(key, basestring):
        key = Key(encoded=key)
      if not isinstance(key, Key):
        raise TypeError('Expected Key type; received %s (is %s)' %
                        (key, key.__class__.__name__))
      if not key.has_id_or_name():
        raise BadKeyError('Key must have an id or name')
      if key.kind() != self.kind():
        raise BadKeyError('Expected Key kind to be %s; received %s' %
                          (self.kind(), key.kind()))
      if _app is not None and key.app() != _app:
        raise BadKeyError('Expected Key app to be %s; received %s' %
                          (_app, key.app()))
      if namespace is not None and key.namespace() != namespace:
        raise BadKeyError('Expected Key namespace to be %s; received %s' %
                          (namespace, key.namespace()))
      if key_name and key_name != key.name():
        raise BadArgumentError('Cannot use key and key_name at the same time'
                               ' with different values')
      if parent and parent != key.parent():
        raise BadArgumentError('Cannot use key and parent at the same time'
                               ' with different values')
      namespace = key.namespace()
      self._key = key
      self._key_name = None
      self._parent = None
      self._parent_key = None
    else:
      if key_name == '':
        raise BadKeyError('Name cannot be empty.')
      elif key_name is not None and not isinstance(key_name, basestring):
        raise BadKeyError('Name must be string type, not %s' %
                          key_name.__class__.__name__)
      if parent is not None:
        if not isinstance(parent, (Model, Key)):
          raise TypeError('Expected Model type; received %s (is %s)' %
                          (parent, parent.__class__.__name__))
        if isinstance(parent, Model) and not parent.has_key():
          raise BadValueError(
              "%s instance must have a complete key before it can be used as a "
              "parent." % parent.kind())
        if isinstance(parent, Key):
          self._parent_key = parent
          self._parent = None
        else:
          self._parent_key = parent.key()
          self._parent = parent
      else:
        self._parent_key = None
        self._parent = None
      self._key_name = key_name
      self._key = None
      if self._parent_key is not None:
        # Child entities inherit the parent's namespace; an explicit
        # conflicting namespace is an error.
        if namespace is not None and self._parent_key.namespace() != namespace:
          raise BadArgumentError(
              'Expected parent namespace to be %r; received %r' %
              (namespace, self._parent_key.namespace()))
        namespace = self._parent_key.namespace()
    self._entity = None
    if _app is not None and isinstance(_app, Key):
      raise BadArgumentError('_app should be a string; received Key(\'%s\'):\n'
                             '  This may be the result of passing \'key\' as '
                             'a positional parameter in SDK 1.2.6.  Please '
                             'only pass \'key\' as a keyword parameter.' % _app)
    if namespace is None:
      namespace = namespace_manager.get_namespace()
    self._app = _app
    # Name-mangled to _Model__namespace so subclasses cannot clobber it.
    self.__namespace = namespace
    for prop in self.properties().values():
      if prop.name in kwds:
        value = kwds[prop.name]
      else:
        value = prop.default_value()
      try:
        prop.__set__(self, value)
      except DerivedPropertyError, e:
        # Derived properties reject assignment; only re-raise when the
        # caller explicitly supplied a value (silently skip defaults and
        # values coming back from the datastore).
        if prop.name in kwds and not _from_entity:
          raise
  def key(self):
    """Unique key for this entity.
    This property is only available if this entity is already stored in the
    datastore or if it has a full key, so it is available if this entity was
    fetched returned from a query, or after put() is called the first time
    for new entities, or if a complete key was given when constructed.
    Returns:
      Datastore key of persisted entity.
    Raises:
      NotSavedError when entity is not persistent.
    """
    if self.is_saved():
      return self._entity.key()
    elif self._key:
      return self._key
    elif self._key_name:
      # Build (and cache) a full key from kind/key_name/parent on demand.
      parent = self._parent_key or (self._parent and self._parent.key())
      self._key = Key.from_path(self.kind(), self._key_name, parent=parent,
                                _app=self._app, namespace=self.__namespace)
      return self._key
    else:
      raise NotSavedError()
  def _to_entity(self, entity):
    """Copies information from this model to provided entity.
    Args:
      entity: Entity to save information on.
    """
    for prop in self.properties().values():
      datastore_value = prop.get_value_for_datastore(self)
      if datastore_value == []:
        # The datastore cannot represent an empty list; model it by
        # removing the property from the entity entirely.
        try:
          del entity[prop.name]
        except KeyError:
          pass
      else:
        entity[prop.name] = datastore_value
    entity.set_unindexed_properties(self._unindexed_properties)
  def _populate_internal_entity(self, _entity_class=datastore.Entity):
    """Populates self._entity, saving its state to the datastore.
    After this method is called, calling is_saved() will return True.
    Returns:
      Populated self._entity
    """
    self._entity = self._populate_entity(_entity_class=_entity_class)
    # The entity is now the authoritative holder of the key (is_saved()
    # is True from here on), so drop the cached key attributes.
    for attr in ('_key_name', '_key'):
      try:
        delattr(self, attr)
      except AttributeError:
        pass
    return self._entity
  def put(self, **kwargs):
    """Writes this model instance to the datastore.
    If this instance is new, we add an entity to the datastore.
    Otherwise, we update this instance, and the key will remain the
    same.
    Args:
      config: datastore_rpc.Configuration to use for this request.
    Returns:
      The key of the instance (either the existing key or a new key).
    Raises:
      TransactionFailedError if the data could not be committed.
    """
    config = datastore._GetConfigFromKwargs(kwargs)
    self._populate_internal_entity()
    return datastore.Put(self._entity, config=config)
  save = put
  def _populate_entity(self, _entity_class=datastore.Entity):
    """Internal helper -- Populate self._entity or create a new one
    if that one does not exist.  Does not change any state of the instance
    other than the internal state of the entity.
    This method is separate from _populate_internal_entity so that it is
    possible to call to_xml without changing the state of an unsaved entity
    to saved.
    Returns:
      self._entity or a new Entity which is not stored on the instance.
    """
    if self.is_saved():
      entity = self._entity
    else:
      kwds = {'_app': self._app, 'namespace': self.__namespace,
              'unindexed_properties': self._unindexed_properties}
      if self._key is not None:
        if self._key.id():
          kwds['id'] = self._key.id()
        else:
          kwds['name'] = self._key.name()
        if self._key.parent():
          kwds['parent'] = self._key.parent()
      else:
        if self._key_name is not None:
          kwds['name'] = self._key_name
        if self._parent_key is not None:
          kwds['parent'] = self._parent_key
        elif self._parent is not None:
          # Unsaved parent model: fall back to its underlying entity.
          kwds['parent'] = self._parent._entity
      entity = _entity_class(self.kind(), **kwds)
    self._to_entity(entity)
    return entity
  def delete(self, **kwargs):
    """Deletes this entity from the datastore.
    Args:
      config: datastore_rpc.Configuration to use for this request.
    Raises:
      TransactionFailedError if the data could not be committed.
    """
    config = datastore._GetConfigFromKwargs(kwargs)
    datastore.Delete(self.key(), config=config)
    # Cache the key before clearing _entity: self.key() reads _entity
    # while the instance still counts as saved.
    self._key = self.key()
    self._key_name = None
    self._parent_key = None
    self._entity = None
  def is_saved(self):
    """Determine if entity is persisted in the datastore.
    New instances of Model do not start out saved in the data.  Objects which
    are saved to or loaded from the Datastore will have a True saved state.
    Returns:
      True if object has been persisted to the datastore, otherwise False.
    """
    return self._entity is not None
  def has_key(self):
    """Determine if this model instance has a complete key.
    When not using a fully self-assigned Key, ids are not assigned until the
    data is saved to the Datastore, but instances with a key name always have
    a full key.
    Returns:
      True if the object has been persisted to the datastore or has a key
      or has a key_name, otherwise False.
    """
    return self.is_saved() or self._key or self._key_name
  def dynamic_properties(self):
    """Returns a list of all dynamic properties defined for instance."""
    return []
  def instance_properties(self):
    """Alias for dynamic_properties."""
    return self.dynamic_properties()
  def parent(self):
    """Get the parent of the model instance.
    Returns:
      Parent of contained entity or parent provided in constructor, None if
      instance has no parent.
    """
    if self._parent is None:
      # Lazily fetch (and cache) the parent model from the datastore.
      parent_key = self.parent_key()
      if parent_key is not None:
        self._parent = get(parent_key)
    return self._parent
  def parent_key(self):
    """Get the parent's key.
    This method is useful for avoiding a potential fetch from the datastore
    but still get information about the instances parent.
    Returns:
      Parent key of entity, None if there is no parent.
    """
    if self._parent_key is not None:
      return self._parent_key
    elif self._parent is not None:
      return self._parent.key()
    elif self._entity is not None:
      return self._entity.parent()
    elif self._key is not None:
      return self._key.parent()
    else:
      return None
  def to_xml(self, _entity_class=datastore.Entity):
    """Generate an XML representation of this model instance.
    atom and gd:namespace properties are converted to XML according to their
    respective schemas. For more information, see:
      http://www.atomenabled.org/developers/syndication/
      http://code.google.com/apis/gdata/common-elements.html
    """
    entity = self._populate_entity(_entity_class)
    return entity.ToXml()
  @classmethod
  def get(cls, keys, **kwargs):
    """Fetch instance from the datastore of a specific Model type using key.
    We support Key objects and string keys (we convert them to Key objects
    automatically).
    Useful for ensuring that specific instance types are retrieved from the
    datastore.  It also helps that the source code clearly indicates what
    kind of object is being retrieved.  Example:
      story = Story.get(story_key)
    Args:
      keys: Key within datastore entity collection to find; or string key;
        or list of Keys or string keys.
      config: datastore_rpc.Configuration to use for this request.
    Returns:
      If a single key was given: a Model instance associated with key
      for provided class if it exists in the datastore, otherwise
      None; if a list of keys was given: a list whose items are either
      a Model instance or None.
    Raises:
      KindError if any of the retrieved objects are not instances of the
      type associated with call to 'get'.
    """
    config = datastore._GetConfigFromKwargs(kwargs)
    # Delegate to the module-level get(), then type-check the results.
    results = get(keys, config=config)
    if results is None:
      return None
    if isinstance(results, Model):
      instances = [results]
    else:
      instances = results
    for instance in instances:
      if not(instance is None or isinstance(instance, cls)):
        raise KindError('Kind %r is not a subclass of kind %r' %
                        (instance.kind(), cls.kind()))
    return results
  @classmethod
  def get_by_key_name(cls, key_names, parent=None, **kwargs):
    """Get instance of Model class by its key's name.
    Args:
      key_names: A single key-name or a list of key-names.
      parent: Parent of instances to get.  Can be a model or key.
      config: datastore_rpc.Configuration to use for this request.
    """
    try:
      parent = _coerce_to_key(parent)
    except BadKeyError, e:
      # Report an invalid parent as a bad argument rather than a bad key.
      raise BadArgumentError(str(e))
    config = datastore._GetConfigFromKwargs(kwargs)
    key_names, multiple = datastore.NormalizeAndTypeCheck(key_names, basestring)
    keys = [datastore.Key.from_path(cls.kind(), name, parent=parent)
            for name in key_names]
    if multiple:
      return get(keys, config=config)
    else:
      return get(keys[0], config=config)
  @classmethod
  def get_by_id(cls, ids, parent=None, **kwargs):
    """Get instance of Model class by id.
    Args:
      ids: A single id or a list of ids.
      parent: Parent of instances to get.  Can be a model or key.
      config: datastore_rpc.Configuration to use for this request.
    """
    config = datastore._GetConfigFromKwargs(kwargs)
    if isinstance(parent, Model):
      parent = parent.key()
    ids, multiple = datastore.NormalizeAndTypeCheck(ids, (int, long))
    keys = [datastore.Key.from_path(cls.kind(), id, parent=parent)
            for id in ids]
    if multiple:
      return get(keys, config=config)
    else:
      return get(keys[0], config=config)
  @classmethod
  def get_or_insert(cls, key_name, **kwds):
    """Transactionally retrieve or create an instance of Model class.
    This acts much like the Python dictionary setdefault() method, where we
    first try to retrieve a Model instance with the given key name and parent.
    If it's not present, then we create a new instance (using the *kwds
    supplied) and insert that with the supplied key name.
    Subsequent calls to this method with the same key_name and parent will
    always yield the same entity (though not the same actual object instance),
    regardless of the *kwds supplied. If the specified entity has somehow
    been deleted separately, then the next call will create a new entity and
    return it.
    If the 'parent' keyword argument is supplied, it must be a Model instance.
    It will be used as the parent of the new instance of this Model class if
    one is created.
    This method is especially useful for having just one unique entity for
    a specific identifier. Insertion/retrieval is done transactionally, which
    guarantees uniqueness.
    Example usage:
      class WikiTopic(db.Model):
        creation_date = db.DatetimeProperty(auto_now_add=True)
        body = db.TextProperty(required=True)
      # The first time through we'll create the new topic.
      wiki_word = 'CommonIdioms'
      topic = WikiTopic.get_or_insert(wiki_word,
                                      body='This topic is totally new!')
      assert topic.key().name() == 'CommonIdioms'
      assert topic.body == 'This topic is totally new!'
      # The second time through will just retrieve the entity.
      overwrite_topic = WikiTopic.get_or_insert(wiki_word,
                                      body='A totally different message!')
      assert topic.key().name() == 'CommonIdioms'
      assert topic.body == 'This topic is totally new!'
    Args:
      key_name: Key name to retrieve or create.
      **kwds: Keyword arguments to pass to the constructor of the model class
        if an instance for the specified key name does not already exist. If
        an instance with the supplied key_name and parent already exists, the
        rest of these arguments will be discarded.
    Returns:
      Existing instance of Model class with the specified key_name and parent
      or a new one that has just been created.
    Raises:
      TransactionFailedError if the specified Model instance could not be
      retrieved or created transactionally (due to high contention, etc).
    """
    # Lookup and conditional create run in one transaction so that
    # concurrent callers converge on a single entity.
    def txn():
      entity = cls.get_by_key_name(key_name, parent=kwds.get('parent'))
      if entity is None:
        entity = cls(key_name=key_name, **kwds)
        entity.put()
      return entity
    return run_in_transaction(txn)
  @classmethod
  def all(cls, **kwds):
    """Returns a query over all instances of this model from the datastore.
    Returns:
      Query that will retrieve all instances from entity collection.
    """
    return Query(cls, **kwds)
  @classmethod
  def gql(cls, query_string, *args, **kwds):
    """Returns a query using GQL query string.
    See appengine/ext/gql for more information about GQL.
    Args:
      query_string: properly formatted GQL query string with the
        'SELECT * FROM <entity>' part omitted
      *args: rest of the positional arguments used to bind numeric references
        in the query.
      **kwds: dictionary-based arguments (for named parameters).
    """
    return GqlQuery('SELECT * FROM %s %s' % (cls.kind(), query_string),
                    *args, **kwds)
  @classmethod
  def _load_entity_values(cls, entity):
    """Load declared-property values from entity.
    Args:
      entity: Entity which contains values to load declared properties from.
    """
    entity_values = {}
    for prop in cls.properties().values():
      if prop.name in entity:
        try:
          value = prop.make_value_from_datastore(entity[prop.name])
          entity_values[prop.name] = value
        except KeyError:
          # Missing value is represented the same way _to_entity stores
          # an empty list: as an absent property.
          entity_values[prop.name] = []
    return entity_values
  @classmethod
  def from_entity(cls, entity):
    """Converts the entity representation of this model to an instance.
    Converts datastore.Entity instance to an instance of cls.
    Args:
      entity: Entity loaded directly from datastore.
    Raises:
      KindError when cls is incorrect model for entity.
    """
    if cls.kind() != entity.kind():
      raise KindError('Class %s cannot handle kind \'%s\'' %
                      (repr(cls), entity.kind()))
    entity_values = cls._load_entity_values(entity)
    if entity.key().has_id_or_name():
      entity_values['key'] = entity.key()
    instance = cls(None, _from_entity=True, **entity_values)
    if entity.is_saved():
      # The entity owns the key from here on; drop the instance-level
      # cache, mirroring _populate_internal_entity().
      instance._entity = entity
      del instance._key_name
      del instance._key
    return instance
  @classmethod
  def kind(cls):
    """Returns the datastore kind we use for this model.
    We just use the name of the model for now, ignoring potential collisions.
    """
    return cls.__name__
  @classmethod
  def entity_type(cls):
    """Soon to be removed alias for kind."""
    return cls.kind()
  @classmethod
  def properties(cls):
    """Returns a dictionary of all the properties defined for this model."""
    return dict(cls._properties)
  @classmethod
  def fields(cls):
    """Soon to be removed alias for properties."""
    return cls.properties()
def create_rpc(deadline=None, callback=None, read_policy=STRONG_CONSISTENCY):
  """Create an rpc for use in configuring datastore calls.

  NOTE: This function exists for backwards compatibility; prefer
  create_config().  Note that create_config() uses 'on_completion',
  a function taking an argument, whereas create_rpc uses 'callback',
  a function without arguments.

  Args:
    deadline: float, deadline for calls in seconds.
    callback: callable, a callback triggered when this rpc completes,
      accepts one argument: the returned rpc.
    read_policy: flag, set to EVENTUAL_CONSISTENCY to enable eventually
      consistent reads

  Returns:
    A datastore.DatastoreRPC instance.
  """
  return datastore.CreateRPC(deadline=deadline,
                             callback=callback,
                             read_policy=read_policy)
def get(keys, **kwargs):
  """Fetch the specific Model instance with the given key from the datastore.

  We support Key objects and string keys (string keys are converted to
  Key objects automatically).

  Args:
    keys: Key within datastore entity collection to find; or string key;
      or list of Keys or string keys.
    config: datastore_rpc.Configuration to use for this request.

  Returns:
    If a single key was given: a Model instance associated with key
    for if it exists in the datastore, otherwise None; if a list of
    keys was given: a list whose items are either a Model instance or
    None.
  """
  config = datastore._GetConfigFromKwargs(kwargs)
  keys, multiple = datastore.NormalizeAndTypeCheckKeys(keys)
  try:
    entities = datastore.Get(keys, config=config)
  except datastore_errors.EntityNotFoundError:
    # Only the single-key path reports a miss via exception.
    assert not multiple
    return None
  # Map each entity to a model instance of its registered kind class,
  # preserving None placeholders for missing entities.
  models = []
  for entity in entities:
    if entity is None:
      models.append(None)
    else:
      kind_class = class_for_kind(entity.kind())
      models.append(kind_class.from_entity(entity))
  if not multiple:
    assert len(models) == 1
    return models[0]
  return models
def put(models, **kwargs):
  """Store one or more Model instances.

  Args:
    models: Model instance or list of Model instances.
    config: datastore_rpc.Configuration to use for this request.

  Returns:
    A Key or a list of Keys (corresponding to the argument's plurality).

  Raises:
    TransactionFailedError if the data could not be committed.
  """
  config = datastore._GetConfigFromKwargs(kwargs)
  models, multiple = datastore.NormalizeAndTypeCheck(models, Model)
  # Flush each model's state into its backing entity before writing.
  entities = [instance._populate_internal_entity() for instance in models]
  keys = datastore.Put(entities, config=config)
  if not multiple:
    assert len(keys) == 1
    return keys[0]
  return keys
save = put
def delete(models, **kwargs):
  """Delete one or more Model instances.

  Args:
    models: Model instance, key, key string or iterable thereof.
    config: datastore_rpc.Configuration to use for this request.

  Raises:
    TransactionFailedError if the data could not be committed.
  """
  config = datastore._GetConfigFromKwargs(kwargs)
  # A single deletable object (including a string, which is iterable)
  # is wrapped in a list; anything else is iterated if possible.
  if isinstance(models, (basestring, Model, Key)):
    targets = [models]
  else:
    try:
      targets = iter(models)
    except TypeError:
      targets = [models]
  keys = [_coerce_to_key(target) for target in targets]
  datastore.Delete(keys, config=config)
def allocate_ids(model, size, **kwargs):
  """Allocates a range of IDs of size for the model_key defined by model.

  Allocates a range of IDs in the datastore such that those IDs will not
  be automatically assigned to new entities. You can only allocate IDs
  for model keys from your app. If there is an error, raises a subclass of
  datastore_errors.Error.

  Args:
    model: Model instance, Key or string to serve as a template specifying the
      ID sequence in which to allocate IDs. Returned ids should only be used
      in entities with the same parent (if any) and kind as this key.
    size: Number of IDs to allocate.
    config: datastore_rpc.Configuration to use for this request.

  Returns:
    (start, end) of the allocated range, inclusive.
  """
  model_key = _coerce_to_key(model)
  return datastore.AllocateIds(model_key, size=size, **kwargs)
def allocate_id_range(model, start, end, **kwargs):
  """Allocates a range of IDs with specific endpoints.
  Once these IDs have been allocated they may be provided manually to
  newly created entities.
  Since the datastore's automatic ID allocator will never assign
  a key to a new entity that will cause an existing entity to be
  overwritten, entities written to the given key range will never be
  overwritten. However, writing entities with manually assigned keys in this
  range may overwrite existing entities (or new entities written by a
  separate request) depending on the key range state returned.
  This method should only be used if you have an existing numeric id
  range that you want to reserve, e.g. bulk loading entities that already
  have IDs. If you don't care about which IDs you receive, use allocate_ids
  instead.
  Args:
    model: Model instance, Key or string to serve as a template specifying the
      ID sequence in which to allocate IDs. Allocated ids should only be used
      in entities with the same parent (if any) and kind as this key.
    start: first id of the range to allocate, inclusive.
    end: last id of the range to allocate, inclusive.
    config: datastore_rpc.Configuration to use for this request.
  Returns:
    One of (KEY_RANGE_EMPTY, KEY_RANGE_CONTENTION, KEY_RANGE_COLLISION). If not
    KEY_RANGE_EMPTY, this represents a potential issue with using the allocated
    key range.
  """
  key = _coerce_to_key(model)
  datastore.NormalizeAndTypeCheck((start, end), (int, long))
  if start < 1 or end < 1:
    raise BadArgumentError('Start %d and end %d must both be > 0.' %
                           (start, end))
  if start > end:
    raise BadArgumentError('Range end %d cannot be less than start %d.' %
                           (end, start))
  # Ask the allocator to advance at least to `end`; any config kwarg is
  # forwarded through **kwargs.
  safe_start, safe_end = datastore.AllocateIds(key, max=end, **kwargs)
  # If the allocator had already moved past `start`, another request may
  # have been handed ids inside our range (contention).
  race_condition = safe_start > start
  start_key = Key.from_path(key.kind(), start, parent=key.parent())
  end_key = Key.from_path(key.kind(), end, parent=key.parent())
  # Any existing entity inside [start, end] means manual writes with
  # these ids could clobber data (collision trumps contention).
  collision = (Query(keys_only=True).filter('__key__ >=', start_key)
               .filter('__key__ <=', end_key).fetch(1))
  if collision:
    return KEY_RANGE_COLLISION
  elif race_condition:
    return KEY_RANGE_CONTENTION
  else:
    return KEY_RANGE_EMPTY
class Expando(Model):
  """Dynamically expandable model.
  An Expando does not require (but can still benefit from) the definition
  of any properties before it can be used to store information in the
  datastore.  Properties can be added to an expando object by simply
  performing an assignment.  The assignment of properties is done on
  an instance by instance basis, so it is possible for one object of an
  expando type to have different properties from another or even the same
  properties with different types.  It is still possible to define
  properties on an expando, allowing those properties to behave the same
  as on any other model.
  Example:
    import datetime
    class Song(db.Expando):
      title = db.StringProperty()
    crazy = Song(title='Crazy like a diamond',
                 author='Lucy Sky',
                 publish_date='yesterday',
                 rating=5.0)
    hoboken = Song(title='The man from Hoboken',
                   author=['Anthony', 'Lou'],
                   publish_date=datetime.datetime(1977, 5, 3))
    crazy.last_minute_note=db.Text('Get a train to the station.')
  Possible Uses:
    One use of an expando is to create an object without any specific
    structure and later, when your application matures and is in the right
    state, change it to a normal model object and define explicit properties.
  Additional exceptions for expando:
    Protected attributes (ones whose names begin with '_') cannot be used
    as dynamic properties.  These are names that are reserved for protected
    transient (non-persisted) attributes.
  Order of lookup:
    When trying to set or access an attribute value, any other defined
    properties, such as methods and other values in __dict__ take precedence
    over values in the datastore.
    1 - Because it is not possible for the datastore to know what kind of
        property to store on an undefined expando value, setting a property to
        None is the same as deleting it from the expando.
    2 - Persistent variables on Expando must not begin with '_'.  These
        variables considered to be 'protected' in Python, and are used
        internally.
    3 - Expando's dynamic properties are not able to store empty lists.
        Attempting to assign an empty list to a dynamic property will raise
        ValueError.  Static properties on Expando can still support empty
        lists but like normal Model properties is restricted from using
        None.
  """
  # Class-level None sentinel; replaced by a per-instance dict in __init__.
  _dynamic_properties = None
  def __init__(self, parent=None, key_name=None, _app=None, **kwds):
    """Creates a new instance of this expando model.
    Args:
      parent: Parent instance for this instance or None, indicating a top-
        level instance.
      key_name: Name for new model instance.
      _app: Intentionally undocumented.
      kwds: Keyword arguments mapping to properties of model.
    """
    super(Expando, self).__init__(parent, key_name, _app, **kwds)
    self._dynamic_properties = {}
    # Any keyword not matching a declared property (or 'key') becomes a
    # dynamic property, unless a data descriptor of that name exists.
    for prop, value in kwds.iteritems():
      if prop not in self._all_properties and prop != 'key':
        if not (hasattr(getattr(type(self), prop, None), '__set__')):
          setattr(self, prop, value)
        else:
          check_reserved_word(prop)
  def __setattr__(self, key, value):
    """Dynamically set field values that are not defined.
    Tries to set the value on the object normally, but failing that
    sets the value on the contained entity.
    Args:
      key: Name of attribute.
      value: Value to set for attribute.  Must be compatible with
        datastore.
    Raises:
      ValueError on attempt to assign empty list.
    """
    check_reserved_word(key)
    # Only names that don't start with '_' and aren't shadowed by a data
    # descriptor on the class are treated as dynamic properties.
    if (key[:1] != '_' and
        not hasattr(getattr(type(self), key, None), '__set__')):
      if value == []:
        raise ValueError('Cannot store empty list to dynamic property %s' %
                         key)
      if type(value) not in _ALLOWED_EXPANDO_PROPERTY_TYPES:
        raise TypeError("Expando cannot accept values of type '%s'." %
                        type(value).__name__)
      if self._dynamic_properties is None:
        self._dynamic_properties = {}
      self._dynamic_properties[key] = value
    else:
      super(Expando, self).__setattr__(key, value)
  def __getattribute__(self, key):
    """Get attribute from expando.
    Must be overridden to allow dynamic properties to obscure class attributes.
    Since all attributes are stored in self._dynamic_properties, the normal
    __getattribute__ does not attempt to access it until __setattr__ is called.
    By then, the static attribute being overwritten has already been located
    and returned from the call.
    This method short circuits the usual __getattribute__ call when finding a
    dynamic property and returns it to the user via __getattr__.  __getattr__
    is called to preserve backward compatibility with older Expando models
    that may have overridden the original __getattr__.
    NOTE: Access to properties defined by Python descriptors are not obscured
    because setting those attributes are done through the descriptor and does
    not place those attributes in self._dynamic_properties.
    """
    # Underscore names always follow normal lookup; this also avoids
    # recursing on self._dynamic_properties itself.
    if not key.startswith('_'):
      dynamic_properties = self._dynamic_properties
      if dynamic_properties is not None and key in dynamic_properties:
        return self.__getattr__(key)
    return super(Expando, self).__getattribute__(key)
  def __getattr__(self, key):
    """If no explicit attribute defined, retrieve value from entity.
    Tries to get the value on the object normally, but failing that
    retrieves value from contained entity.
    Args:
      key: Name of attribute.
    Raises:
      AttributeError when there is no attribute for key on object or
      contained entity.
    """
    _dynamic_properties = self._dynamic_properties
    if _dynamic_properties is not None and key in _dynamic_properties:
      return _dynamic_properties[key]
    else:
      # NOTE(review): delegates to attribute lookup on the super() proxy;
      # kept as-is for backward compatibility with subclass overrides.
      return getattr(super(Expando, self), key)
  def __delattr__(self, key):
    """Remove attribute from expando.
    Expando is not like normal entities in that undefined fields
    can be removed.
    Args:
      key: Dynamic property to be deleted.
    """
    if self._dynamic_properties and key in self._dynamic_properties:
      del self._dynamic_properties[key]
    else:
      object.__delattr__(self, key)
  def dynamic_properties(self):
    """Determine which properties are particular to instance of entity.
    Returns:
      Set of names which correspond only to the dynamic properties.
    """
    if self._dynamic_properties is None:
      return []
    return self._dynamic_properties.keys()
  def _to_entity(self, entity):
    """Store to entity, deleting dynamic properties that no longer exist.
    When the expando is saved, it is possible that a given property no longer
    exists.  In this case, the property will be removed from the saved instance.
    Args:
      entity: Entity which will receive dynamic properties.
    """
    super(Expando, self)._to_entity(entity)
    if self._dynamic_properties is None:
      self._dynamic_properties = {}
    for key, value in self._dynamic_properties.iteritems():
      entity[key] = value
    # Drop entity values whose dynamic property was deleted since load.
    all_properties = set(self._dynamic_properties.iterkeys())
    all_properties.update(self._all_properties)
    for key in entity.keys():
      if key not in all_properties:
        del entity[key]
  @classmethod
  def _load_entity_values(cls, entity):
    """Load dynamic properties from entity.
    Expando needs to do a second pass to add the entity values which were
    ignored by Model because they didn't have a corresponding predefined
    property on the model.
    Args:
      entity: Entity which contains values to search dynamic properties for.
    """
    entity_values = super(Expando, cls)._load_entity_values(entity)
    for key, value in entity.iteritems():
      if key not in entity_values:
        entity_values[str(key)] = value
    return entity_values
class _BaseQuery(object):
  """Base class for both Query and GqlQuery."""

  # Class-level default; __init__ normally overwrites this per instance.
  _compile = False

  def __init__(self, model_class=None, keys_only=False, compile=True,
               cursor=None, namespace=None):
    """Constructor.

    Args:
      model_class: Model class from which entities are constructed.
      keys_only: Whether the query should return full entities or only keys.
      compile: Whether the query should also return a compiled query.
      cursor: A compiled query from which to resume.
      namespace: The namespace to query.
    """
    self._model_class = model_class
    self._keys_only = keys_only
    self._compile = compile
    self._namespace = namespace
    # Initializes self._cursor and self._end_cursor.
    self.with_cursor(cursor)

  def is_keys_only(self):
    """Returns whether this query is keys only.

    Returns:
      True if this query returns keys, False if it returns entities.
    """
    return self._keys_only

  def _get_query(self):
    """Subclass must override (and not call their super method).

    Returns:
      A datastore.Query instance representing the query.
    """
    raise NotImplementedError

  def run(self, **kwargs):
    """Iterator for this query.

    If you know the number of results you need, consider fetch() instead,
    or use a GQL query with a LIMIT clause. It's more efficient.

    Args:
      config: datastore_rpc.Configuration to use for this request.

    Returns:
      Iterator for this query.
    """
    config = datastore._GetConfigFromKwargs(kwargs)
    raw_query = self._get_query()
    iterator = raw_query.Run(config=config)
    if self._compile:
      # Keep the raw query around so cursor() can read its compiled cursor.
      self._last_raw_query = raw_query
    if self._keys_only:
      return iterator
    else:
      return _QueryIterator(self._model_class, iter(iterator))

  def __iter__(self):
    """Iterator for this query.

    If you know the number of results you need, consider fetch() instead,
    or use a GQL query with a LIMIT clause. It's more efficient.
    """
    return self.run()

  def __getstate__(self):
    # The executed raw query is dropped when pickling; cursors must be
    # extracted via cursor() before serialization.
    state = self.__dict__.copy()
    if '_last_raw_query' in state:
      del state['_last_raw_query']
    return state

  def get(self, **kwargs):
    """Get first result from this.

    Args:
      config: datastore_rpc.Configuration to use for this request.

    Beware: get() ignores the LIMIT clause on GQL queries.

    Returns:
      First result from running the query if there are any, else None.
    """
    config = datastore._GetConfigFromKwargs(kwargs)
    results = self.fetch(1, config=config)
    try:
      return results[0]
    except IndexError:
      return None

  def count(self, limit=1000, **kwargs):
    """Number of entities this query fetches.

    Beware: count() ignores the LIMIT clause on GQL queries.

    Args:
      limit: A number. If there are more results than this, stop short and
        just return this number. Providing this argument makes the count
        operation more efficient.
      config: datastore_rpc.Configuration to use for this request.

    Returns:
      Number of entities this query fetches.
    """
    config = datastore._GetConfigFromKwargs(kwargs)
    raw_query = self._get_query()
    result = raw_query.Count(limit=limit, config=config)
    if self._compile:
      self._last_raw_query = raw_query
    return result

  def fetch(self, limit, offset=0, **kwargs):
    """Return a list of items selected using SQL-like limit and offset.

    Whenever possible, use fetch() instead of iterating over the query
    results with run() or __iter__() . fetch() is more efficient.

    Beware: fetch() ignores the LIMIT clause on GQL queries.

    Args:
      limit: Maximum number of results to return.
      offset: Optional number of results to skip first; default zero.
      config: datastore_rpc.Configuration to use for this request.

    Returns:
      A list of db.Model instances. There may be fewer than 'limit'
      results if there aren't enough results to satisfy the request.
    """
    config = datastore._GetConfigFromKwargs(kwargs)
    # long: Python 2 -- accept both integer types.
    accepted = (int, long)
    if not (isinstance(limit, accepted) and isinstance(offset, accepted)):
      raise TypeError('Arguments to fetch() must be integers')
    if limit < 0 or offset < 0:
      raise ValueError('Arguments to fetch() must be >= 0')
    raw_query = self._get_query()
    raw = raw_query.Get(limit, offset, config=config)
    if self._compile:
      self._last_raw_query = raw_query
    if self._keys_only:
      return raw
    else:
      if self._model_class is not None:
        return [self._model_class.from_entity(e) for e in raw]
      else:
        # Kindless query: resolve each entity's model class by its kind.
        return [class_for_kind(e.kind()).from_entity(e) for e in raw]

  def cursor(self):
    """Get a serialized cursor for an already executed query.

    The returned cursor effectively lets a future invocation of a similar
    query to begin fetching results immediately after the last returned
    result from this query invocation.

    Returns:
      A base64-encoded serialized cursor.
    """
    if not self._compile:
      raise AssertionError(
          'Query must be created with compile=True to produce cursors')
    try:
      return websafe_encode_cursor(
          self._last_raw_query.GetCompiledCursor())
    except AttributeError:
      # _last_raw_query only exists after the query has actually been run.
      raise AssertionError('No cursor available.')

  def with_cursor(self, start_cursor=None, end_cursor=None):
    """Set the start and end of this query using serialized cursors.

    Conceptually cursors point to the position between the last result returned
    and the next result so running a query with each of the following cursors
    combinations will return all results in four chunks with no duplicate
    results:

      query.with_cursor(end_cursor=cursor1)
      query.with_cursors(cursor1, cursor2)
      query.with_cursors(cursor2, cursor3)
      query.with_cursors(start_cursor=cursor3)

    For example if the cursors pointed to:
      cursor:    1   2   3
      result: a b c d e f g h
    The results returned by these queries would be [a, b], [c, d], [e, f],
    [g, h] respectively.

    Cursors are pinned to the position just after the previous result (last
    result, exclusive), so if results are inserted or deleted between the time
    the cursor was made and these queries are executed, the cursors stay pinned
    to these positions. For example:

      delete(b, f, g, h)
      put(a1, b1, c1, d1)
      cursor:     1(b)      2(d)   3(f)
      result: a a1 b1 c c1 d d1 e

    The results returned by these queries would now be: [a, a1], [b1, c, c1, d],
    [d1, e], [] respectively.

    Args:
      start_cursor: The cursor position at which to start or None
      end_cursor: The cursor position at which to end or None

    Returns:
      This Query instance, for chaining.

    Raises:
      BadValueError when cursor is not valid.
    """
    if start_cursor is None:
      self._cursor = None
    else:
      self._cursor = websafe_decode_cursor(start_cursor)
    if end_cursor is None:
      self._end_cursor = None
    else:
      self._end_cursor = websafe_decode_cursor(end_cursor)
    return self

  def __getitem__(self, arg):
    """Support for query[index] and query[start:stop].

    Beware: this ignores the LIMIT clause on GQL queries.

    Args:
      arg: Either a single integer, corresponding to the query[index]
        syntax, or a Python slice object, corresponding to the
        query[start:stop] or query[start:stop:step] syntax.

    Returns:
      A single Model instance when the argument is a single integer.
      A list of Model instances when the argument is a slice.
    """
    if isinstance(arg, slice):
      start, stop, step = arg.start, arg.stop, arg.step
      if start is None:
        start = 0
      if stop is None:
        raise ValueError('Open-ended slices are not supported')
      if step is None:
        step = 1
      if start < 0 or stop < 0 or step != 1:
        raise ValueError(
            'Only slices with start>=0, stop>=0, step==1 are supported')
      limit = stop - start
      if limit < 0:
        return []
      return self.fetch(limit, start)
    elif isinstance(arg, (int, long)):
      if arg < 0:
        raise ValueError('Only indices >= 0 are supported')
      # Fetch exactly one result at the requested offset.
      results = self.fetch(1, arg)
      if results:
        return results[0]
      else:
        raise IndexError('The query returned fewer than %d results' % (arg+1))
    else:
      raise TypeError('Only integer indices and slices are supported')
class _QueryIterator(object):
"""Wraps the datastore iterator to return Model instances.
The datastore returns entities. We wrap the datastore iterator to
return Model instances instead.
"""
def __init__(self, model_class, datastore_iterator):
"""Iterator constructor
Args:
model_class: Model class from which entities are constructed.
datastore_iterator: Underlying datastore iterator.
"""
self.__model_class = model_class
self.__iterator = datastore_iterator
def __iter__(self):
"""Iterator on self.
Returns:
Self.
"""
return self
def next(self):
"""Get next Model instance in query results.
Returns:
Next model instance.
Raises:
StopIteration when there are no more results in query.
"""
if self.__model_class is not None:
return self.__model_class.from_entity(self.__iterator.next())
else:
entity = self.__iterator.next()
return class_for_kind(entity.kind()).from_entity(entity)
def _normalize_query_parameter(value):
  """Make any necessary type conversions to a query parameter.

  The following conversions are made:
  - Model instances are converted to Key instances. This is necessary so
    that querying reference properties will work.
  - datetime.date objects are converted to datetime.datetime objects (see
    _date_to_datetime for details on this conversion). This is necessary so
    that querying date properties with date objects will work.
  - datetime.time objects are converted to datetime.datetime objects (see
    _time_to_datetime for details on this conversion). This is necessary so
    that querying time properties with time objects will work.

  Args:
    value: The query parameter value.

  Returns:
    The input value, or a converted value if value matches one of the
    conversions specified above.
  """
  if isinstance(value, Model):
    value = value.key()
  # datetime.datetime is a subclass of datetime.date, so a pure date is
  # one that is a date but not a datetime.
  is_pure_date = (isinstance(value, datetime.date) and
                  not isinstance(value, datetime.datetime))
  if is_pure_date:
    return _date_to_datetime(value)
  if isinstance(value, datetime.time):
    return _time_to_datetime(value)
  return value
class Query(_BaseQuery):
  """A Query instance queries over instances of Models.

  You construct a query with a model class, like this:

    class Story(db.Model):
      title = db.StringProperty()
      date = db.DateTimeProperty()

    query = Query(Story)

  You modify a query with filters and orders like this:

    query.filter('title =', 'Foo')
    query.order('-date')
    query.ancestor(key_or_model_instance)

  Every query can return an iterator, so you access the results of a query
  by iterating over it:

    for story in query:
      print story.title

  For convenience, all of the filtering and ordering methods return "self",
  so the easiest way to use the query interface is to cascade all filters and
  orders in the iterator line like this:

    for story in Query(story).filter('title =', 'Foo').order('-date'):
      print story.title
  """

  def __init__(self, model_class=None, keys_only=False, cursor=None,
               namespace=None):
    """Constructs a query over instances of the given Model.

    Args:
      model_class: Model class to build query for.
      keys_only: Whether the query should return full entities or only keys.
      cursor: A compiled query from which to resume.
      namespace: The namespace to use for this query.
    """
    super(Query, self).__init__(model_class, keys_only, cursor=cursor,
                                namespace=namespace)
    # A list of filter dicts; more than one entry means the query is a
    # disjunction that must be executed as multiple subqueries.
    self.__query_sets = [{}]
    self.__orderings = []
    self.__ancestor = None

  def _get_query(self,
                 _query_class=datastore.Query,
                 _multi_query_class=datastore.MultiQuery):
    # Build one raw query per filter set; a disjunction produces several.
    queries = []
    for query_set in self.__query_sets:
      if self._model_class is not None:
        kind = self._model_class.kind()
      else:
        kind = None
      query = _query_class(kind,
                           query_set,
                           keys_only=self._keys_only,
                           compile=self._compile,
                           cursor=self._cursor,
                           end_cursor=self._end_cursor,
                           namespace=self._namespace)
      query.Order(*self.__orderings)
      if self.__ancestor is not None:
        query.Ancestor(self.__ancestor)
      queries.append(query)
    # _query_class and _multi_query_class must be overridden together;
    # 'IN' and '!=' filters expand into multiple subqueries and therefore
    # require a matching multi-query implementation.
    if (_query_class != datastore.Query and
        _multi_query_class == datastore.MultiQuery):
      warnings.warn(
          'Custom _query_class specified without corresponding custom'
          ' _query_multi_class. Things will break if you use queries with'
          ' the "IN" or "!=" operators.', RuntimeWarning)
      if len(queries) > 1:
        raise datastore_errors.BadArgumentError(
            'Query requires multiple subqueries to satisfy. If _query_class'
            ' is overridden, _multi_query_class must also be overridden.')
    elif (_query_class == datastore.Query and
          _multi_query_class != datastore.MultiQuery):
      raise BadArgumentError('_query_class must also be overridden if'
                             ' _multi_query_class is overridden.')
    if len(queries) == 1:
      return queries[0]
    else:
      return _multi_query_class(queries, self.__orderings)

  def __filter_disjunction(self, operations, values):
    """Add a disjunction of several filters and several values to the query.

    This is implemented by duplicating queries and combining the
    results later.

    Args:
      operations: a string or list of strings. Each string contains a
        property name and an operator to filter by. The operators
        themselves must not require multiple queries to evaluate
        (currently, this means that 'in' and '!=' are invalid).
      values: a value or list of filter values, normalized by
        _normalize_query_parameter.
    """
    if not isinstance(operations, (list, tuple)):
      operations = [operations]
    if not isinstance(values, (list, tuple)):
      values = [values]
    # Cross-product: every existing filter set is duplicated once per
    # (operation, value) pair.
    new_query_sets = []
    for operation in operations:
      if operation.lower().endswith('in') or operation.endswith('!='):
        raise BadQueryError('Cannot use "in" or "!=" in a disjunction.')
      for query_set in self.__query_sets:
        for value in values:
          new_query_set = copy.deepcopy(query_set)
          datastore._AddOrAppend(new_query_set, operation, value)
          new_query_sets.append(new_query_set)
    self.__query_sets = new_query_sets

  def filter(self, property_operator, value):
    """Add filter to query.

    Args:
      property_operator: string with the property and operator to filter by.
      value: the filter value.

    Returns:
      Self to support method chaining.

    Raises:
      PropertyError if invalid property is provided.
    """
    # group(1) is the property name, group(3) the (optional) operator.
    match = _FILTER_REGEX.match(property_operator)
    prop = match.group(1)
    if match.group(3) is not None:
      operator = match.group(3)
    else:
      operator = '=='
    if self._model_class is None:
      if prop != datastore_types._KEY_SPECIAL_PROPERTY:
        raise BadQueryError(
            'Only %s filters are allowed on kindless queries.' %
            datastore_types._KEY_SPECIAL_PROPERTY)
    elif prop in self._model_class._unindexed_properties:
      raise PropertyError('Property \'%s\' is not indexed' % prop)
    if operator.lower() == 'in':
      if self._keys_only:
        raise BadQueryError('Keys only queries do not support IN filters.')
      elif not isinstance(value, (list, tuple)):
        raise BadValueError('Argument to the "in" operator must be a list')
      values = [_normalize_query_parameter(v) for v in value]
      self.__filter_disjunction(prop + ' =', values)
    else:
      if isinstance(value, (list, tuple)):
        raise BadValueError('Filtering on lists is not supported')
      if operator == '!=':
        if self._keys_only:
          raise BadQueryError('Keys only queries do not support != filters.')
        # a != b is expressed as (a < b) OR (a > b), run as two subqueries.
        self.__filter_disjunction([prop + ' <', prop + ' >'],
                                  _normalize_query_parameter(value))
      else:
        value = _normalize_query_parameter(value)
        for query_set in self.__query_sets:
          datastore._AddOrAppend(query_set, property_operator, value)
    return self

  def order(self, property):
    """Set order of query result.

    To use descending order, prepend '-' (minus) to the property
    name, e.g., '-date' rather than 'date'.

    Args:
      property: Property to sort on.

    Returns:
      Self to support method chaining.

    Raises:
      PropertyError if invalid property is provided.
    """
    if property.startswith('-'):
      property = property[1:]
      order = datastore.Query.DESCENDING
    else:
      order = datastore.Query.ASCENDING
    if self._model_class is None:
      if (property != datastore_types._KEY_SPECIAL_PROPERTY or
          order != datastore.Query.ASCENDING):
        raise BadQueryError(
            'Only %s ascending orders are supported on kindless queries' %
            datastore_types._KEY_SPECIAL_PROPERTY)
    else:
      # Expando instances may carry dynamic properties, so property-name
      # validation is skipped for them.
      if not issubclass(self._model_class, Expando):
        if (property not in self._model_class._all_properties and
            property not in datastore_types._SPECIAL_PROPERTIES):
          raise PropertyError('Invalid property name \'%s\'' % property)
      if property in self._model_class._unindexed_properties:
        raise PropertyError('Property \'%s\' is not indexed' % property)
    self.__orderings.append((property, order))
    return self

  def ancestor(self, ancestor):
    """Sets an ancestor for this query.

    This restricts the query to only return results that descend from
    a given model instance. In other words, all of the results will
    have the ancestor as their parent, or parent's parent, etc.  The
    ancestor itself is also a possible result!

    Args:
      ancestor: Model or Key (that has already been saved)

    Returns:
      Self to support method chaining.

    Raises:
      TypeError if the argument isn't a Key or Model; NotSavedError
      if it is, but isn't saved yet.
    """
    if isinstance(ancestor, datastore.Key):
      if ancestor.has_id_or_name():
        self.__ancestor = ancestor
      else:
        raise NotSavedError()
    elif isinstance(ancestor, Model):
      if ancestor.has_key():
        self.__ancestor = ancestor.key()
      else:
        raise NotSavedError()
    else:
      raise TypeError('ancestor should be Key or Model')
    return self
class GqlQuery(_BaseQuery):
  """A Query class that uses GQL query syntax instead of .filter() etc."""

  def __init__(self, query_string, *args, **kwds):
    """Constructor.

    Args:
      query_string: Properly formatted GQL query string.
      *args: Positional arguments used to bind numeric references in the query.
      **kwds: Dictionary-based arguments for named references.

    Raises:
      PropertyError if the query filters or sorts on a property that's not
      indexed.
    """
    # Local import to avoid a circular dependency at module load time --
    # presumably gql imports this module; confirm before moving to the top.
    from google.appengine.ext import gql
    app = kwds.pop('_app', None)
    namespace = None
    if isinstance(app, tuple):
      if len(app) != 2:
        raise BadArgumentError('_app must have 2 values if type is tuple.')
      app, namespace = app
    self._proto_query = gql.GQL(query_string, _app=app, namespace=namespace)
    if self._proto_query._entity is not None:
      model_class = class_for_kind(self._proto_query._entity)
    else:
      model_class = None
    super(GqlQuery, self).__init__(model_class,
                                   keys_only=self._proto_query._keys_only)
    if model_class is not None:
      # Reject filters/orders on unindexed properties up front.
      for property, unused in (self._proto_query.filters().keys() +
                               self._proto_query.orderings()):
        if property in model_class._unindexed_properties:
          raise PropertyError('Property \'%s\' is not indexed' % property)
    self.bind(*args, **kwds)

  def bind(self, *args, **kwds):
    """Bind arguments (positional or keyword) to the query.

    Note that you can also pass arguments directly to the query
    constructor. Each time you call bind() the previous set of
    arguments is replaced with the new set. This is useful because
    the hard work is in parsing the query; so if you expect to be
    using the same query with different sets of arguments, you should
    hold on to the GqlQuery() object and call bind() on it each time.

    Args:
      *args: Positional arguments used to bind numeric references in the query.
      **kwds: Dictionary-based arguments for named references.
    """
    self._args = []
    for arg in args:
      self._args.append(_normalize_query_parameter(arg))
    self._kwds = {}
    for name, arg in kwds.iteritems():
      self._kwds[name] = _normalize_query_parameter(arg)

  def run(self, **kwargs):
    """Iterator for this query that handles the LIMIT clause properly.

    If the GQL query string contains a LIMIT clause, this function fetches
    all results before returning an iterator. Otherwise results are retrieved
    in batches by the iterator.

    Args:
      config: datastore_rpc.Configuration to use for this request.

    Returns:
      Iterator for this query.
    """
    if self._proto_query.limit() >= 0:
      return iter(self.fetch(limit=self._proto_query.limit(),
                             offset=self._proto_query.offset(),
                             **kwargs))
    else:
      results = _BaseQuery.run(self, **kwargs)
      # No LIMIT: apply the OFFSET by skipping results manually.
      try:
        for _ in xrange(self._proto_query.offset()):
          results.next()
      except StopIteration:
        pass
      return results

  def _get_query(self):
    return self._proto_query.Bind(self._args, self._kwds,
                                  self._cursor, self._end_cursor)
class UnindexedProperty(Property):
  """A property that isn't indexed by either built-in or composite indices.

  TextProperty and BlobProperty derive from this class.
  """

  def __init__(self, *args, **kwds):
    """Construct property. See the Property class for details.

    Raises:
      ConfigurationError if indexed=True.
    """
    # Callers may only pass indexed=False (anything else raises), yet the
    # flag is then forced to True before calling the base constructor.
    # NOTE(review): this looks contradictory -- presumably Property.__init__
    # treats the flag specially and the unindexed bookkeeping happens
    # elsewhere; confirm against the Property base class before changing.
    self._require_parameter(kwds, 'indexed', False)
    kwds['indexed'] = True
    super(UnindexedProperty, self).__init__(*args, **kwds)

  def validate(self, value):
    """Validate property.

    Returns:
      A valid value.

    Raises:
      BadValueError if property is not an instance of data_type.
    """
    if value is not None and not isinstance(value, self.data_type):
      # Attempt coercion before rejecting (e.g. str -> Text).
      try:
        value = self.data_type(value)
      except TypeError, err:
        raise BadValueError('Property %s must be convertible '
                            'to a %s instance (%s)' %
                            (self.name, self.data_type.__name__, err))
    value = super(UnindexedProperty, self).validate(value)
    if value is not None and not isinstance(value, self.data_type):
      raise BadValueError('Property %s must be a %s instance' %
                          (self.name, self.data_type.__name__))
    return value
class TextProperty(UnindexedProperty):
  """A string that can be longer than 500 bytes.

  Values are not indexed (see UnindexedProperty).
  """
  data_type = Text
class StringProperty(Property):
  """A textual property, which can be multi- or single-line."""

  def __init__(self, verbose_name=None, multiline=False, **kwds):
    """Construct string property.

    Args:
      verbose_name: Verbose name is always first parameter.
      multiline: Whether newline characters are permitted in the value.
    """
    super(StringProperty, self).__init__(verbose_name, **kwds)
    self.multiline = multiline

  def validate(self, value):
    """Validate string property.

    Returns:
      A valid value.

    Raises:
      BadValueError if value is not a str or unicode instance, or if the
      property is single-line but the value contains a newline.
    """
    value = super(StringProperty, self).validate(value)
    if value is not None and not isinstance(value, basestring):
      raise BadValueError(
          'Property %s must be a str or unicode instance, not a %s'
          % (self.name, type(value).__name__))
    # "'\n' in value" is the idiomatic equivalent of the former
    # "value.find('\n') != -1".
    if not self.multiline and value and '\n' in value:
      raise BadValueError('Property %s is not multi-line' % self.name)
    return value

  data_type = basestring
class _CoercingProperty(Property):
  """A Property subclass that extends validate() to coerce to self.data_type."""

  def validate(self, value):
    """Coerce values (except None) to self.data_type.

    Args:
      value: The value to be validated and coerced.

    Returns:
      The coerced and validated value. It is guaranteed that this is
      either None or an instance of self.data_type; otherwise an exception
      is raised.

    Raises:
      BadValueError if the value could not be validated or coerced.
    """
    checked = super(_CoercingProperty, self).validate(value)
    if checked is None or isinstance(checked, self.data_type):
      return checked
    # The data_type constructor performs the actual coercion.
    return self.data_type(checked)
class CategoryProperty(_CoercingProperty):
  """A property whose values are Category instances.

  Non-None values are coerced via the Category constructor
  (see _CoercingProperty.validate).
  """
  data_type = Category
class LinkProperty(_CoercingProperty):
  """A property whose values are Link instances (fully-qualified URLs)."""

  def validate(self, value):
    """Validate that value is a complete URL.

    Returns:
      A valid Link value.

    Raises:
      BadValueError if the URL is missing its scheme or host.
    """
    value = super(LinkProperty, self).validate(value)
    if value is not None:
      parts = urlparse.urlsplit(value)
      # A "full" URL must carry both a scheme and a network location.
      if not (parts.scheme and parts.netloc):
        raise BadValueError('Property %s must be a full URL (\'%s\')' %
                            (self.name, value))
    return value

  data_type = Link
URLProperty = LinkProperty
class EmailProperty(_CoercingProperty):
  """A property whose values are Email instances.

  Non-None values are coerced via the Email constructor.
  """
  data_type = Email
class GeoPtProperty(_CoercingProperty):
  """A property whose values are GeoPt instances.

  Non-None values are coerced via the GeoPt constructor.
  """
  data_type = GeoPt
class IMProperty(_CoercingProperty):
  """A property whose values are IM (instant messaging handle) instances.

  Non-None values are coerced via the IM constructor.
  """
  data_type = IM
class PhoneNumberProperty(_CoercingProperty):
  """A property whose values are PhoneNumber instances.

  Non-None values are coerced via the PhoneNumber constructor.
  """
  data_type = PhoneNumber
class PostalAddressProperty(_CoercingProperty):
  """A property whose values are PostalAddress instances.

  Non-None values are coerced via the PostalAddress constructor.
  """
  data_type = PostalAddress
class BlobProperty(UnindexedProperty):
  """A byte string that can be longer than 500 bytes.

  Values are not indexed (see UnindexedProperty); use ByteStringProperty
  for short, indexable binary values.
  """
  data_type = Blob
class ByteStringProperty(Property):
  """A short (<=500 bytes) byte string.

  This type should be used for short binary values that need to be indexed. If
  you do not require indexing (regardless of length), use BlobProperty instead.
  """

  def validate(self, value):
    """Validate ByteString property.

    Returns:
      A valid value.

    Raises:
      BadValueError if property is not instance of 'ByteString'.
    """
    if value is not None and not isinstance(value, ByteString):
      # Attempt coercion before rejecting.
      try:
        value = ByteString(value)
      except TypeError, err:
        raise BadValueError('Property %s must be convertible '
                            'to a ByteString instance (%s)' % (self.name, err))
    value = super(ByteStringProperty, self).validate(value)
    if value is not None and not isinstance(value, ByteString):
      raise BadValueError('Property %s must be a ByteString instance'
                          % self.name)
    return value

  data_type = ByteString
class DateTimeProperty(Property):
  """The base class of all of our date/time properties.

  We handle common operations, like converting between time tuples and
  datetime instances.
  """

  def __init__(self, verbose_name=None, auto_now=False, auto_now_add=False,
               **kwds):
    """Construct a DateTimeProperty

    Args:
      verbose_name: Verbose name is always first parameter.
      auto_now: Date/time property is updated with the current time every time
        it is saved to the datastore.  Useful for properties that want to track
        the modification time of an instance.
      auto_now_add: Date/time is set to the when its instance is created.
        Useful for properties that record the creation time of an entity.
    """
    super(DateTimeProperty, self).__init__(verbose_name, **kwds)
    self.auto_now = auto_now
    self.auto_now_add = auto_now_add

  def validate(self, value):
    """Validate datetime.

    Returns:
      A valid value.

    Raises:
      BadValueError if property is not instance of 'datetime'.
    """
    value = super(DateTimeProperty, self).validate(value)
    if value and not isinstance(value, self.data_type):
      raise BadValueError('Property %s must be a %s' %
                          (self.name, self.data_type.__name__))
    return value

  def default_value(self):
    """Default value for datetime.

    Returns:
      value of now() as appropriate to the date-time instance if auto_now
      or auto_now_add is set, else user configured default value implementation.
    """
    if self.auto_now or self.auto_now_add:
      return self.now()
    return Property.default_value(self)

  def get_value_for_datastore(self, model_instance):
    """Get value from property to send to datastore.

    Returns:
      now() as appropriate to the date-time instance in the odd case where
      auto_now is set to True, else the default implementation.
    """
    # auto_now overrides whatever value is stored on the instance at
    # every save; auto_now_add only influences default_value() above.
    if self.auto_now:
      return self.now()
    else:
      return super(DateTimeProperty,
                   self).get_value_for_datastore(model_instance)

  data_type = datetime.datetime

  @staticmethod
  def now():
    """Get now as a full datetime value.

    Subclasses override this to narrow the value to a date or time.

    Returns:
      'now' as a whole timestamp, including both time and date.
    """
    return datetime.datetime.now()
def _date_to_datetime(value):
  """Convert a date to a datetime for datastore storage.

  Args:
    value: A datetime.date object.

  Returns:
    A datetime object whose date portion is value and whose time portion
    is midnight (0:00).
  """
  assert isinstance(value, datetime.date)
  return datetime.datetime.combine(value, datetime.time())
def _time_to_datetime(value):
  """Convert a time to a datetime for datastore storage.

  Args:
    value: A datetime.time object.

  Returns:
    A datetime object with the date fixed to 1970-01-01 and the time
    portion (including microseconds) copied from value.
  """
  assert isinstance(value, datetime.time)
  epoch = datetime.date(1970, 1, 1)
  return datetime.datetime(epoch.year, epoch.month, epoch.day,
                           value.hour, value.minute, value.second,
                           value.microsecond)
class DateProperty(DateTimeProperty):
  """A date property, which stores a date without a time."""

  @staticmethod
  def now():
    """Get now as a date datetime value.

    Returns:
      'date' part of 'now' only.
    """
    return datetime.datetime.now().date()

  def validate(self, value):
    """Validate date.

    Returns:
      A valid value.

    Raises:
      BadValueError if property is not instance of 'date',
      or if it is an instance of 'datetime' (which is a subclass
      of 'date', but for all practical purposes a different type).
    """
    value = super(DateProperty, self).validate(value)
    # Explicitly reject datetime: isinstance alone would accept it since
    # datetime subclasses date.
    if isinstance(value, datetime.datetime):
      raise BadValueError('Property %s must be a %s, not a datetime' %
                          (self.name, self.data_type.__name__))
    return value

  def get_value_for_datastore(self, model_instance):
    """Get value from property to send to datastore.

    We retrieve a datetime.date from the model instance and return a
    datetime.datetime instance with the time set to zero.

    See base class method documentation for details.
    """
    value = super(DateProperty, self).get_value_for_datastore(model_instance)
    if value is not None:
      assert isinstance(value, datetime.date)
      value = _date_to_datetime(value)
    return value

  def make_value_from_datastore(self, value):
    """Native representation of this property.

    We receive a datetime.datetime retrieved from the entity and return
    a datetime.date instance representing its date portion.

    See base class method documentation for details.
    """
    if value is not None:
      assert isinstance(value, datetime.datetime)
      value = value.date()
    return value

  data_type = datetime.date
class TimeProperty(DateTimeProperty):
  """A time property, which stores a time without a date."""

  @staticmethod
  def now():
    """Get now as a time datetime value.

    Returns:
      'time' part of 'now' only.
    """
    return datetime.datetime.now().time()

  def empty(self, value):
    """Is time property empty.

    "0:0" (midnight) is not an empty value.

    Returns:
      True if value is None, else False.
    """
    return value is None

  def get_value_for_datastore(self, model_instance):
    """Get value from property to send to datastore.

    We retrieve a datetime.time from the model instance and return a
    datetime.datetime instance with the date set to 1/1/1970.

    See base class method documentation for details.
    """
    value = super(TimeProperty, self).get_value_for_datastore(model_instance)
    if value is not None:
      assert isinstance(value, datetime.time), repr(value)
      value = _time_to_datetime(value)
    return value

  def make_value_from_datastore(self, value):
    """Native representation of this property.

    We receive a datetime.datetime retrieved from the entity and return
    a datetime.time instance representing its time portion.

    See base class method documentation for details.
    """
    if value is not None:
      assert isinstance(value, datetime.datetime)
      value = value.time()
    return value

  data_type = datetime.time
class IntegerProperty(Property):
  """An integer property."""

  def validate(self, value):
    """Validate integer property.

    Returns:
      A valid value.

    Raises:
      BadValueError if value is not an integer or long instance, or does
      not fit in a signed 64-bit range.
    """
    value = super(IntegerProperty, self).validate(value)
    if value is None:
      return value
    # bool is a subclass of int, so it must be excluded explicitly.
    if not isinstance(value, (int, long)) or isinstance(value, bool):
      raise BadValueError('Property %s must be an int or long, not a %s'
                          % (self.name, type(value).__name__))
    # Values must fit in a signed 64-bit integer.
    if value < -0x8000000000000000 or value > 0x7fffffffffffffff:
      raise BadValueError('Property %s must fit in 64 bits' % self.name)
    return value

  data_type = int

  def empty(self, value):
    """Is integer property empty.

    0 is not an empty value.

    Returns:
      True if value is None, else False.
    """
    return value is None
class RatingProperty(_CoercingProperty, IntegerProperty):
  """A property whose values are Rating instances.

  Combines coercion (via _CoercingProperty) with integer validation
  (via IntegerProperty).
  """
  data_type = Rating
class FloatProperty(Property):
  """A float property."""

  def validate(self, value):
    """Validate float.

    Returns:
      A valid value.

    Raises:
      BadValueError if value is neither None nor a float instance.
    """
    result = super(FloatProperty, self).validate(value)
    if result is None or isinstance(result, float):
      return result
    raise BadValueError('Property %s must be a float' % self.name)

  data_type = float

  def empty(self, value):
    """Is float property empty.

    0.0 is not an empty value.

    Returns:
      True if value is None, else False.
    """
    return value is None
class BooleanProperty(Property):
  """A boolean property."""

  def validate(self, value):
    """Validate boolean.

    Returns:
      A valid value.

    Raises:
      BadValueError if value is neither None nor a bool instance.
    """
    result = super(BooleanProperty, self).validate(value)
    if result is None or isinstance(result, bool):
      return result
    raise BadValueError('Property %s must be a bool' % self.name)

  data_type = bool

  def empty(self, value):
    """Is boolean property empty.

    False is not an empty value.

    Returns:
      True if value is None, else False.
    """
    return value is None
class UserProperty(Property):
    """A user property."""

    def __init__(self,
                 verbose_name=None,
                 name=None,
                 required=False,
                 validator=None,
                 choices=None,
                 auto_current_user=False,
                 auto_current_user_add=False,
                 indexed=True):
        """Initializes this Property with the given options.

        Note: this does *not* support the 'default' keyword argument.
        Use auto_current_user_add=True instead.

        Args:
          verbose_name: User friendly name of property.
          name: Storage name for property.  By default, uses attribute name
            as it is assigned in the Model sub-class.
          required: Whether property is required.
          validator: User provided method used for validation.
          choices: User provided set of valid property values.
          auto_current_user: If true, the value is set to the current user
            each time the entity is written to the datastore.
          auto_current_user_add: If true, the value is set to the current user
            the first time the entity is written to the datastore.
          indexed: Whether property is indexed.
        """
        super(UserProperty, self).__init__(verbose_name, name,
                                           required=required,
                                           validator=validator,
                                           choices=choices,
                                           indexed=indexed)
        self.auto_current_user = auto_current_user
        self.auto_current_user_add = auto_current_user_add

    def validate(self, value):
        """Validate user.

        Returns:
          A valid value.

        Raises:
          BadValueError if property is not instance of 'User'.
        """
        value = super(UserProperty, self).validate(value)
        if value is not None and not isinstance(value, users.User):
            raise BadValueError('Property %s must be a User' % self.name)
        return value

    def default_value(self):
        """Default value for user.

        Returns:
          Value of users.get_current_user() if auto_current_user or
          auto_current_user_add is set; else None. (But *not* the default
          implementation, since we don't support the 'default' keyword
          argument.)
        """
        if self.auto_current_user or self.auto_current_user_add:
            return users.get_current_user()
        return None

    def get_value_for_datastore(self, model_instance):
        """Get value from property to send to datastore.

        Returns:
          Value of users.get_current_user() if auto_current_user is set;
          else the default implementation.
        """
        if self.auto_current_user:
            return users.get_current_user()
        return super(UserProperty, self).get_value_for_datastore(model_instance)

    # Python type stored by this property.
    data_type = users.User
class ListProperty(Property):
    """A property that stores a list of things.

    This is a parameterized property; the parameter must be a valid
    non-list data type, and all items must conform to this type.
    """

    def __init__(self, item_type, verbose_name=None, default=None, **kwds):
        """Construct ListProperty.

        Args:
          item_type: Type for the list items; must be one of the allowed property
            types.
          verbose_name: Optional verbose name.
          default: Optional default value; if omitted, an empty list is used.
          **kwds: Optional additional keyword arguments, passed to base class.

        Note that the only permissible value for 'required' is True.
        """
        # str is normalized to basestring so both str and unicode items pass
        # item validation.
        if item_type is str:
            item_type = basestring
        if not isinstance(item_type, type):
            raise TypeError('Item type should be a type object')
        if item_type not in _ALLOWED_PROPERTY_TYPES:
            raise ValueError('Item type %s is not acceptable' % item_type.__name__)
        if issubclass(item_type, (Blob, Text)):
            # Callers must explicitly declare indexed=False for Blob/Text
            # lists, yet the flag is forced True internally afterwards.
            # NOTE(review): this matches the code as written — confirm intent
            # against upstream before changing.
            self._require_parameter(kwds, 'indexed', False)
            kwds['indexed'] = True
        # A list property is always "required"; an absent list is just empty.
        self._require_parameter(kwds, 'required', True)
        if default is None:
            default = []
        self.item_type = item_type
        super(ListProperty, self).__init__(verbose_name,
                                           default=default,
                                           **kwds)

    def validate(self, value):
        """Validate list.

        Returns:
          A valid value.

        Raises:
          BadValueError if property is not a list whose items are instances of
          the item_type given to the constructor.
        """
        value = super(ListProperty, self).validate(value)
        if value is not None:
            if not isinstance(value, list):
                raise BadValueError('Property %s must be a list' % self.name)
            value = self.validate_list_contents(value)
        return value

    def validate_list_contents(self, value):
        """Validates that all items in the list are of the correct type.

        Returns:
          The validated list.

        Raises:
          BadValueError if the list has items are not instances of the
          item_type given to the constructor.
        """
        # int and long are interchangeable when validating integer lists.
        if self.item_type in (int, long):
            item_type = (int, long)
        else:
            item_type = self.item_type
        for item in value:
            if not isinstance(item, item_type):
                if item_type == (int, long):
                    raise BadValueError('Items in the %s list must all be integers.' %
                                        self.name)
                else:
                    raise BadValueError(
                        'Items in the %s list must all be %s instances' %
                        (self.name, self.item_type.__name__))
        return value

    def empty(self, value):
        """Is list property empty.

        [] is not an empty value.

        Returns:
          True if value is None, else false.
        """
        return value is None

    # Python type stored by this property.
    data_type = list

    def default_value(self):
        """Default value for list.

        Because the property supplied to 'default' is a static value,
        that value must be shallow copied to prevent all fields with
        default values from sharing the same instance.

        Returns:
          Copy of the default value.
        """
        return list(super(ListProperty, self).default_value())

    def get_value_for_datastore(self, model_instance):
        """Get value from property to send to datastore.

        Returns:
          validated list appropriate to save in the datastore.
        """
        value = self.validate_list_contents(
            super(ListProperty, self).get_value_for_datastore(model_instance))
        if self.validator:
            self.validator(value)
        return value
class StringListProperty(ListProperty):
    """A property that stores a list of strings.

    A shorthand for the most common type of ListProperty.
    """

    def __init__(self, verbose_name=None, default=None, **kwds):
        """Construct StringListProperty.

        Args:
          verbose_name: Optional verbose name.
          default: Optional default value; if omitted, an empty list is used.
          **kwds: Optional additional keyword arguments, passed to ListProperty().
        """
        # basestring accepts both str and unicode items (Python 2).
        super(StringListProperty, self).__init__(basestring,
                                                 verbose_name=verbose_name,
                                                 default=default,
                                                 **kwds)
class ReferenceProperty(Property):
    """A property that represents a many-to-one reference to another model.

    For example, a reference property in model A that refers to model B forms
    a many-to-one relationship from A to B: every instance of A refers to a
    single B instance, and every B instance can have many A instances refer
    to it.
    """

    def __init__(self,
                 reference_class=None,
                 verbose_name=None,
                 collection_name=None,
                 **attrs):
        """Construct ReferenceProperty.

        Args:
          reference_class: Which model class this property references.
          verbose_name: User friendly name of property.
          collection_name: If provided, alternate name of collection on
            reference_class to store back references.  Use this to allow
            a Model to have multiple fields which refer to the same class.

        Raises:
          KindError: if reference_class is neither a Model subclass nor the
            _SELF_REFERENCE sentinel.
        """
        super(ReferenceProperty, self).__init__(verbose_name, **attrs)
        self.collection_name = collection_name
        if reference_class is None:
            reference_class = Model
        if not ((isinstance(reference_class, type) and
                 issubclass(reference_class, Model)) or
                reference_class is _SELF_REFERENCE):
            raise KindError('reference_class must be Model or _SELF_REFERENCE')
        # data_type mirrors reference_class so generic Property machinery
        # reports the referenced model type.
        self.reference_class = self.data_type = reference_class

    def __property_config__(self, model_class, property_name):
        """Loads all of the references that point to this model.

        We need to do this to create the ReverseReferenceProperty properties for
        this model and create the <reference>_set attributes on the referenced
        model, e.g.:

          class Story(db.Model):
            title = db.StringProperty()

          class Comment(db.Model):
            story = db.ReferenceProperty(Story)

          story = Story.get(id)
          print [c for c in story.comment_set]

        In this example, the comment_set property was created based on the reference
        from Comment to Story (which is inherently one to many).

        Args:
          model_class: Model class which will have its reference properties
            initialized.
          property_name: Name of property being configured.

        Raises:
          DuplicatePropertyError if referenced class already has the provided
            collection name as a property.
        """
        super(ReferenceProperty, self).__property_config__(model_class,
                                                           property_name)
        # A self-reference is resolved to the owning class only now, when the
        # class actually exists.
        if self.reference_class is _SELF_REFERENCE:
            self.reference_class = self.data_type = model_class
        if self.collection_name is None:
            self.collection_name = '%s_set' % (model_class.__name__.lower())
        existing_prop = getattr(self.reference_class, self.collection_name, None)
        if existing_prop is not None:
            # Re-configuring the identical reverse reference (e.g. on module
            # reload) is tolerated; any other collision is an error.
            if not (isinstance(existing_prop, _ReverseReferenceProperty) and
                    existing_prop._prop_name == property_name and
                    existing_prop._model.__name__ == model_class.__name__ and
                    existing_prop._model.__module__ == model_class.__module__):
                raise DuplicatePropertyError('Class %s already has property %s '
                                             % (self.reference_class.__name__,
                                                self.collection_name))
        setattr(self.reference_class,
                self.collection_name,
                _ReverseReferenceProperty(model_class, property_name))

    def __get__(self, model_instance, model_class):
        """Get reference object.

        This method will fetch unresolved entities from the datastore if
        they are not already loaded.

        Returns:
          ReferenceProperty to Model object if property is set, else None.
        """
        if model_instance is None:
            # Accessed on the class itself: return the descriptor.
            return self
        if hasattr(model_instance, self.__id_attr_name()):
            reference_id = getattr(model_instance, self.__id_attr_name())
        else:
            reference_id = None
        if reference_id is not None:
            resolved = getattr(model_instance, self.__resolved_attr_name())
            if resolved is not None:
                return resolved
            else:
                instance = get(reference_id)
                if instance is None:
                    raise Error('ReferenceProperty failed to be resolved')
                # Memoize so later accesses skip the datastore fetch.
                setattr(model_instance, self.__resolved_attr_name(), instance)
                return instance
        else:
            return None

    def __set__(self, model_instance, value):
        """Set reference."""
        value = self.validate(value)
        if value is not None:
            if isinstance(value, datastore.Key):
                # Only the key is known; resolution is deferred until access.
                setattr(model_instance, self.__id_attr_name(), value)
                setattr(model_instance, self.__resolved_attr_name(), None)
            else:
                setattr(model_instance, self.__id_attr_name(), value.key())
                setattr(model_instance, self.__resolved_attr_name(), value)
        else:
            setattr(model_instance, self.__id_attr_name(), None)
            setattr(model_instance, self.__resolved_attr_name(), None)

    def get_value_for_datastore(self, model_instance):
        """Get key of reference rather than reference itself."""
        return getattr(model_instance, self.__id_attr_name())

    def validate(self, value):
        """Validate reference.

        Returns:
          A valid value.

        Raises:
          BadValueError for the following reasons:
            - Value is not saved.
            - Object not of correct model type for reference.
        """
        # A raw datastore Key is accepted as-is, without model-type checking.
        if isinstance(value, datastore.Key):
            return value
        if value is not None and not value.has_key():
            raise BadValueError(
                '%s instance must have a complete key before it can be stored as a '
                'reference' % self.reference_class.kind())
        value = super(ReferenceProperty, self).validate(value)
        if value is not None and not isinstance(value, self.reference_class):
            raise KindError('Property %s must be an instance of %s' %
                            (self.name, self.reference_class.kind()))
        return value

    def __id_attr_name(self):
        """Get attribute of referenced id.

        Returns:
          Attribute where to store id of referenced entity.
        """
        return self._attr_name()

    def __resolved_attr_name(self):
        """Get attribute of resolved attribute.

        The resolved attribute is where the actual loaded reference instance is
        stored on the referring model instance.

        Returns:
          Attribute name of where to store resolved reference model instance.
        """
        return '_RESOLVED' + self._attr_name()


# Short alias kept for backwards compatibility.
Reference = ReferenceProperty
def SelfReferenceProperty(verbose_name=None, collection_name=None, **attrs):
    """Create a self reference.

    Function for declaring a self referencing property on a model.

    Example:
      class HtmlNode(db.Model):
        parent = db.SelfReferenceProperty('Parent', 'children')

    Args:
      verbose_name: User friendly name of property.
      collection_name: Name of collection on model.

    Raises:
      ConfigurationError if reference_class provided as parameter.
    """
    # reference_class is implied (_SELF_REFERENCE); passing it is an error.
    if 'reference_class' in attrs:
        raise ConfigurationError(
            'Do not provide reference_class to self-reference.')
    return ReferenceProperty(_SELF_REFERENCE,
                             verbose_name,
                             collection_name,
                             **attrs)


# Short alias kept for backwards compatibility.
SelfReference = SelfReferenceProperty
class _ReverseReferenceProperty(Property):
    """The inverse of the Reference property above.

    We construct reverse references automatically for the model to which
    the Reference property is pointing to create the one-to-many property for
    that model.  For example, if you put a Reference property in model A that
    refers to model B, we automatically create a _ReverseReference property in
    B called a_set that can fetch all of the model A instances that refer to
    that instance of model B.
    """

    def __init__(self, model, prop):
        """Constructor for reverse reference.

        Constructor does not take standard values of other property types.

        Args:
          model: Model class that this property is a collection of.
          property: Name of foreign property on referred model that points back
            to this properties entity.
        """
        # Name-mangled to stay out of the way of user-defined model attributes.
        self.__model = model
        self.__property = prop

    @property
    def _model(self):
        """Internal helper to access the model class, read-only."""
        return self.__model

    @property
    def _prop_name(self):
        """Internal helper to access the property name, read-only."""
        return self.__property

    def __get__(self, model_instance, model_class):
        """Fetches collection of model instances of this collection property."""
        if model_instance is not None:
            # Query all entities whose reference property points at this
            # instance's key.
            query = Query(self.__model)
            return query.filter(self.__property + ' =', model_instance.key())
        else:
            # Accessed on the class: return the descriptor itself.
            return self

    def __set__(self, model_instance, value):
        """Not possible to set a new collection."""
        raise BadValueError('Virtual property is read-only')
class ComputedProperty(Property):
    """Property used for creating properties derived from other values.

    Certain attributes should never be set by users but automatically
    calculated at run-time from other values of the same entity.  These
    values are implemented as persistent properties because they provide
    useful search keys.

    A computed property behaves the same as normal properties except that
    you may not set values on them.  Attempting to do so raises
    db.DerivedPropertyError which db.Model knows to ignore during entity
    loading time.  Whenever getattr is used for the property
    the value is recalculated.  This happens when the model calls
    get_value_for_datastore on the property.

    Example:

      import string

      class Person(Model):
        name = StringProperty(required=True)

        @db.ComputedProperty
        def lower_case_name(self):
          return self.name.lower()

      # Find all people regardless of case used in name.
      Person.gql('WHERE lower_case_name=:1', name_to_search_for.lower())
    """

    def __init__(self, value_function, indexed=True):
        """Constructor.

        Args:
          value_function: Callable f(model_instance) -> value used to derive
            persistent property value for storage in datastore.
          indexed: Whether or not the attribute should be indexed.
        """
        super(ComputedProperty, self).__init__(indexed=indexed)
        self.__value_function = value_function

    def __set__(self, *args):
        """Disallow setting this value.

        Raises:
          DerivedPropertyError when developer attempts to set attribute manually.
          Model knows to ignore this exception when getting from datastore.
        """
        raise DerivedPropertyError(
            'Computed property %s cannot be set.' % self.name)

    def __get__(self, model_instance, model_class):
        """Derive property value.

        Args:
          model_instance: Instance to derive property for in bound method case,
            else None.
          model_class: Model class associated with this property descriptor.

        Returns:
          Result of calling self.__value_function as provided by property
          constructor.
        """
        # Accessed on the class itself: return the descriptor.
        if model_instance is None:
            return self
        return self.__value_function(model_instance)
# Transaction helpers re-exported from the low-level datastore API.
run_in_transaction = datastore.RunInTransaction
run_in_transaction_custom_retries = datastore.RunInTransactionCustomRetries

# CamelCase aliases kept for backwards compatibility.
RunInTransaction = run_in_transaction
RunInTransactionCustomRetries = run_in_transaction_custom_retries

# Cursor (de)serialization helpers for web-safe pagination tokens.
websafe_encode_cursor = datastore_query.Cursor.to_websafe_string
websafe_decode_cursor = datastore_query.Cursor.from_websafe_string

is_in_transaction = datastore.IsInTransaction
create_config = datastore.CreateConfig
|
{
"content_hash": "9ebe1fc0257afb64b65954b553e4a0f5",
"timestamp": "",
"source": "github",
"line_count": 3403,
"max_line_length": 80,
"avg_line_length": 32.66588304437261,
"alnum_prop": 0.6665317284683615,
"repo_name": "SRabbelier/Melange",
"id": "9bec50a4bb5ca6689ad117b6fd4f2b567ca5a5a4",
"size": "111764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thirdparty/google_appengine/google/appengine/ext/db/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "400472"
},
{
"name": "C++",
"bytes": "20"
},
{
"name": "Java",
"bytes": "1496"
},
{
"name": "JavaScript",
"bytes": "1623582"
},
{
"name": "PHP",
"bytes": "1032"
},
{
"name": "Perl",
"bytes": "177565"
},
{
"name": "Python",
"bytes": "15317793"
},
{
"name": "Ruby",
"bytes": "59"
},
{
"name": "Shell",
"bytes": "15303"
}
],
"symlink_target": ""
}
|
from oslo_log import log as logging
from oslo_middleware import base
from oslo_utils import strutils
import webob
import webob.exc as ex
from sahara.i18n import _
from sahara.i18n import _LW
LOG = logging.getLogger(__name__)
class AuthValidator(base.Middleware):
    """Handles token auth results and tenants."""

    @webob.dec.wsgify
    def __call__(self, req):
        """Ensures that tenants in url and token are equal.

        Handle incoming request by checking tenant info prom the headers and
        url ({tenant_id} url attribute).

        Pass request downstream on success.
        Reject request if tenant_id from headers not equals to tenant_id from
        url.
        """
        token_tenant = req.environ.get("HTTP_X_TENANT_ID")
        if not token_tenant:
            LOG.warning(_LW("Can't get tenant_id from env"))
            raise ex.HTTPServiceUnavailable()

        path = req.environ['PATH_INFO']
        # The root path carries no tenant segment; nothing to compare.
        if path == '/':
            return self.application

        try:
            version, url_tenant, rest = strutils.split_path(path, 3, 3, True)
        except ValueError:
            LOG.warning(_LW("Incorrect path: {path}").format(path=path))
            raise ex.HTTPNotFound(_("Incorrect path"))

        if token_tenant != url_tenant:
            LOG.debug("Unauthorized: token tenant != requested tenant")
            raise ex.HTTPUnauthorized(
                _('Token tenant != requested tenant'))
        return self.application
class AuthValidatorV2(base.Middleware):
    """Handles token auth results and tenants."""

    @webob.dec.wsgify
    def __call__(self, req):
        """Ensures that the requested and token tenants match

        Handle incoming requests by checking tenant info from the
        headers and url ({tenant_id} url attribute), if using v1 or v1.1
        APIs. If using the v2 API, this function will check the token
        tenant and the requested tenant in the headers.

        Pass request downstream on success.
        Reject request if tenant_id from headers is not equal to the
        tenant_id from url or v2 project header.
        """
        path = req.environ['PATH_INFO']
        # The root path carries no tenant information; let it through.
        if path == '/':
            return self.application

        token_tenant = req.environ.get("HTTP_X_TENANT_ID")
        if not token_tenant:
            LOG.warning(_LW("Can't get tenant_id from env"))
            raise ex.HTTPServiceUnavailable()

        try:
            if path.startswith('/v2'):
                # v2 URLs have no tenant segment; it comes from a header.
                version, rest = strutils.split_path(path, 2, 2, True)
                requested_tenant = req.headers.get('OpenStack-Project-ID')
            else:
                version, requested_tenant, rest = strutils.split_path(
                    path, 3, 3, True)
        except ValueError:
            LOG.warning(_LW("Incorrect path: {path}").format(path=path))
            raise ex.HTTPNotFound(_("Incorrect path"))

        if token_tenant != requested_tenant:
            LOG.debug("Unauthorized: token tenant != requested tenant")
            raise ex.HTTPUnauthorized(
                _('Token tenant != requested tenant'))
        return self.application
|
{
"content_hash": "d3fc6f91f0d1a49c98ae23162fda666f",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 78,
"avg_line_length": 36.46067415730337,
"alnum_prop": 0.5784283513097073,
"repo_name": "tellesnobrega/sahara",
"id": "5f83fc7383e7259b926642e34e90a9e638b7c1ed",
"size": "3828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sahara/api/middleware/auth_valid.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "952"
},
{
"name": "Python",
"bytes": "3354711"
},
{
"name": "Shell",
"bytes": "56856"
}
],
"symlink_target": ""
}
|
from pants.base.build_environment import pants_version
from pants.version import VERSION as _VERSION
def test_version() -> None:
    """pants_version() must report exactly pants.version.VERSION."""
    assert pants_version() == _VERSION
|
{
"content_hash": "28b5f36331fb4d0707bcf4ac06579bc1",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 54,
"avg_line_length": 28,
"alnum_prop": 0.7619047619047619,
"repo_name": "jsirois/pants",
"id": "e2bf75378f3d1e1454b93afa49ed91ce6bccb4d4",
"size": "300",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "testprojects/pants-plugins/tests/python/test_pants_plugin/test_pants_plugin_pants_requirement.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "6008"
},
{
"name": "Mustache",
"bytes": "1798"
},
{
"name": "Python",
"bytes": "2837069"
},
{
"name": "Rust",
"bytes": "1241058"
},
{
"name": "Shell",
"bytes": "57720"
},
{
"name": "Starlark",
"bytes": "27937"
}
],
"symlink_target": ""
}
|
from lldbsuite.test import lldbinline
from lldbsuite.test import decorators
# Build an lldb inline test from the sources next to this file; the test is
# marked expected-failure on Windows (tracked at llvm.org/pr24764).
lldbinline.MakeInlineTest(
    __file__, globals(), [
        decorators.expectedFailureAll(
            oslist=["windows"], bugnumber="llvm.org/pr24764")])
|
{
"content_hash": "5706ec5dc13565b42cbef1472fbde351",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 63,
"avg_line_length": 33.42857142857143,
"alnum_prop": 0.7051282051282052,
"repo_name": "youtube/cobalt",
"id": "af362f5be5d7708c63b0e447eb7ccf0b8b7297ab",
"size": "234",
"binary": false,
"copies": "24",
"ref": "refs/heads/master",
"path": "third_party/llvm-project/lldb/packages/Python/lldbsuite/test/lang/cpp/namespace_conflicts/TestNamespaceConflicts.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""This code example creates new line item creative associations (LICAs) for an
existing line item and a set of creative ids.
To determine which LICAs exist, run get_all_licas.py."""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate modules from the client library.
from googleads import dfp
# Set the line item ID and creative IDs to associate.
LINE_ITEM_ID = 'INSERT_LINE_ITEM_ID_HERE'
CREATIVE_IDS = ['INSERT_CREATIVE_IDS_HERE']
def main(client, line_item_id, creative_ids):
    """Create a LICA for each creative id against the given line item.

    Args:
      client: an initialized dfp.DfpClient.
      line_item_id: id of the line item to associate the creatives with.
      creative_ids: list of creative ids to associate.
    """
    # Initialize appropriate service.
    lica_service = client.GetService(
        'LineItemCreativeAssociationService', version='v201403')
    licas = []
    for creative_id in creative_ids:
        licas.append({'creativeId': creative_id,
                      'lineItemId': line_item_id})
    # Create the LICAs remotely.
    licas = lica_service.createLineItemCreativeAssociations(licas)
    # Display results.
    if licas:
        for lica in licas:
            print ('LICA with line item id \'%s\', creative id \'%s\', and '
                   'status \'%s\' was created.' %
                   (lica['lineItemId'], lica['creativeId'], lica['status']))
    else:
        # NOTE: Python 2 print statement — this example targets Python 2.
        print 'No LICAs created.'


if __name__ == '__main__':
    # Initialize client object.
    dfp_client = dfp.DfpClient.LoadFromStorage()
    main(dfp_client, LINE_ITEM_ID, CREATIVE_IDS)
|
{
"content_hash": "292df4e35997c1654ecd0eaa9efaae85",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 79,
"avg_line_length": 31.523809523809526,
"alnum_prop": 0.6722054380664653,
"repo_name": "dietrichc/streamline-ppc-reports",
"id": "a97611baed1cbc6e8cb42301b1bc0b3252fb762a",
"size": "1942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/dfp/v201403/line_item_creative_association_service/create_licas.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "492"
},
{
"name": "JavaScript",
"bytes": "504"
},
{
"name": "Python",
"bytes": "2235969"
}
],
"symlink_target": ""
}
|
"""
This script is called by update_route.sh. It is to set a default route for route tables to use the first online NAT instancein an OpsWorks stack.
Route tables should store the stack id and the layer short name that contain NAT instances in the NATStackId and NATLAyer tags respectively.
"""
import argparse
import logging

import boto
# `import boto` alone does not guarantee the boto.vpc submodule is loaded.
import boto.vpc
def getParameter(paramName, isRequired, isList, args):
    """Return the value of *paramName* from a parsed argparse namespace.

    Args:
      paramName: attribute name to look up on the namespace.
      isRequired: if True and the value is missing/empty, prompt
        interactively until a non-empty value is entered.
      isList: if True, split the value on commas and return a list.
      args: argparse.Namespace (any object with a __dict__).

    Returns:
      The string value, a list of strings when isList is True, or None when
      the parameter is absent/empty and not required.
    """
    raw = args.__dict__.get(paramName)
    if raw:
        return raw.split(',') if isList else raw
    if not isRequired:
        # Fixed: previously compared with `param == None`; identity check is
        # the correct idiom for None.
        return None
    # Interactive fallback.  NOTE: raw_input is Python 2 only, matching the
    # rest of this script.  (Renamed local from `str`, which shadowed the
    # builtin.)
    while True:
        response = raw_input('Please enter %s: ' % paramName).strip()
        if response:
            break
    return response.split(',') if isList else response
# ---- Command-line parsing ----------------------------------------------
parser = argparse.ArgumentParser(description='Create a default route to use a NAT instance.')
parser.add_argument('-i', '--instance_id', dest='instance_id', help='NAT instance Id', required=True)
parser.add_argument('-r', '--region', dest='region', default='us-east-1', help='Region name', required=False)
parser.add_argument('-s', '--route_id', dest='route_id', help='Routetable Id for NAT instance', required=True)
parser.add_argument('-v', '--verbose', help='increase output verbosity', action='store_true')
args = parser.parse_args()

# ---- Logging setup ------------------------------------------------------
level = logging.DEBUG if args.verbose else logging.INFO
logger = logging.getLogger()
logger.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
logger.addHandler(ch)
logger.debug('Command line arguments: %s' % args)

instance_id = getParameter('instance_id', False, False, args)
# 'profile' is not declared above, so this is always None (default creds).
profile = getParameter('profile', False, False, args)
region = getParameter('region', False, False, args)
param_route_id = getParameter('route_id', False, False, args)
logger.debug('instance_id: %s' % instance_id)
logger.debug('profile: %s' % profile)
logger.debug('region: %s' % region)
logger.debug('route_id: %s' % param_route_id)

try:
    boto.connect_vpc(profile_name=profile)
    vpc = boto.vpc.connect_to_region(region)
    # BUG FIX: split() handles both the single-id and comma-separated cases;
    # previously a lone id string was iterated character by character.
    route_ids = param_route_id.split(',')
    for route_id in route_ids:
        # BUG FIX: get_all_route_tables returns a *list* of route tables;
        # previously the list itself was treated as a single table.
        route_tables = vpc.get_all_route_tables(route_table_ids=[route_id])
        logger.debug('route_tables: %s' % route_tables)
        for route_table in route_tables:
            # BUG FIX: create_route was never initialized, causing a
            # NameError (or never creating the route).  Default to True and
            # clear it only when the correct route already exists.
            create_route = True
            # Scan for an existing default route entry.
            for route in route_table.routes:
                target = route.destination_cidr_block
                logger.debug('target {0}'.format(target))
                if target == "0.0.0.0/0":
                    logger.debug('instance_id {0}'.format(route.instance_id))
                    if instance_id == route.instance_id:
                        # Already routed through this NAT instance.
                        create_route = False
                    else:
                        # Stale default route through another instance.
                        vpc.delete_route(route_table.id, "0.0.0.0/0")
                    break
            # Create route to use this NAT instance.
            if create_route:
                vpc.create_route(route_table.id, "0.0.0.0/0", instance_id=instance_id)
                print('Default route is set for {0} to use {1}'.format(route_table.id, instance_id))
except Exception as e:
    # BUG FIX: e.message is deprecated/absent on many exception types.
    logger.info(str(e))
|
{
"content_hash": "e60ba0f5cd4444d84b94902009a4108d",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 146,
"avg_line_length": 39.723404255319146,
"alnum_prop": 0.6537225495447242,
"repo_name": "cloudtp/Cloud-Portal",
"id": "1f74e356cd53131570c764efcaaecc69b6ba3801",
"size": "3753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devops/scripts/ubuntu-NAT/update_route.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "906510"
},
{
"name": "HTML",
"bytes": "1896101"
},
{
"name": "JavaScript",
"bytes": "3727006"
},
{
"name": "PHP",
"bytes": "1684"
},
{
"name": "Python",
"bytes": "74927"
},
{
"name": "Ruby",
"bytes": "981"
},
{
"name": "Shell",
"bytes": "1314"
}
],
"symlink_target": ""
}
|
from classytags.arguments import Argument, MultiValueArgument
from classytags.core import Options, Tag
from classytags.helpers import InclusionTag
from classytags.parser import Parser
from cms.models import Page, Placeholder as PlaceholderModel
from cms.plugin_rendering import render_plugins, render_placeholder
from cms.plugins.utils import get_plugins
from cms.utils import get_language_from_request
from cms.utils.moderator import get_cmsplugin_queryset, get_page_queryset
from cms.utils.placeholder import validate_placeholder_name
from django import template
from django.conf import settings
from django.contrib.sites.models import Site
from django.core.cache import cache
from django.core.mail import mail_managers
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from itertools import chain
import re
register = template.Library()
def get_site_id(site):
    """Resolve *site* (Site object, int, or digit string) to a numeric id.

    Falls back to settings.SITE_ID for falsy or unrecognized values.
    """
    if not site:
        return settings.SITE_ID
    if isinstance(site, Site):
        return site.id
    if isinstance(site, int) or (isinstance(site, basestring) and site.isdigit()):
        return int(site)
    return settings.SITE_ID
def has_permission(page, request):
    """Template filter: True if the requesting user may change *page*."""
    return page.has_change_permission(request)


register.filter(has_permission)
CLEAN_KEY_PATTERN = re.compile(r'[^a-zA-Z0-9_-]')
def _clean_key(key):
return CLEAN_KEY_PATTERN.sub('-', key)
def _get_cache_key(name, page_lookup, lang, site_id):
    """Build a cache key for a page lookup, scoped by site and language."""
    if isinstance(page_lookup, Page):
        raw_key = str(page_lookup.pk)
    else:
        raw_key = str(page_lookup)
    return '%s__page_lookup:%s_site:%s_lang:%s' % (
        name, _clean_key(raw_key), site_id, lang)
def _get_page_by_untyped_arg(page_lookup, request, site_id):
    """
    The `page_lookup` argument can be of any of the following types:
    - Integer: interpreted as `pk` of the desired page
    - String: interpreted as `reverse_id` of the desired page
    - `dict`: a dictionary containing keyword arguments to find the desired page
    (for instance: `{'pk': 1}`)
    - `Page`: you can also pass a Page object directly, in which case there will be no database lookup.
    - `None`: the current page will be used
    """
    if page_lookup is None:
        return request.current_page
    if isinstance(page_lookup, Page):
        return page_lookup
    # Normalize scalar lookups into keyword-argument dicts.
    if isinstance(page_lookup, basestring):
        page_lookup = {'reverse_id': page_lookup}
    elif isinstance(page_lookup, (int, long)):
        page_lookup = {'pk': page_lookup}
    elif not isinstance(page_lookup, dict):
        raise TypeError('The page_lookup argument can be either a Dictionary, Integer, Page, or String.')
    # Restrict the lookup to the requested site.
    page_lookup.update({'site': site_id})
    try:
        return get_page_queryset(request).get(**page_lookup)
    except Page.DoesNotExist:
        # Broken lookup: re-raise in DEBUG for visibility; in production,
        # optionally email the managers and render nothing.
        site = Site.objects.get_current()
        subject = _('Page not found on %(domain)s') % {'domain': site.domain}
        body = _("A template tag couldn't find the page with lookup arguments `%(page_lookup)s\n`. "
                 "The URL of the request was: http://%(host)s%(path)s") \
               % {'page_lookup': repr(page_lookup), 'host': site.domain, 'path': request.path}
        if settings.DEBUG:
            raise Page.DoesNotExist(body)
        else:
            if settings.SEND_BROKEN_LINK_EMAILS:
                mail_managers(subject, body, fail_silently=True)
            return None
class PageUrl(InclusionTag):
    """{% page_url page_lookup [lang] [site] %}

    Renders the absolute URL of the looked-up page (result is cached), or
    nothing when the page cannot be resolved.
    """
    template = 'cms/content.html'
    name = 'page_url'

    options = Options(
        Argument('page_lookup'),
        Argument('lang', required=False, default=None),
        Argument('site', required=False, default=None),
    )

    def get_context(self, context, page_lookup, lang, site):
        """Resolve the page URL, consulting and populating the cache."""
        site_id = get_site_id(site)
        request = context.get('request', False)
        if not request:
            return {'content': ''}
        # "dummy" appears to be a sentinel for rendering outside a real page.
        # NOTE(review): inferred from the sentinel value — confirm.
        if request.current_page == "dummy":
            return {'content': ''}
        if lang is None:
            lang = get_language_from_request(request)
        cache_key = _get_cache_key('page_url', page_lookup, lang, site_id)+'_type:absolute_url'
        url = cache.get(cache_key)
        if not url:
            page = _get_page_by_untyped_arg(page_lookup, request, site_id)
            if page:
                url = page.get_absolute_url(language=lang)
                cache.set(cache_key, url, settings.CMS_CACHE_DURATIONS['content'])
        if url:
            return {'content': url}
        return {'content': ''}


register.tag(PageUrl)
# Legacy alias for the same tag.
register.tag('page_id_url', PageUrl)
def _get_placeholder(current_page, page, context, name):
    """Return the placeholder named *name* on *page*, or None.

    Placeholders of every visited page are cached on *current_page* so one
    render hits the database only once per page.
    """
    placeholder_cache = getattr(current_page, '_tmp_placeholders_cache', {})
    if page.pk in placeholder_cache:
        return placeholder_cache[page.pk].get(name, None)
    # Cache miss: load and index all placeholders of this page by slot.
    placeholder_cache[page.pk] = {}
    placeholders = page.placeholders.all()
    for placeholder in placeholders:
        placeholder_cache[page.pk][placeholder.slot] = placeholder
    current_page._tmp_placeholders_cache = placeholder_cache
    return placeholder_cache[page.pk].get(name, None)
def get_placeholder_content(context, request, current_page, name, inherit):
    """Return rendered content for placeholder ``name`` on ``current_page``.

    With ``inherit=True`` the page's cached ancestors are walked (nearest
    first) and the first placeholder that has plugins and renders non-empty
    content wins. If nothing renders, the page's own placeholder is rendered
    anyway so that e.g. edit-mode markup is still produced.
    """
    pages = [current_page]
    if inherit:
        pages = chain([current_page], current_page.get_cached_ancestors(ascending=True))
    for page in pages:
        placeholder = _get_placeholder(current_page, page, context, name)
        if placeholder is None:
            continue
        if not get_plugins(request, placeholder):
            # No plugins at all on this ancestor: keep walking up.
            continue
        content = render_placeholder(placeholder, context, name)
        if content:
            return content
        # NOTE(review): plugins that render to empty content fall through to
        # the next ancestor rather than stopping the search here.
    # Fallback: always render the page's own placeholder, even when empty.
    placeholder = _get_placeholder(current_page, current_page, context, name)
    return render_placeholder(placeholder, context, name)
class PlaceholderParser(Parser):
    """Parser that treats {% placeholder %} as a block tag only when
    the literal 'or' was passed among its extra arguments."""

    def parse_blocks(self):
        # extra_bits may arrive either as a raw list or wrapped with a
        # .value attribute depending on how the options were resolved,
        # hence the getattr fallbacks on both levels.
        for bit in getattr(self.kwargs['extra_bits'], 'value', self.kwargs['extra_bits']):
            if getattr(bit, 'value', bit.var.value) == 'or':
                # 'or' present: consume up to {% endplaceholder %}.
                return super(PlaceholderParser, self).parse_blocks()
        # No 'or': behave as a simple (non-block) tag.
        return
class PlaceholderOptions(Options):
    """Options subclass wiring in the conditional block-tag parser."""

    def get_parser_class(self):
        # PlaceholderParser decides at parse time whether this is a block tag.
        return PlaceholderParser
class Placeholder(Tag):
    """
    This template node is used to output page content and
    is also used in the admin to dynamically generate input fields.

    eg: {% placeholder "placeholder_name" %}

    {% placeholder "sidebar" inherit %}

    {% placeholder "footer" inherit or %}
        <a href="/about/">About us</a>
    {% endplaceholder %}

    Keyword arguments:
    name -- the name of the placeholder
    width -- additional width attribute (integer) which gets added to the plugin context
    (deprecated, use `{% with 320 as width %}{% placeholder "foo"}{% endwith %}`)
    inherit -- optional argument which if given will result in inheriting
    the content of the placeholder with the same name on parent pages
    or -- optional argument which if given will make the template tag a block
    tag whose content is shown if the placeholder is empty
    """
    name = 'placeholder'
    options = PlaceholderOptions(
        Argument('name', resolve=False),
        MultiValueArgument('extra_bits', required=False, resolve=False),
        blocks=[
            ('endplaceholder', 'nodelist'),
        ]
    )

    def render_tag(self, context, name, extra_bits, nodelist=None):
        validate_placeholder_name(name)
        width = None
        inherit = False
        for bit in extra_bits:
            if bit == 'inherit':
                inherit = True
            elif bit.isdigit():
                # A bare integer argument is the deprecated width option.
                width = int(bit)
                import warnings
                warnings.warn(
                    "The width parameter for the placeholder tag is deprecated.",
                    DeprecationWarning
                )
        if not 'request' in context:
            # No request available: placeholders cannot be resolved.
            return ''
        request = context['request']
        if width:
            context.update({'width': width})
        page = request.current_page
        if not page or page == 'dummy':
            # No CMS page attached: fall back to the inline default content
            # given via 'or' ... {% endplaceholder %}, if any.
            if nodelist:
                return nodelist.render(context)
            return ''
        content = get_placeholder_content(context, request, page, name, inherit)
        if not content and nodelist:
            # Placeholder rendered empty: use the block's default content.
            return nodelist.render(context)
        return content

    def get_name(self):
        # The name argument arrives as a quoted template literal;
        # strip either kind of quote.
        return self.kwargs['name'].var.value.strip('"').strip("'")
register.tag(Placeholder)
class PageAttribute(Tag):
    """
    This template node is used to output attribute from a page such
    as its title or slug.

    Synopsis
         {% page_attribute "field-name" %}
         {% page_attribute "field-name" page_lookup %}

    Example
         {# Output current page's page_title attribute: #}
         {% page_attribute "page_title" %}
         {# Output page_title attribute of the page with reverse_id "the_page": #}
         {% page_attribute "page_title" "the_page" %}
         {# Output slug attribute of the page with pk 10: #}
         {% page_attribute "slug" 10 %}

    Keyword arguments:
    field-name -- the name of the field to output. Use one of:
    - title
    - menu_title
    - page_title
    - slug
    - meta_description
    - meta_keywords

    page_lookup -- lookup argument for Page, if omitted field-name of current page is returned.
    See _get_page_by_untyped_arg() for detailed information on the allowed types and their interpretation
    for the page_lookup argument.
    """
    name = 'page_attribute'
    options = Options(
        Argument('name', resolve=False),
        Argument('page_lookup', required=False, default=None)
    )
    # Whitelist of attributes with a matching get_<name>() accessor on Page.
    valid_attributes = [
        "title",
        "slug",
        "meta_description",
        "meta_keywords",
        "page_title",
        "menu_title"
    ]

    def render_tag(self, context, name, page_lookup):
        if not 'request' in context:
            return ''
        # Attribute names are matched case-insensitively.
        name = name.lower()
        request = context['request']
        lang = get_language_from_request(request)
        page = _get_page_by_untyped_arg(page_lookup, request, get_site_id(None))
        if page == "dummy":
            return ''
        if page and name in self.valid_attributes:
            # Dispatch to the page's get_<name>() accessor with language
            # fallback enabled.
            f = getattr(page, "get_%s" % name)
            return f(language=lang, fallback=True)
        return ''
register.tag(PageAttribute)
class CleanAdminListFilter(InclusionTag):
    """Render an admin list filter with duplicate query strings collapsed."""
    template = 'admin/filter.html'
    name = 'clean_admin_list_filter'
    options = Options(
        Argument('cl'),
        Argument('spec'),
    )

    def get_context(self, context, cl, spec):
        # Sort by query string so duplicates become adjacent, then keep only
        # the first occurrence of each query string.
        ordered = sorted(spec.choices(cl), key=lambda choice: choice['query_string'])
        unique_choices = []
        last_seen = None
        for choice in ordered:
            current = choice['query_string']
            if current != last_seen:
                unique_choices.append(choice)
                last_seen = current
        return {'title': spec.title(), 'choices': unique_choices}
def _show_placeholder_for_page(context, placeholder_name, page_lookup, lang=None,
                               site=None, cache_result=True):
    """
    Shows the content of a page with a placeholder name and given lookup
    arguments in the given language.
    This is useful if you want to have some more or less static content that is
    shared among many pages, such as a footer.

    See _get_page_by_untyped_arg() for detailed information on the allowed types
    and their interpretation for the page_lookup argument.
    """
    validate_placeholder_name(placeholder_name)
    request = context.get('request', False)
    site_id = get_site_id(site)
    if not request:
        return {'content': ''}
    if lang is None:
        lang = get_language_from_request(request)
    content = None
    if cache_result:
        # Cache key is scoped to (lookup, language, site) plus the slot name.
        base_key = _get_cache_key('_show_placeholder_for_page', page_lookup, lang, site_id)
        cache_key = _clean_key('%s_placeholder:%s' % (base_key, placeholder_name))
        content = cache.get(cache_key)
    if not content:
        page = _get_page_by_untyped_arg(page_lookup, request, site_id)
        if not page:
            return {'content': ''}
        try:
            placeholder = page.placeholders.get(slot=placeholder_name)
        except PlaceholderModel.DoesNotExist:
            # A missing slot is only fatal in DEBUG; production renders nothing.
            if settings.DEBUG:
                raise
            return {'content': ''}
        baseqs = get_cmsplugin_queryset(request)
        # Only top-level plugins (parent__isnull) for this slot and language;
        # child plugins are rendered by their parents.
        plugins = baseqs.filter(
            placeholder=placeholder,
            language=lang,
            placeholder__slot__iexact=placeholder_name,
            parent__isnull=True
        ).order_by('position').select_related()
        c = render_plugins(plugins, context, placeholder)
        content = "".join(c)
        if cache_result:
            cache.set(cache_key, content, settings.CMS_CACHE_DURATIONS['content'])
    if content:
        return {'content': mark_safe(content)}
    return {'content': ''}
class ShowPlaceholderById(InclusionTag):
    """Render a named placeholder from the page matched by ``reverse_id``."""
    template = 'cms/content.html'
    name = 'show_placeholder_by_id'
    options = Options(
        Argument('placeholder_name'),
        Argument('reverse_id'),
        Argument('lang', required=False, default=None),
        Argument('site', required=False, default=None),
    )

    def get_context(self, *args, **kwargs):
        # Delegate the heavy lifting to the shared helper.
        return _show_placeholder_for_page(**self.get_kwargs(*args, **kwargs))

    def get_kwargs(self, context, placeholder_name, reverse_id, lang, site):
        # Map the tag's arguments onto _show_placeholder_for_page's signature.
        return dict(
            context=context,
            placeholder_name=placeholder_name,
            page_lookup=reverse_id,
            lang=lang,
            site=site,
        )
register.tag(ShowPlaceholderById)
register.tag('show_placeholder', ShowPlaceholderById)
class ShowUncachedPlaceholderById(ShowPlaceholderById):
    """Same as show_placeholder_by_id, but always bypasses the cache."""
    name = 'show_uncached_placeholder_by_id'

    def get_kwargs(self, *args, **kwargs):
        kwargs = super(ShowUncachedPlaceholderById, self).get_kwargs(*args, **kwargs)
        # Force a fresh render on every request.
        kwargs.update(cache_result=False)
        return kwargs
register.tag(ShowUncachedPlaceholderById)
register.tag('show_uncached_placeholder', ShowUncachedPlaceholderById)
class CMSToolbar(InclusionTag):
    """Render the CMS toolbar when the request's toolbar says it is visible."""
    template = 'cms/toolbar/toolbar.html'
    name = 'cms_toolbar'

    def render(self, context):
        request = context.get('request', None)
        toolbar = getattr(request, 'toolbar', None) if request else None
        # Render nothing unless a toolbar exists and wants to be shown.
        if not (toolbar and toolbar.show_toolbar):
            return ''
        return super(CMSToolbar, self).render(context)

    def get_context(self, context):
        context['CMS_TOOLBAR_CONFIG'] = context['request'].toolbar.as_json(context)
        return context
register.tag(CMSToolbar)
|
{
"content_hash": "78a507a8d05e810aaaeaafa121a73948",
"timestamp": "",
"source": "github",
"line_count": 421,
"max_line_length": 105,
"avg_line_length": 35.28266033254157,
"alnum_prop": 0.6310084825636192,
"repo_name": "vipins/ccccms",
"id": "e05d9a06e3dba000810f7fa836161d1e7bbad0bf",
"size": "14878",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "env/Lib/site-packages/cms/templatetags/cms_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "309380"
},
{
"name": "C++",
"bytes": "136422"
},
{
"name": "CSS",
"bytes": "250114"
},
{
"name": "JavaScript",
"bytes": "626303"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "11038514"
},
{
"name": "Shell",
"bytes": "889"
},
{
"name": "XSLT",
"bytes": "5122"
}
],
"symlink_target": ""
}
|
"""
* Copyright 2012-14 Justin A. Debrabant <debrabant@cs.brown.edu> and Matteo Riondato <matteo@cs.brown.edu>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
"""
import os, sys
def errorExit(msg):
    """Write *msg* to standard error, then terminate with exit status 1."""
    sys.stderr.write(msg)
    raise SystemExit(1)
def main():
    """Filter FILE to stdout, dropping lines with more than MAXLEN fields.

    Usage: prog MAXLEN FILE. Kept lines go to stdout; the number of deleted
    lines is reported on stderr. Returns 0 on success; exits via errorExit()
    on bad arguments.
    """
    if len(sys.argv) != 3:
        errorExit("Usage: {} MAXLEN FILE\n".format(os.path.basename(sys.argv[0])))
    # Validate MAXLEN explicitly: a non-integer argument used to raise an
    # uncaught ValueError traceback instead of a clean usage error.
    try:
        maxlen = int(sys.argv[1])
    except ValueError:
        errorExit("MAXLEN must be an integer, got '{}'\n".format(sys.argv[1]))
    fileName = sys.argv[2]
    if not os.path.isfile(fileName):
        errorExit("{} does not exist, or is not a file\n".format(fileName))
    deletedLines = 0
    with open(fileName, 'rt') as FILE:
        for line in FILE:
            # A "long" line has more than maxlen whitespace-separated fields.
            if len(line.split()) <= maxlen:
                sys.stdout.write(line)
            else:
                deletedLines += 1
    sys.stderr.write("{}: {} lines deleted from {}\n".format(sys.argv[0],
                     deletedLines, fileName))
    return 0


if __name__ == "__main__":
    main()
|
{
"content_hash": "404652048541ef275c6487cd9bad14a9",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 107,
"avg_line_length": 32.955555555555556,
"alnum_prop": 0.6271072151045178,
"repo_name": "jdebrabant/parallel_arules",
"id": "bfc6f6b9aa78cf232a269d73fd4ae654ddc2c89a",
"size": "1483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/util/deleteLongLines.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "416965"
},
{
"name": "Matlab",
"bytes": "1675"
},
{
"name": "Python",
"bytes": "10921"
},
{
"name": "Shell",
"bytes": "1876"
}
],
"symlink_target": ""
}
|
__author__ = 'Andrew Hawker <andrew.r.hawker@gmail.com>'

from flask import Blueprint

# Blueprint grouping score-related API endpoints under /api/scores.
scores = Blueprint('scores', __name__, url_prefix='/api/scores')


@scores.route('/')
def f():
    # Placeholder endpoint at GET /api/scores/; presumably to be replaced
    # with real score data -- confirm against the API spec.
    return 'hello world'
|
{
"content_hash": "6f611e8ff23525a7b657ea3be26607c0",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 64,
"avg_line_length": 22.444444444444443,
"alnum_prop": 0.6782178217821783,
"repo_name": "ahawker/jpool",
"id": "43a183d39246254c79aaa620713353f5ac917c61",
"size": "202",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "jpool/scores/scores.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "600"
}
],
"symlink_target": ""
}
|
import re
from importlib import import_module
from django.conf import settings
from django.contrib.admin.templatetags.admin_list import (result_headers,
result_hidden_fields,
results)
from django.urls import reverse
from django.template import Library
from .base import AdminReadonlyField, Inline
from .base import Layout, Fieldset, Row
from .compat import simple_tag
register = Library()
# Extracts the value="..." attribute from rendered widget HTML (greedy match).
# Raw string: the old non-raw literal relied on '\"' collapsing to '"'.
CL_VALUE_RE = re.compile(r'value="(.*)"')
def get_admin_site():
    """Return the admin site instance named by settings.DJADMIN_SITE.

    Falls back to the stock 'django.contrib.admin.site' instance.
    """
    dotted_path = getattr(
        settings,
        'DJADMIN_SITE',
        'django.contrib.admin.site'
    )
    module_path, attr_name = dotted_path.rsplit('.', 1)
    module = import_module(module_path)
    return getattr(module, attr_name)


site = get_admin_site()
@register.simple_tag
def fieldset_layout(adminform, inline_admin_formsets):
    """Return the Layout object used to render ``adminform``.

    If the ModelAdmin declares a ``layout``, bind any Inline elements that
    still reference an inline *class* to the matching bound formset and
    return that layout. Otherwise build a default Layout from the admin
    fieldsets plus all inline formsets.
    """
    layout = getattr(adminform.model_admin, 'layout', None)
    if layout is not None:
        for element in layout.elements:
            # TODO Ugly hack to substitute inline classes to instances
            if isinstance(element, Inline) and isinstance(element.inline, type):
                for inline in inline_admin_formsets:
                    if inline.formset.model == element.inline.model:
                        element.inline = inline
        return layout
    sets = []
    for fieldset in adminform:
        fields = []
        for line in fieldset:
            line_fields = []
            for fieldset_field in line:
                field = None
                if getattr(fieldset_field, 'is_readonly', False):
                    # Read-only fields need the AdminReadonlyField wrapper.
                    field = AdminReadonlyField(fieldset_field)
                else:
                    field = fieldset_field.field.name
                line_fields.append(field)
            if len(line_fields) == 1:
                fields.append(line_fields[0])
            else:
                # Several fields declared on one admin line render as a Row.
                fields.append(Row(*line_fields))
        if fieldset.name:
            sets.append(Fieldset(fieldset.name, *fields))
        else:
            # Unnamed fieldsets contribute their fields directly.
            sets += fields
    for inline in inline_admin_formsets:
        sets.append(Inline(inline))
    return Layout(*sets)
def admin_related_field_urls(bound_field):
    """Build template context for a related-field widget.

    Returns the rendered inner widget plus the add/change/delete popup URLs
    that the widget's RelatedFieldWidgetWrapper allows.
    """
    from django.contrib.admin.views.main import IS_POPUP_VAR, TO_FIELD_VAR
    rel_widget = bound_field.field.widget
    rel_opts = rel_widget.rel.model._meta
    info = (rel_opts.app_label, rel_opts.model_name)
    # Propagate the wrapper's choices onto the wrapped widget before render.
    rel_widget.widget.choices = rel_widget.choices
    url_params = '&'.join("%s=%s" % param for param in [
        (TO_FIELD_VAR, rel_widget.rel.get_related_field().name),
        (IS_POPUP_VAR, 1),
    ])
    context = {
        'widget': rel_widget.widget.render(bound_field.name, bound_field.value()),
        'name': bound_field.name,
        'url_params': url_params,
        'model': rel_opts.verbose_name,
    }
    # '__fk__' is a placeholder the admin JS replaces with the selected pk.
    if rel_widget.can_change_related:
        change_related_template_url = rel_widget.get_related_url(info, 'change', '__fk__')
        context.update(
            can_change_related=True,
            change_related_template_url=change_related_template_url,
        )
    if rel_widget.can_add_related:
        add_related_url = rel_widget.get_related_url(info, 'add')
        context.update(
            can_add_related=True,
            add_related_url=add_related_url,
        )
    if rel_widget.can_delete_related:
        delete_related_template_url = rel_widget.get_related_url(info, 'delete', '__fk__')
        context.update(
            can_delete_related=True,
            delete_related_template_url=delete_related_template_url,
        )
    return context
simple_tag(register, admin_related_field_urls)
def admin_select_related_link(bound_field):
    """
    {% admin_select_related_link bound_field as rel_field_urls %}
    """
    widget = bound_field.field.widget
    related_model = widget.rel.model
    # Only linkable when the related model is registered with this admin site.
    if related_model not in widget.admin_site._registry:
        return {}
    opts = related_model._meta
    related_url = reverse(
        'admin:%s_%s_changelist' % (opts.app_label, opts.model_name),
        current_app=widget.admin_site.name,
    )
    params = widget.url_parameters()
    if params:
        query = '&'.join('%s=%s' % (k, v) for k, v in params.items())
        related_url = related_url + '?' + query
    return {'related_url': related_url}
simple_tag(register, admin_select_related_link)
@register.inclusion_tag("admin/change_list_results.html")
def result_sortable_list(cl):
    """
    Displays the headers and data list together
    """
    if not cl.params.get('o',None):
        # Disable sortable when admin filter data from result table according to fields
        from ..models import Sortable
        cl.result_list = Sortable.get_sortable_row(cl.opts.model_name, cl.result_list)
    headers = list(result_headers(cl))
    # Count how many columns are currently sorted; the template uses this to
    # decide whether to show per-column sort-priority controls.
    num_sorted_fields = 0
    for h in headers:
        if h['sortable'] and h['sorted']:
            num_sorted_fields += 1
    return {'cl': cl,
            'result_hidden_fields': list(result_hidden_fields(cl)),
            'result_headers': headers,
            'num_sorted_fields': num_sorted_fields,
            'results': list(results(cl))}
@register.simple_tag
def list_count(app_label, model_name):
    """Return the total number of rows for the given model."""
    from django.apps import apps
    model_cls = apps.get_model(app_label=app_label, model_name=model_name)
    return model_cls.objects.count()
@register.simple_tag
def dashboard_icon(model_name):
    """Return the icon configured for ``model_name``, or the default icon."""
    default = "mdi-format-align-center"
    if not hasattr(settings, 'DASHBOARD_ICONS'):
        return default
    # Fall back to the default for unknown models or falsy entries.
    return settings.DASHBOARD_ICONS.get(model_name) or default
|
{
"content_hash": "e9bec724b01f6a5a45e469e232504d28",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 90,
"avg_line_length": 31.620111731843576,
"alnum_prop": 0.5996466431095406,
"repo_name": "sainipray/djadmin",
"id": "4ebab2414883df6f6b987f36ce61f3e5ebb46c77",
"size": "5660",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "djadmin/templatetags/djadmin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "52955"
},
{
"name": "HTML",
"bytes": "250399"
},
{
"name": "JavaScript",
"bytes": "1263830"
},
{
"name": "Python",
"bytes": "135708"
}
],
"symlink_target": ""
}
|
from ddt import data, ddt
from django.core.exceptions import ValidationError
from rest_framework import test
from waldur_core.core import validators
from waldur_core.structure.models import Customer
class NameValidationTest(test.APITransactionTestCase):
    """Model-level validation of the Customer name field."""

    def test_name_should_have_at_least_one_non_whitespace_character(self):
        # A whitespace-only name must be rejected by full_clean().
        with self.assertRaises(ValidationError):
            customer = Customer(name='   ')
            customer.full_clean()
@ddt
class MinCronValueValidatorTest(test.APITransactionTestCase):
    """MinCronValueValidator(limit_value=1) enforces a >= 1 hour schedule."""

    @data('*/1 * * * *', '*/10 * * * *', '*/59 * * * *')
    def test_validator_raises_validation_error_if_given_schedule_value_is_less_than_1_hours(
        self, value
    ):
        # Minute-granularity schedules fire more often than once per hour.
        validator = validators.MinCronValueValidator(limit_value=1)
        with self.assertRaises(ValidationError):
            validator(value)

    @data('hello world', '* * * * * *', '*/59')
    def test_validator_raises_validation_error_if_given_format_is_not_valid(
        self, value
    ):
        # Malformed crontab expressions (wrong field count, garbage) fail too.
        validator = validators.MinCronValueValidator(limit_value=1)
        with self.assertRaises(ValidationError):
            validator(value)

    @data(
        '0 * * * *',
        '0 0 * * *',
        '0 0 0 * *',
        '0 0 * * 0',
        '0 0 1 * *',
        '0 0 1 1 *',
        '0 0 1 1 *',
    )
    def test_validator_does_not_raise_error_if_schedule_is_greater_than_or_equal_1_hour(
        self, value
    ):
        validator = validators.MinCronValueValidator(limit_value=1)
        validator(value)
class CIDRListValidatorTest(test.APITransactionTestCase):
    """validate_cidr_list accepts comma-separated CIDRs (IPv4 and IPv6)."""

    def test_validator_accepts_valid_cidr_list(self):
        validators.validate_cidr_list('fc00::/7, 127.0.0.1/32')

    def test_validator_accepts_empty_list(self):
        # Whitespace-only input counts as an empty, valid list.
        validators.validate_cidr_list(' ')

    def test_invalid_values_are_rejected(self):
        with self.assertRaises(ValidationError):
            validators.validate_cidr_list('hello/25')

    def test_space_separated_list_rejected(self):
        # Entries must be separated by commas, not bare spaces.
        with self.assertRaises(ValidationError):
            validators.validate_cidr_list('fc00::/7 127.0.0.1/32')
|
{
"content_hash": "36c20ca767358aad26cc3fcbc28773d4",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 92,
"avg_line_length": 33.65079365079365,
"alnum_prop": 0.6391509433962265,
"repo_name": "opennode/waldur-mastermind",
"id": "7c1241adbad7bcb7f9dde2335c90afa116993815",
"size": "2120",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "src/waldur_core/core/tests/test_validators.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4429"
},
{
"name": "Dockerfile",
"bytes": "6258"
},
{
"name": "HTML",
"bytes": "42329"
},
{
"name": "JavaScript",
"bytes": "729"
},
{
"name": "Python",
"bytes": "5520019"
},
{
"name": "Shell",
"bytes": "15429"
}
],
"symlink_target": ""
}
|
"""Utilities and helper functions."""
import collections
import datetime
import decimal
import errno
import functools
import hashlib
import multiprocessing
import netaddr
import os
import random
import signal
import socket
import sys
import tempfile
import uuid
import debtcollector
from eventlet.green import subprocess
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
import six
from stevedore import driver
from neutron.common import constants as n_const
from neutron.i18n import _LE
# Canonical timestamp format used when (de)serializing datetimes.
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
LOG = logging.getLogger(__name__)
# All neutron inter-process locks share this prefix so external tooling can
# identify them; use the `synchronized` decorator below to acquire one.
SYNCHRONIZED_PREFIX = 'neutron-'
synchronized = lockutils.synchronized_with_prefix(SYNCHRONIZED_PREFIX)
class cache_method_results(object):
    """This decorator is intended for object methods only.

    Results are memoized in the instance's ``_cache`` (any object exposing
    get/set), keyed by the method's qualified name plus its arguments.
    Instances without a usable ``_cache`` fall back to direct calls.
    """
    def __init__(self, func):
        self.func = func
        functools.update_wrapper(self, func)
        # Emit the "no _cache" debug message only once per decorated method.
        self._first_call = True
        # Unique sentinel so a cached None is distinguishable from a miss.
        self._not_cached = object()

    def _get_from_cache(self, target_self, *args, **kwargs):
        func_name = "%(module)s.%(class)s.%(func_name)s" % {
            'module': target_self.__module__,
            'class': target_self.__class__.__name__,
            'func_name': self.func.__name__,
        }
        key = (func_name,) + args
        if kwargs:
            # dict2tuple makes kwargs order-independent and hashable.
            key += dict2tuple(kwargs)
        try:
            item = target_self._cache.get(key, self._not_cached)
        except TypeError:
            # Unhashable argument: log and fall through to an uncached call.
            LOG.debug("Method %(func_name)s cannot be cached due to "
                      "unhashable parameters: args: %(args)s, kwargs: "
                      "%(kwargs)s",
                      {'func_name': func_name,
                       'args': args,
                       'kwargs': kwargs})
            return self.func(target_self, *args, **kwargs)
        if item is self._not_cached:
            item = self.func(target_self, *args, **kwargs)
            target_self._cache.set(key, item, None)
        return item

    def __call__(self, target_self, *args, **kwargs):
        if not hasattr(target_self, '_cache'):
            raise NotImplementedError(
                "Instance of class %(module)s.%(class)s must contain _cache "
                "attribute" % {
                    'module': target_self.__module__,
                    'class': target_self.__class__.__name__})
        if not target_self._cache:
            if self._first_call:
                LOG.debug("Instance of class %(module)s.%(class)s doesn't "
                          "contain attribute _cache therefore results "
                          "cannot be cached for %(func_name)s.",
                          {'module': target_self.__module__,
                           'class': target_self.__class__.__name__,
                           'func_name': self.func.__name__})
                self._first_call = False
            return self.func(target_self, *args, **kwargs)
        return self._get_from_cache(target_self, *args, **kwargs)

    def __get__(self, obj, objtype):
        # Descriptor protocol: bind the wrapped call to the instance so the
        # decorator works on methods accessed through the class.
        return functools.partial(self.__call__, obj)
@debtcollector.removals.remove(message="Unused in Liberty release.")
def read_cached_file(filename, cache_info, reload_func=None):
    """Read from a file if it has been modified.

    :param cache_info: dictionary to hold opaque cache.
    :param reload_func: optional function to be called with data when
                        file is reloaded due to a modification.

    :returns: data from file
    """
    mtime = os.path.getmtime(filename)
    if not cache_info or mtime != cache_info.get('mtime'):
        # Cache miss or the file changed on disk: (re)read and update.
        LOG.debug("Reloading cached file %s", filename)
        with open(filename) as fap:
            cache_info['data'] = fap.read()
        cache_info['mtime'] = mtime
        if reload_func:
            reload_func(cache_info['data'])
    return cache_info['data']
@debtcollector.removals.remove(message="Unused in Liberty release.")
def find_config_file(options, config_file):
    """Return the first config file found.

    We search for the paste config file in the following order:
    * If --config-file option is used, use that
    * Search for the configuration files via common cfg directories
    :retval Full path to config file, or None if no config file found
    """
    fix_path = lambda p: os.path.abspath(os.path.expanduser(p))
    if options.get('config_file'):
        if os.path.exists(options['config_file']):
            return fix_path(options['config_file'])

    dir_to_common = os.path.dirname(os.path.abspath(__file__))
    root = os.path.join(dir_to_common, '..', '..', '..', '..')
    # Handle standard directory search for the config file
    config_file_dirs = [fix_path(os.path.join(os.getcwd(), 'etc')),
                        fix_path(os.path.join('~', '.neutron-venv', 'etc',
                                              'neutron')),
                        fix_path('~'),
                        os.path.join(cfg.CONF.state_path, 'etc'),
                        os.path.join(cfg.CONF.state_path, 'etc', 'neutron'),
                        fix_path(os.path.join('~', '.local',
                                              'etc', 'neutron')),
                        '/usr/etc/neutron',
                        '/usr/local/etc/neutron',
                        '/etc/neutron/',
                        '/etc']

    if 'plugin' in options:
        # Plugin-specific config lives under .../neutron/plugins/<plugin>.
        config_file_dirs = [
            os.path.join(x, 'neutron', 'plugins', options['plugin'])
            for x in config_file_dirs
        ]

    if os.path.exists(os.path.join(root, 'plugins')):
        # Source checkout: also consider each in-tree plugin's etc directory.
        plugins = [fix_path(os.path.join(root, 'plugins', p, 'etc'))
                   for p in os.listdir(os.path.join(root, 'plugins'))]
        plugins = [p for p in plugins if os.path.isdir(p)]
        config_file_dirs.extend(plugins)

    for cfg_dir in config_file_dirs:
        cfg_file = os.path.join(cfg_dir, config_file)
        if os.path.exists(cfg_file):
            return cfg_file
    # Implicitly returns None when nothing is found.
def ensure_dir(dir_path):
    """Ensure a directory with 755 permissions mode."""
    try:
        os.makedirs(dir_path, 0o755)
    except OSError as exc:
        # Swallow only "already exists"; propagate every other failure.
        if exc.errno == errno.EEXIST:
            return
        raise
def _subprocess_setup():
    """Restore default SIGPIPE handling in a child process (preexec hook)."""
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False,
                     env=None, preexec_fn=_subprocess_setup, close_fds=True):
    """Thin wrapper over eventlet's green ``subprocess.Popen``.

    Defaults restore SIGPIPE handling in the child (via _subprocess_setup)
    and close inherited file descriptors.
    """
    return subprocess.Popen(args, shell=shell, stdin=stdin, stdout=stdout,
                            stderr=stderr, preexec_fn=preexec_fn,
                            close_fds=close_fds, env=env)
def parse_mappings(mapping_list, unique_values=True):
    """Parse a list of mapping strings into a dictionary.

    :param mapping_list: a list of strings of the form '<key>:<value>'
    :param unique_values: values must be unique if True
    :returns: a dict mapping keys to values
    :raises ValueError: on malformed, empty-sided, or duplicate entries
    """
    mappings = {}
    for raw in mapping_list:
        raw = raw.strip()
        if not raw:
            # Blank entries are silently skipped.
            continue
        parts = raw.split(':')
        if len(parts) != 2:
            raise ValueError(_("Invalid mapping: '%s'") % raw)
        key = parts[0].strip()
        if not key:
            raise ValueError(_("Missing key in mapping: '%s'") % raw)
        value = parts[1].strip()
        if not value:
            raise ValueError(_("Missing value in mapping: '%s'") % raw)
        if key in mappings:
            raise ValueError(_("Key %(key)s in mapping: '%(mapping)s' not "
                               "unique") % {'key': key, 'mapping': raw})
        if unique_values and value in mappings.values():
            raise ValueError(_("Value %(value)s in mapping: '%(mapping)s' "
                               "not unique") % {'value': value,
                                                'mapping': raw})
        mappings[key] = value
    return mappings
def get_hostname():
    """Return the local machine's hostname."""
    return socket.gethostname()
def get_first_host_ip(net, ip_version):
    """Return the first usable host address of ``net`` (network address + 1)
    as a string, for the given IP version."""
    return str(netaddr.IPAddress(net.first + 1, ip_version))
def compare_elements(a, b):
    """Compare elements if a and b have same elements.

    This method doesn't consider ordering (or duplicates); None is treated
    as an empty collection.
    """
    return set(a or []) == set(b or [])
def safe_sort_key(value):
    """Return value hash or build one for dictionaries.

    Mappings are unorderable, so they are replaced by their deterministic
    sorted item list; any other value is returned unchanged.
    """
    # collections.Mapping was removed from the top-level collections
    # namespace in Python 3.10; resolve whichever spelling is available
    # (Python 2 has only collections.Mapping).
    mapping_type = getattr(collections, 'Mapping', None)
    if mapping_type is None:
        from collections import abc as collections_abc
        mapping_type = collections_abc.Mapping
    if isinstance(value, mapping_type):
        return sorted(value.items())
    return value
def dict2str(dic):
    """Serialize a dict to a deterministic 'k1=v1,k2=v2' string, sorted by key."""
    # dict.items() is iterable on both Python 2 and 3; the six.iteritems
    # indirection was unnecessary here.
    return ','.join("%s=%s" % (key, val)
                    for key, val in sorted(dic.items()))
def str2dict(string):
    """Inverse of dict2str: parse 'k1=v1,k2=v2' into a dict.

    Only the first '=' per entry splits key from value, so values may
    themselves contain '='.
    """
    return dict(pair.split('=', 1) for pair in string.split(','))
def dict2tuple(d):
    """Return d's items as a sorted tuple (a hashable, deterministic key)."""
    return tuple(sorted(d.items()))
def diff_list_of_dict(old_list, new_list):
    """Return (added, removed) dict lists between old_list and new_list.

    Dicts are compared by their dict2str serialization, so ordering inside
    each dict is irrelevant.
    """
    old_set = set(dict2str(d) for d in old_list)
    new_set = set(dict2str(d) for d in new_list)
    added = [str2dict(s) for s in new_set - old_set]
    removed = [str2dict(s) for s in old_set - new_set]
    return added, removed
def is_extension_supported(plugin, ext_alias):
    """Return True if ``plugin`` advertises ``ext_alias`` among its
    supported extension aliases."""
    aliases = getattr(plugin, "supported_extension_aliases", [])
    return ext_alias in aliases
def log_opt_values(log):
    """Dump all registered oslo.config option values to ``log`` at DEBUG."""
    cfg.CONF.log_opt_values(log, logging.DEBUG)
def get_random_mac(base_mac):
    """Build a MAC string: first three octets from ``base_mac``, the rest
    randomized. A non-'00' fourth base octet is kept fixed as well.

    :param base_mac: sequence of octet strings, e.g. ['fa','16','3e','00',...]
    """
    mac = [int(octet, 16) for octet in base_mac[:3]]
    mac.extend(random.randint(0x00, 0xff) for _ in range(3))
    if base_mac[3] != '00':
        mac[3] = int(base_mac[3], 16)
    return ':'.join("%02x" % value for value in mac)
def get_random_string(length):
    """Get a random hex string of the specified length.

    based on Cinder library
    cinder/transfer/api.py

    NOTE: not cryptographically secure -- do not use for security tokens.
    """
    # The original re-seeded the *global* RNG with the current microsecond on
    # every call; that adds no entropy and clobbers shared random state for
    # other callers, so the seeding was removed.
    rndstr = ""
    while len(rndstr) < length:
        base_str = str(random.random()).encode('utf-8')
        rndstr += hashlib.sha224(base_str).hexdigest()
    return rndstr[0:length]
def get_dhcp_agent_device_id(network_id, host):
    """Return the deterministic device id for a DHCP port on ``host``.

    Only the short hostname is used, so an FQDN and a bare hostname map to
    the same id regardless of which form the caller passes.
    """
    short_hostname = host.split('.')[0]
    host_uuid = uuid.uuid5(uuid.NAMESPACE_DNS, str(short_hostname))
    return 'dhcp%s-%s' % (host_uuid, network_id)
def cpu_count():
    """Return the number of CPUs, defaulting to 1 where undeterminable."""
    try:
        count = multiprocessing.cpu_count()
    except NotImplementedError:
        count = 1
    return count
class exception_logger(object):
    """Wrap a function and log raised exception

    :param logger: the logger to log the exception default is LOG.exception

    :returns: origin value if no exception raised; re-raise the exception if
              any occurred
    """
    def __init__(self, logger=None):
        self.logger = logger

    def __call__(self, func):
        if self.logger is None:
            # Default to an exception-level logger named after the wrapped
            # function's module.
            LOG = logging.getLogger(func.__module__)
            self.logger = LOG.exception

        # functools.wraps preserves the wrapped function's name/doc/module
        # for introspection and debugging (the original wrapper lost them).
        @functools.wraps(func)
        def call(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                # Log, then re-raise the original exception unchanged.
                with excutils.save_and_reraise_exception():
                    self.logger(e)
        return call
def is_dvr_serviced(device_owner):
    """Check if the port need to be serviced by DVR

    Helper function to check the device owners of the
    ports in the compute and service node to make sure
    if they are required for DVR or any service directly or
    indirectly associated with DVR.
    """
    # Load balancer and DHCP ports are DVR-serviced, as is any VM port
    # (device_owner of the form 'compute:<az>').
    dvr_serviced_device_owners = (n_const.DEVICE_OWNER_LOADBALANCER,
                                  n_const.DEVICE_OWNER_LOADBALANCERV2,
                                  n_const.DEVICE_OWNER_DHCP)
    return (device_owner.startswith('compute:') or
            device_owner in dvr_serviced_device_owners)
@debtcollector.removals.remove(message="Unused in Liberty release.")
def get_keystone_url(conf):
    """Build the Keystone v2.0 endpoint URL from auth config.

    Prefers the explicit ``auth_uri``; otherwise assembles the URL from the
    separate protocol/host/port options.
    """
    if conf.auth_uri:
        auth_uri = conf.auth_uri.rstrip('/')
    else:
        auth_uri = ('%(protocol)s://%(host)s:%(port)s' %
                    {'protocol': conf.auth_protocol,
                     'host': conf.auth_host,
                     'port': conf.auth_port})
    # NOTE(ihrachys): all existing consumers assume version 2.0
    return '%s/v2.0/' % auth_uri
def ip_to_cidr(ip, prefix=None):
    """Convert an ip with no prefix to cidr notation

    :param ip: An ipv4 or ipv6 address.  Convertable to netaddr.IPNetwork.
    :param prefix: Optional prefix.  If None, the default 32 will be used for
        ipv4 and 128 for ipv6.
    :returns: the address in '<ip>/<prefix>' string form
    """
    net = netaddr.IPNetwork(ip)
    if prefix is not None:
        # Can't pass ip and prefix separately.  Must concatenate strings.
        net = netaddr.IPNetwork(str(net.ip) + '/' + str(prefix))
    return str(net)
def fixed_ip_cidrs(fixed_ips):
    """Create a list of a port's fixed IPs in cidr notation.

    :param fixed_ips: A neutron port's fixed_ips dictionary
    """
    # Entries without 'prefixlen' get the per-version default (/32 or /128)
    # via ip_to_cidr.
    return [ip_to_cidr(fixed_ip['ip_address'], fixed_ip.get('prefixlen'))
            for fixed_ip in fixed_ips]
def is_cidr_host(cidr):
    """Determines if the cidr passed in represents a single host network

    :param cidr: Either an ipv4 or ipv6 cidr.
    :returns: True if the cidr is /32 for ipv4 or /128 for ipv6.
    :raises ValueError: raises if cidr does not contain a '/'.  This disallows
        plain IP addresses specifically to avoid ambiguity.
    """
    if '/' not in str(cidr):
        raise ValueError("cidr doesn't contain a '/'")
    net = netaddr.IPNetwork(cidr)
    if net.version == 4:
        return net.prefixlen == n_const.IPv4_BITS
    return net.prefixlen == n_const.IPv6_BITS
def ip_version_from_int(ip_version_int):
    """Map the integer IP version (4 or 6) to its neutron constant.

    :raises ValueError: for any value other than 4 or 6.
    """
    if ip_version_int == 4:
        return n_const.IPv4
    if ip_version_int == 6:
        return n_const.IPv6
    raise ValueError(_('Illegal IP version number'))
def is_port_trusted(port):
    """Used to determine if port can be trusted not to attack network.

    Trust is currently based on the device_owner field starting with
    'network:' since we restrict who can use that in the default
    policy.json file.
    """
    owner = port['device_owner']
    return owner.startswith('network:')
class DelayedStringRenderer(object):
    """Defer a callable until the object is actually stringified.

    Useful when a logging argument is expensive to compute: the wrapped
    callable only runs if the log statement really formats its arguments.
    """
    def __init__(self, function, *args, **kwargs):
        self.function = function
        self.args = args
        self.kwargs = kwargs

    def __str__(self):
        # Invoke lazily, only at formatting time.
        result = self.function(*self.args, **self.kwargs)
        return str(result)
def camelize(s):
    """Convert under_scored / spaced text to CamelCase."""
    words = s.replace('_', ' ').title().split()
    return ''.join(words)
def round_val(val):
    """Round half-up to the nearest int, consistently across Python versions."""
    # we rely on decimal module since it behaves consistently across Python
    # versions (2.x vs. 3.x): round() switched to banker's rounding in 3.x.
    quantized = decimal.Decimal(val).quantize(decimal.Decimal('1'),
                                              rounding=decimal.ROUND_HALF_UP)
    return int(quantized)
def replace_file(file_name, data):
    """Replace the contents of file_name with data in a safe manner.

    First write to a temp file and then rename. Since POSIX renames are
    atomic, the file is unlikely to be corrupted by competing writes.
    We create the tempfile on the same device to ensure that it can be
    renamed.
    """
    target_dir = os.path.dirname(os.path.abspath(file_name))
    with tempfile.NamedTemporaryFile('w+',
                                     dir=target_dir,
                                     delete=False) as tmp:
        tmp.write(data)
        os.chmod(tmp.name, 0o644)
        os.rename(tmp.name, file_name)
def load_class_by_alias_or_classname(namespace, name):
    """Load a class using a stevedore alias or its fully qualified name.

    :param namespace: namespace where the stevedore alias is defined
    :param name: alias or fully qualified class name of the class to be
        loaded
    :returns: the class, if it can be loaded
    :raises ImportError: if the class cannot be loaded by either method
    """
    if not name:
        LOG.error(_LE("Alias or class name is not set"))
        raise ImportError(_("Class not found."))
    try:
        # Try to resolve class by alias
        mgr = driver.DriverManager(namespace, name)
        class_to_load = mgr.driver
    except RuntimeError:
        # Remember the alias failure so it can be logged alongside the
        # class-name failure below.
        e1_info = sys.exc_info()
        # Fallback to class name
        try:
            class_to_load = importutils.import_class(name)
        except (ImportError, ValueError):
            # Both lookups failed: log each with its own traceback and
            # surface a single ImportError to the caller.
            LOG.error(_LE("Error loading class by alias"),
                      exc_info=e1_info)
            LOG.error(_LE("Error loading class by class name"),
                      exc_info=True)
            raise ImportError(_("Class not found."))
    return class_to_load
|
{
"content_hash": "95afb167ec939b5d112ba9b3a7946a0a",
"timestamp": "",
"source": "github",
"line_count": 508,
"max_line_length": 79,
"avg_line_length": 33.84055118110236,
"alnum_prop": 0.5978709790006399,
"repo_name": "glove747/liberty-neutron",
"id": "f0b57e3793c799c33e792cfb97ffbd173f60a63c",
"size": "17914",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/common/utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "7559351"
},
{
"name": "Shell",
"bytes": "14832"
}
],
"symlink_target": ""
}
|
""":func:`~pandas.eval` parsers
"""
import ast
import operator
import sys
import inspect
import tokenize
import datetime
from functools import partial
import pandas as pd
from pandas import compat
from pandas.compat import StringIO, lmap, zip, reduce, string_types
from pandas.core.base import StringMixin
from pandas.core import common as com
from pandas.tools.util import compose
from pandas.computation.ops import (_cmp_ops_syms, _bool_ops_syms,
_arith_ops_syms, _unary_ops_syms, is_term)
from pandas.computation.ops import _reductions, _mathops, _LOCAL_TAG
from pandas.computation.ops import Op, BinOp, UnaryOp, Term, Constant, Div
from pandas.computation.ops import UndefinedVariableError, FuncNode
from pandas.computation.scope import Scope, _ensure_scope
def tokenize_string(source):
    """Yield ``(toknum, tokval)`` pairs for a Python source code string.

    Parameters
    ----------
    source : str
        A Python source code string
    """
    readline = StringIO(source).readline
    for token in tokenize.generate_tokens(readline):
        yield token[0], token[1]
def _rewrite_assign(tok):
"""Rewrite the assignment operator for PyTables expressions that use ``=``
as a substitute for ``==``.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
t : tuple of int, str
Either the input or token or the replacement values
"""
toknum, tokval = tok
return toknum, '==' if tokval == '=' else tokval
def _replace_booleans(tok):
"""Replace ``&`` with ``and`` and ``|`` with ``or`` so that bitwise
precedence is changed to boolean precedence.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
t : tuple of int, str
Either the input or token or the replacement values
"""
toknum, tokval = tok
if toknum == tokenize.OP:
if tokval == '&':
return tokenize.NAME, 'and'
elif tokval == '|':
return tokenize.NAME, 'or'
return toknum, tokval
return toknum, tokval
def _replace_locals(tok):
"""Replace local variables with a syntactically valid name.
Parameters
----------
tok : tuple of int, str
ints correspond to the all caps constants in the tokenize module
Returns
-------
t : tuple of int, str
Either the input or token or the replacement values
Notes
-----
This is somewhat of a hack in that we rewrite a string such as ``'@a'`` as
``'__pd_eval_local_a'`` by telling the tokenizer that ``__pd_eval_local_``
is a ``tokenize.OP`` and to replace the ``'@'`` symbol with it.
"""
toknum, tokval = tok
if toknum == tokenize.OP and tokval == '@':
return tokenize.OP, _LOCAL_TAG
return toknum, tokval
def _preparse(source, f=compose(_replace_locals, _replace_booleans,
                                _rewrite_assign)):
    """Run every token of ``source`` through a token-rewriting function.

    Parameters
    ----------
    source : str
        A Python source code string
    f : callable
        This takes a tuple of (toknum, tokval) as its argument and returns
        a tuple with the same structure but possibly different elements.
        Defaults to the composition of ``_rewrite_assign``,
        ``_replace_booleans``, and ``_replace_locals``.

    Returns
    -------
    s : str
        Valid Python source code

    Notes
    -----
    The `f` parameter can be any callable that takes *and* returns input
    of the form ``(toknum, tokval)``, where ``toknum`` is one of the
    constants from the ``tokenize`` module and ``tokval`` is a string.
    """
    assert callable(f), 'f must be callable'
    rewritten = lmap(f, tokenize_string(source))
    return tokenize.untokenize(rewritten)
def _is_type(t):
"""Factory for a type checking function of type ``t`` or tuple of types."""
return lambda x: isinstance(x.value, t)
# Predicates over a term's wrapped ``.value`` attribute.
_is_list = _is_type(list)
_is_str = _is_type(string_types)
# partition all AST nodes
# Every class exposed by the ast module that is an AST node type.
_all_nodes = frozenset(filter(lambda x: isinstance(x, type) and
                              issubclass(x, ast.AST),
                              (getattr(ast, node) for node in dir(ast))))
def _filter_nodes(superclass, all_nodes=_all_nodes):
    """Filter out AST nodes that are subclasses of ``superclass``."""
    matching = [node.__name__ for node in all_nodes
                if issubclass(node, superclass)]
    return frozenset(matching)
# Names of every AST node class, then bucketed by the abstract grammar
# category each belongs to.
_all_node_names = frozenset(map(lambda x: x.__name__, _all_nodes))
_mod_nodes = _filter_nodes(ast.mod)
_stmt_nodes = _filter_nodes(ast.stmt)
_expr_nodes = _filter_nodes(ast.expr)
_expr_context_nodes = _filter_nodes(ast.expr_context)
_slice_nodes = _filter_nodes(ast.slice)
_boolop_nodes = _filter_nodes(ast.boolop)
_operator_nodes = _filter_nodes(ast.operator)
_unary_op_nodes = _filter_nodes(ast.unaryop)
_cmp_op_nodes = _filter_nodes(ast.cmpop)
_comprehension_nodes = _filter_nodes(ast.comprehension)
_handler_nodes = _filter_nodes(ast.excepthandler)
_arguments_nodes = _filter_nodes(ast.arguments)
_keyword_nodes = _filter_nodes(ast.keyword)
_alias_nodes = _filter_nodes(ast.alias)
# nodes that we don't support directly but are needed for parsing
_hacked_nodes = frozenset(['Assign', 'Module', 'Expr'])
_unsupported_expr_nodes = frozenset(['Yield', 'GeneratorExp', 'IfExp',
                                     'DictComp', 'SetComp', 'Repr', 'Lambda',
                                     'Set', 'AST', 'Is', 'IsNot'])
# these nodes are low priority or won't ever be supported (e.g., AST)
_unsupported_nodes = ((_stmt_nodes | _mod_nodes | _handler_nodes |
                       _arguments_nodes | _keyword_nodes | _alias_nodes |
                       _expr_context_nodes | _unsupported_expr_nodes) -
                      _hacked_nodes)
# we're adding a different assignment in some cases to be equality comparison
# and we don't want `stmt` and friends in their so get only the class whose
# names are capitalized
_base_supported_nodes = (_all_node_names - _unsupported_nodes) | _hacked_nodes
# Sanity check: the supported and unsupported sets must be disjoint.
_msg = 'cannot both support and not support {0}'.format(_unsupported_nodes &
                                                        _base_supported_nodes)
assert not _unsupported_nodes & _base_supported_nodes, _msg
def _node_not_implemented(node_name, cls):
"""Return a function that raises a NotImplementedError with a passed node
name.
"""
def f(self, *args, **kwargs):
raise NotImplementedError("{0!r} nodes are not "
"implemented".format(node_name))
return f
def disallow(nodes):
    """Decorator to disallow certain nodes from parsing. Raises a
    NotImplementedError instead.

    Returns
    -------
    disallowed : callable
    """
    def disallowed(cls):
        cls.unsupported_nodes = ()
        for node in nodes:
            method_name = 'visit_{0}'.format(node)
            cls.unsupported_nodes += (method_name,)
            setattr(cls, method_name, _node_not_implemented(node, cls))
        return cls
    return disallowed
def _op_maker(op_class, op_symbol):
"""Return a function to create an op class with its symbol already passed.
Returns
-------
f : callable
"""
def f(self, node, *args, **kwargs):
"""Return a partial function with an Op subclass with an operator
already passed.
Returns
-------
f : callable
"""
return partial(op_class, op_symbol, *args, **kwargs)
return f
# Maps an operator arity label to the Op subclass used to build it.
_op_classes = {'binary': BinOp, 'unary': UnaryOp}
def add_ops(op_classes):
    """Decorator to add default implementation of ops."""
    def f(cls):
        for op_attr_name, op_class in compat.iteritems(op_classes):
            ops = getattr(cls, '{0}_ops'.format(op_attr_name))
            ops_map = getattr(cls, '{0}_op_nodes_map'.format(op_attr_name))
            for op in ops:
                op_node = ops_map[op]
                if op_node is None:
                    # Operator handled by a hand-written visit method.
                    continue
                setattr(cls, 'visit_{0}'.format(op_node),
                        _op_maker(op_class, op))
        return cls
    return f
@disallow(_unsupported_nodes)
@add_ops(_op_classes)
class BaseExprVisitor(ast.NodeVisitor):
    """Custom ast walker. Parsers of other engines should subclass this class
    if necessary.

    Parameters
    ----------
    env : Scope
    engine : str
    parser : str
    preparser : callable
    """
    # Factories used to wrap constants and named terms; subclasses may
    # override them.
    const_type = Constant
    term_type = Term
    # Operator symbols paired one-to-one with the AST node names below;
    # add_ops generates a visit_<node> method for each non-None pairing.
    binary_ops = _cmp_ops_syms + _bool_ops_syms + _arith_ops_syms
    # NOTE(review): the ``None`` entry marks an operator with no generated
    # visit method -- presumably division, which is handled specially by
    # visit_Div; confirm against the ordering of _arith_ops_syms.
    binary_op_nodes = ('Gt', 'Lt', 'GtE', 'LtE', 'Eq', 'NotEq', 'In', 'NotIn',
                       'BitAnd', 'BitOr', 'And', 'Or', 'Add', 'Sub', 'Mult',
                       None, 'Pow', 'FloorDiv', 'Mod')
    binary_op_nodes_map = dict(zip(binary_ops, binary_op_nodes))
    unary_ops = _unary_ops_syms
    unary_op_nodes = 'UAdd', 'USub', 'Invert', 'Not'
    unary_op_nodes_map = dict(zip(unary_ops, unary_op_nodes))
    # Comparison ops that may be rewritten into (possibly negated)
    # membership tests by _rewrite_membership_op.
    rewrite_map = {
        ast.Eq: ast.In,
        ast.NotEq: ast.NotIn,
        ast.In: ast.In,
        ast.NotIn: ast.NotIn
    }

    def __init__(self, env, engine, parser, preparser=_preparse):
        self.env = env
        self.engine = engine
        self.parser = parser
        self.preparser = preparser
        # Set by visit_Assign when the expression is an assignment.
        self.assigner = None

    def visit(self, node, **kwargs):
        # A string is first preparsed (token rewriting) and compiled to an
        # AST; dispatch then goes to the per-node visit_* method.
        if isinstance(node, string_types):
            clean = self.preparser(node)
            node = ast.fix_missing_locations(ast.parse(clean))
        method = 'visit_' + node.__class__.__name__
        visitor = getattr(self, method)
        return visitor(node, **kwargs)

    def visit_Module(self, node, **kwargs):
        # Only single-expression modules are meaningful here.
        if len(node.body) != 1:
            raise SyntaxError('only a single expression is allowed')
        expr = node.body[0]
        return self.visit(expr, **kwargs)

    def visit_Expr(self, node, **kwargs):
        return self.visit(node.value, **kwargs)

    def _rewrite_membership_op(self, node, left, right):
        """Rewrite ==/!= between a term and a string/list term as a
        (possibly negated) membership test.
        """
        # the kind of the operator (is actually an instance)
        op_instance = node.op
        op_type = type(op_instance)
        # must be two terms and the comparison operator must be ==/!=/in/not in
        if is_term(left) and is_term(right) and op_type in self.rewrite_map:
            left_list, right_list = map(_is_list, (left, right))
            left_str, right_str = map(_is_str, (left, right))
            # if there are any strings or lists in the expression
            if left_list or right_list or left_str or right_str:
                op_instance = self.rewrite_map[op_type]()
            # pop the string variable out of locals and replace it with a list
            # of one string, kind of a hack
            if right_str:
                name = self.env.add_tmp([right.value])
                right = self.term_type(name, self.env)
            if left_str:
                name = self.env.add_tmp([left.value])
                left = self.term_type(name, self.env)
        op = self.visit(op_instance)
        return op, op_instance, left, right

    def _possibly_transform_eq_ne(self, node, left=None, right=None):
        # Visit operands lazily so BoolOp can pass pre-visited terms in.
        if left is None:
            left = self.visit(node.left, side='left')
        if right is None:
            right = self.visit(node.right, side='right')
        op, op_class, left, right = self._rewrite_membership_op(node, left,
                                                                right)
        return op, op_class, left, right

    def _possibly_eval(self, binop, eval_in_python):
        # eval `in` and `not in` (for now) in "partial" python space
        # things that can be evaluated in "eval" space will be turned into
        # temporary variables. for example,
        # [1,2] in a + 2 * b
        # in that case a + 2 * b will be evaluated using numexpr, and the "in"
        # call will be evaluated using isin (in python space)
        return binop.evaluate(self.env, self.engine, self.parser,
                              self.term_type, eval_in_python)

    def _possibly_evaluate_binop(self, op, op_class, lhs, rhs,
                                 eval_in_python=('in', 'not in'),
                                 maybe_eval_in_python=('==', '!=', '<', '>',
                                                       '<=', '>=')):
        """Build the binary op and decide whether it must be evaluated in
        python space instead of the configured engine.
        """
        res = op(lhs, rhs)
        if res.has_invalid_return_type:
            raise TypeError("unsupported operand type(s) for {0}:"
                            " '{1}' and '{2}'".format(res.op, lhs.type,
                                                      rhs.type))
        if self.engine != 'pytables':
            # NOTE(review): ``and`` binds tighter than ``or`` here, so this
            # condition is true whenever rhs is a datetime regardless of the
            # operator -- confirm whether the grouping is intentional.
            if (res.op in _cmp_ops_syms
                and getattr(lhs, 'is_datetime', False)
                or getattr(rhs, 'is_datetime', False)):
                # all date ops must be done in python bc numexpr doesn't work
                # well with NaT
                return self._possibly_eval(res, self.binary_ops)
        if res.op in eval_in_python:
            # "in"/"not in" ops are always evaluated in python
            return self._possibly_eval(res, eval_in_python)
        elif self.engine != 'pytables':
            if (getattr(lhs, 'return_type', None) == object
                or getattr(rhs, 'return_type', None) == object):
                # evaluate "==" and "!=" in python if either of our operands
                # has an object return type
                return self._possibly_eval(res, eval_in_python +
                                           maybe_eval_in_python)
        return res

    def visit_BinOp(self, node, **kwargs):
        op, op_class, left, right = self._possibly_transform_eq_ne(node)
        return self._possibly_evaluate_binop(op, op_class, left, right)

    def visit_Div(self, node, **kwargs):
        # truediv is injected into the scope by Expr.__init__.
        truediv = self.env.scope['truediv']
        return lambda lhs, rhs: Div(lhs, rhs, truediv)

    def visit_UnaryOp(self, node, **kwargs):
        op = self.visit(node.op)
        operand = self.visit(node.operand)
        return op(operand)

    def visit_Name(self, node, **kwargs):
        return self.term_type(node.id, self.env, **kwargs)

    def visit_NameConstant(self, node, **kwargs):
        # Python 3: True/False/None arrive as NameConstant nodes.
        return self.const_type(node.value, self.env)

    def visit_Num(self, node, **kwargs):
        return self.const_type(node.n, self.env)

    def visit_Str(self, node, **kwargs):
        # String literals become temporary variables in the scope.
        name = self.env.add_tmp(node.s)
        return self.term_type(name, self.env)

    def visit_List(self, node, **kwargs):
        # Each element is visited and evaluated, then the whole list is
        # stored as one temporary variable.
        name = self.env.add_tmp([self.visit(e)(self.env) for e in node.elts])
        return self.term_type(name, self.env)

    visit_Tuple = visit_List

    def visit_Index(self, node, **kwargs):
        """ df.index[4] """
        return self.visit(node.value)

    def visit_Subscript(self, node, **kwargs):
        value = self.visit(node.value)
        slobj = self.visit(node.slice)
        result = pd.eval(slobj, local_dict=self.env, engine=self.engine,
                         parser=self.parser)
        try:
            # a Term instance
            v = value.value[result]
        except AttributeError:
            # an Op instance
            lhs = pd.eval(value, local_dict=self.env, engine=self.engine,
                          parser=self.parser)
            v = lhs[result]
        name = self.env.add_tmp(v)
        return self.term_type(name, env=self.env)

    def visit_Slice(self, node, **kwargs):
        """ df.index[slice(4,6)] """
        lower = node.lower
        if lower is not None:
            lower = self.visit(lower).value
        upper = node.upper
        if upper is not None:
            upper = self.visit(upper).value
        step = node.step
        if step is not None:
            step = self.visit(step).value
        return slice(lower, upper, step)

    def visit_Assign(self, node, **kwargs):
        """
        support a single assignment node, like
        c = a + b
        set the assigner at the top level, must be a Name node which
        might or might not exist in the resolvers
        """
        if len(node.targets) != 1:
            raise SyntaxError('can only assign a single expression')
        if not isinstance(node.targets[0], ast.Name):
            raise SyntaxError('left hand side of an assignment must be a '
                              'single name')
        if self.env.target is None:
            raise ValueError('cannot assign without a target object')
        try:
            assigner = self.visit(node.targets[0], **kwargs)
        except UndefinedVariableError:
            # Target does not resolve yet; remember its raw name instead.
            assigner = node.targets[0].id
        self.assigner = getattr(assigner, 'name', assigner)
        if self.assigner is None:
            raise SyntaxError('left hand side of an assignment must be a '
                              'single resolvable name')
        return self.visit(node.value, **kwargs)

    def visit_Attribute(self, node, **kwargs):
        attr = node.attr
        value = node.value
        ctx = node.ctx
        if isinstance(ctx, ast.Load):
            # resolve the value
            resolved = self.visit(value).value
            try:
                v = getattr(resolved, attr)
                name = self.env.add_tmp(v)
                return self.term_type(name, self.env)
            except AttributeError:
                # something like datetime.datetime where scope is overridden
                if isinstance(value, ast.Name) and value.id == attr:
                    return resolved
        raise ValueError("Invalid Attribute context {0}".format(ctx.__name__))

    def visit_Call_35(self, node, side=None, **kwargs):
        """ in 3.5 the starargs attribute was changed to be more flexible, #11097 """
        if isinstance(node.func, ast.Attribute):
            res = self.visit_Attribute(node.func)
        elif not isinstance(node.func, ast.Name):
            raise TypeError("Only named functions are supported")
        else:
            try:
                res = self.visit(node.func)
            except UndefinedVariableError:
                # Check if this is a supported function name
                try:
                    res = FuncNode(node.func.id)
                except ValueError:
                    # Raise original error
                    raise
        if res is None:
            raise ValueError("Invalid function call {0}".format(node.func.id))
        if hasattr(res, 'value'):
            res = res.value
        if isinstance(res, FuncNode):
            new_args = [ self.visit(arg) for arg in node.args ]
            if node.keywords:
                raise TypeError("Function \"{0}\" does not support keyword "
                                "arguments".format(res.name))
            return res(*new_args, **kwargs)
        else:
            new_args = [ self.visit(arg).value for arg in node.args ]
            for key in node.keywords:
                if not isinstance(key, ast.keyword):
                    raise ValueError("keyword error in function call "
                                     "'{0}'".format(node.func.id))
                if key.arg:
                    # NOTE(review): this line looks broken -- ``keyword`` is
                    # undefined (should be ``key``) and ``kwargs`` is a dict,
                    # which has no ``append``; any call with keyword
                    # arguments on this path will raise.  Probably intended:
                    # ``kwargs[key.arg] = self.visit(key.value).value``.
                    # Confirm against the legacy handler below.
                    kwargs.append(ast.keyword(keyword.arg, self.visit(keyword.value)))
            return self.const_type(res(*new_args, **kwargs), self.env)

    def visit_Call_legacy(self, node, side=None, **kwargs):
        """Handle ast.Call on Python < 3.5 (starargs/kwargs attributes)."""
        # this can happen with: datetime.datetime
        if isinstance(node.func, ast.Attribute):
            res = self.visit_Attribute(node.func)
        elif not isinstance(node.func, ast.Name):
            raise TypeError("Only named functions are supported")
        else:
            try:
                res = self.visit(node.func)
            except UndefinedVariableError:
                # Check if this is a supported function name
                try:
                    res = FuncNode(node.func.id)
                except ValueError:
                    # Raise original error
                    raise
        if res is None:
            raise ValueError("Invalid function call {0}".format(node.func.id))
        if hasattr(res, 'value'):
            res = res.value
        if isinstance(res, FuncNode):
            args = [self.visit(targ) for targ in node.args]
            if node.starargs is not None:
                args += self.visit(node.starargs)
            if node.keywords or node.kwargs:
                raise TypeError("Function \"{0}\" does not support keyword "
                                "arguments".format(res.name))
            return res(*args, **kwargs)
        else:
            args = [self.visit(targ).value for targ in node.args]
            if node.starargs is not None:
                args += self.visit(node.starargs).value
            keywords = {}
            for key in node.keywords:
                if not isinstance(key, ast.keyword):
                    raise ValueError("keyword error in function call "
                                     "'{0}'".format(node.func.id))
                keywords[key.arg] = self.visit(key.value).value
            if node.kwargs is not None:
                keywords.update(self.visit(node.kwargs).value)
            return self.const_type(res(*args, **keywords), self.env)

    def translate_In(self, op):
        # Hook for engines (e.g. pytables) that need to translate
        # membership operators; the base visitor passes them through.
        return op

    def visit_Compare(self, node, **kwargs):
        ops = node.ops
        comps = node.comparators
        # base case: we have something like a CMP b
        if len(comps) == 1:
            op = self.translate_In(ops[0])
            binop = ast.BinOp(op=op, left=node.left, right=comps[0])
            return self.visit(binop)
        # recursive case: we have a chained comparison, a CMP b CMP c, etc.
        left = node.left
        values = []
        for op, comp in zip(ops, comps):
            new_node = self.visit(ast.Compare(comparators=[comp], left=left,
                                              ops=[self.translate_In(op)]))
            left = comp
            values.append(new_node)
        return self.visit(ast.BoolOp(op=ast.And(), values=values))

    def _try_visit_binop(self, bop):
        # Operands may already be visited (Op/Term) when folding BoolOps.
        if isinstance(bop, (Op, Term)):
            return bop
        return self.visit(bop)

    def visit_BoolOp(self, node, **kwargs):
        # Fold the operand list pairwise into nested binary ops.
        def visitor(x, y):
            lhs = self._try_visit_binop(x)
            rhs = self._try_visit_binop(y)
            op, op_class, lhs, rhs = self._possibly_transform_eq_ne(node, lhs,
                                                                    rhs)
            return self._possibly_evaluate_binop(op, node.op, lhs, rhs)
        operands = node.values
        return reduce(visitor, operands)
# ast.Call signature changed on 3.5,
# conditionally change which methods is named
# visit_Call depending on Python version, #11097
if compat.PY35:
    BaseExprVisitor.visit_Call = BaseExprVisitor.visit_Call_35
else:
    BaseExprVisitor.visit_Call = BaseExprVisitor.visit_Call_legacy
# Nodes the "python" engine additionally rejects, and the call names
# numexpr is known to support.
_python_not_supported = frozenset(['Dict', 'BoolOp', 'In', 'NotIn'])
_numexpr_supported_calls = frozenset(_reductions + _mathops)
@disallow((_unsupported_nodes | _python_not_supported) -
          (_boolop_nodes | frozenset(['BoolOp', 'Attribute', 'In', 'NotIn',
                                      'Tuple'])))
class PandasExprVisitor(BaseExprVisitor):
    """Visitor for parser='pandas': '@' locals and bitwise &/| are
    rewritten, but '=' is left alone (no PyTables-style '=' ~ '==').
    """
    def __init__(self, env, engine, parser,
                 preparser=partial(_preparse, f=compose(_replace_locals,
                                                        _replace_booleans))):
        super(PandasExprVisitor, self).__init__(env, engine, parser, preparser)
@disallow(_unsupported_nodes | _python_not_supported | frozenset(['Not']))
class PythonExprVisitor(BaseExprVisitor):
    """Visitor for parser='python': the source is parsed verbatim, with
    no token rewriting at all.
    """
    def __init__(self, env, engine, parser, preparser=lambda x: x):
        super(PythonExprVisitor, self).__init__(env, engine, parser,
                                                preparser=preparser)
class Expr(StringMixin):
    """Object encapsulating an expression.

    Parameters
    ----------
    expr : str
    engine : str, optional, default 'numexpr'
    parser : str, optional, default 'pandas'
    env : Scope, optional, default None
    truediv : bool, optional, default True
    level : int, optional, default 0
    """
    def __init__(self, expr, engine='numexpr', parser='pandas', env=None,
                 truediv=True, level=0):
        self.expr = expr
        self.engine = engine
        self.parser = parser
        self.env = env or Scope(level=level + 1)
        # The division visitor reads this flag back out of the scope.
        self.env.scope['truediv'] = truediv
        visitor_cls = _parsers[parser]
        self._visitor = visitor_cls(self.env, self.engine, self.parser)
        self.terms = self.parse()

    @property
    def assigner(self):
        """Name being assigned to, if the expression is an assignment."""
        return getattr(self._visitor, 'assigner', None)

    def __call__(self):
        return self.terms(self.env)

    def __unicode__(self):
        return com.pprint_thing(self.terms)

    def __len__(self):
        return len(self.expr)

    def parse(self):
        """Parse the expression string into a term tree."""
        return self._visitor.visit(self.expr)

    @property
    def names(self):
        """Get the names in an expression"""
        if is_term(self.terms):
            return frozenset([self.terms.name])
        return frozenset(term.name for term in com.flatten(self.terms))
# Maps a parser name to the visitor class that implements it.
_parsers = {'python': PythonExprVisitor, 'pandas': PandasExprVisitor}
|
{
"content_hash": "80273e248dc47b5fdb289cdaee7acb93",
"timestamp": "",
"source": "github",
"line_count": 737,
"max_line_length": 86,
"avg_line_length": 34.576662143826326,
"alnum_prop": 0.5716359926225326,
"repo_name": "stevenzhang18/Indeed-Flask",
"id": "6da5cf4753a8e4959a7b37c589e3ea03214d75cf",
"size": "25483",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "lib/pandas/computation/expr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "45061"
},
{
"name": "HTML",
"bytes": "1386611"
},
{
"name": "JavaScript",
"bytes": "84693"
},
{
"name": "Python",
"bytes": "10498302"
}
],
"symlink_target": ""
}
|
import os
import sys
import re
import argparse


if __name__ == "__main__":
    # Translate every *.xrdb color scheme found in a directory into a
    # terminator config snippet, printed or written per-scheme.
    parser = argparse.ArgumentParser(
        description='Translate X color schemes to terminator format')
    parser.add_argument('xrdb_path', type=str, help='path to xrdb files')
    parser.add_argument('-d', '--destiny', type=str, dest='output_path',
                        help='path where terminator config files will be' +
                        ' created, if not provided then will be printed')
    args = parser.parse_args()
    # The regexes to match the colors.  Raw strings so '\d' and '\.' are
    # real regex escapes rather than (deprecated) string escapes.
    color_regex = re.compile(r"#define +Ansi_(\d+)_Color +(#[A-Fa-f0-9]{6})")
    bg_regex = re.compile(r"#define +Background_Color +(#[A-Fa-f0-9]{6})")
    fg_regex = re.compile(r"#define +Foreground_Color +(#[A-Fa-f0-9]{6})")
    cursor_regex = re.compile(r"#define +Cursor_Color +(#[A-Fa-f0-9]{6})")
    # File regex
    xrdb_regex = re.compile(r"(.+)\.[xX][rR][dD][bB]")
    for i in filter(lambda x: xrdb_regex.match(x), os.listdir(args.xrdb_path)):
        # per file; the scheme name is the filename minus its extension
        # (hoisted: it was previously re-matched for every use).
        scheme_name = xrdb_regex.match(i).group(1)
        with open(os.path.join(args.xrdb_path, i)) as f:
            lines = f.readlines()
        # Search special colors
        color_file = "\n".join(lines)
        bg_color = bg_regex.search(color_file).group(1)
        fg_color = fg_regex.search(color_file).group(1)
        cursor_color = cursor_regex.search(color_file).group(1)
        # Search palette, ordered by ansi color index
        colors = sorted(filter(lambda x: color_regex.match(x), lines),
                        key=lambda x: int(color_regex.match(x).group(1)))
        # Create the color string
        colors = ":".join(map(lambda x: color_regex.match(x).group(2), colors))
        scheme = """
[[{name}]]
palette = "{pl}"
background_color = "{bg}"
cursor_color = "{cr}"
foreground_color = "{fg}"
background_image = None
"""
        output = scheme.format(name=scheme_name,
                               pl=colors,
                               bg=bg_color,
                               cr=cursor_color,
                               fg=fg_color)
        if not args.output_path:
            print(output)
        else:
            dest = os.path.join(args.output_path, scheme_name)
            with open('{0}.config'.format(dest), 'w+') as f:
                f.write(output)
|
{
"content_hash": "87ccc852d3b99c2c5626566380130f85",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 79,
"avg_line_length": 34.53731343283582,
"alnum_prop": 0.5492653414001729,
"repo_name": "redlist/dotfiles",
"id": "a80c34b7bfdeef9a2b0a5e91ed7c6903569ccc50",
"size": "2541",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "mbadolato-iTerm2-Color-Schemes-dee0645/tools/xrdb2terminator.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6024"
},
{
"name": "Ruby",
"bytes": "5437"
},
{
"name": "Shell",
"bytes": "5651"
},
{
"name": "VimL",
"bytes": "4639"
}
],
"symlink_target": ""
}
|
"""Tests of PulpClearRepo script.
"""
from __future__ import print_function, unicode_literals
import unittest
import os
import sys
from mock import Mock, patch
DIR = os.path.dirname(__file__)
sys.path.insert(0, os.path.join(DIR, ".."))
from releng_sop.pulp_clear_repos import get_parser, PulpClearRepos # noqa
from tests.common import ParserTestBase # noqa
class TestPulpClearRepos(unittest.TestCase):
    """Tests of methods from PulpClearRepos class."""
    # Shared input data; the list values are reset in setUp because the
    # individual tests mutate them.
    data = {
        "release_id": 'rhel-7.1',
        "service": 'pulp',
        "repo_family": 'htb',
        "repo_family_diff": 'ht',
        "content_format": 'rpm',
        "arch": ['x86_64', 's390x'],
        "variant_uid": ['Server', 'Workstation'],
    }
    # Specs used to build Mock objects with __getitem__ behavior.
    release_spec = {
        'name': 'rhel-7.1',
        'config_path': 'some.json',
        'config_data': {},
        '__getitem__': lambda self, item: self.config_data[item],
    }
    env_spec = {
        "name": 'default',
        'config_path': 'some_path.json',
        'config_data': {
            'pdc_server': 'pdc-test',
            'pulp_server': 'pulp_test',
        },
        '__getitem__': lambda self, item: self.config_data[item]
    }
    pulp_spec = {
        'user': 'admin',
        'password': 'pass',
        'config': 'pulp-test',
        'config_path': 'some_path.json',
    }
    # Expected details text
    details_base = """Pulp clear repos
 * env name: {env[name]}
 * env config: {env[config_path]}
 * release source {release[config_path]}
 * PDC server: pdc-test
 * release_id: {release[name]}
 * pulp config: {pulp[config]}
 * pulp config path: {pulp[config_path]}
 * pulp user: {pulp[user]}
""".format(env=env_spec, release=release_spec, pulp=pulp_spec)
    details_good_repo = """ * repo_family: {data[repo_family]}
""".format(data=data)
    details_diff_repo = """ * repo_family: {data[repo_family_diff]}
""".format(data=data)
    details_with_one_repo = """ * repos:
     rhel-7-workstation-htb-rpms
""".format(data=data)
    details_with_more_repo = """ * repos:
     rhel-7-workstation-htb-rpms
     rhel-7-server-htb-source-rpms
""".format(data=data)
    details_no_repo = """ * repos:
     No repos found.
""".format(data=data)
    details_arch = """ * arches:
     {arches}
""".format(arches="\n     ".join(data['arch']))
    details_variant = """ * variants:
     {variants}
""".format(variants="\n     ".join(data['variant_uid']))
    details_variant_arch = """ * arches:
     {data[arch]}
 * variants:
     {data[variant_uid]}
""".format(data=data)
    # Baseline PDC query kwargs; tests extend them with arch/variant_uid.
    expected_query = {
        "release_id": "rhel-7.1",
        "service": "pulp",
        "repo_family": "htb",
        "content_format": "rpm",
    }
    expected_query_no_repo = {
        "release_id": "rhel-7.1",
        "service": "pulp",
        "repo_family": "ht",
        "content_format": "rpm",
    }
def setUp(self):
"""Set up variables before tests."""
self.data['arch'] = []
self.data['variant_uid'] = []
self.env = Mock(spec_set=list(self.env_spec.keys()))
self.env.configure_mock(**self.env_spec)
self.release = Mock(spec_set=list(self.release_spec.keys()))
self.release.configure_mock(**self.release_spec)
def check_details(self, PDCClientClassMock, PulpAdminConfigClassMock, expected_details,
expected_query, testMethod, query_result, commit):
"""Check the expected and actual."""
# get mock instance and configure return value for get_paged
instance = PDCClientClassMock.return_value
api = instance.__getitem__.return_value
api._.return_value = query_result
pulpAdminConfig = PulpAdminConfigClassMock.return_value
pulpAdminConfig.name = 'pulp-test'
pulpAdminConfig.config_path = 'some_path.json'
client = pulpAdminConfig.__getitem__.return_value
client.__getitem__.return_value = 'admin'
clear = PulpClearRepos(self.env, self.release, self.data['repo_family'],
self.data['variant_uid'], self.data['arch'])
actual = clear.details(commit=commit)
# check that class constructor is called once with the value
# of env['pdc_server']
PDCClientClassMock.assert_called_once_with('pdc-test', develop=True)
# check that the right resource is accessed
instance.__getitem__.assert_called_once_with('content-delivery-repos')
# check that mock instance is called once, with the correct
# parameters
instance.__getitem__()._.assert_called_once_with(page_size=0, **expected_query)
# check that the actual details are the same as the expected ones
self.assertEquals(expected_details, actual, testMethod.__doc__)
@patch('releng_sop.pulp_clear_repos.PDCClient', autospec=True)
@patch('releng_sop.pulp_clear_repos.PulpAdminConfig', autospec=True)
def test_details_no_commit_one_repo(self, PulpAdminConfigClassMock, PDCClientClassMock):
"""Check details with one repo found and two variants, while not commiting."""
self.data['repo_family'] = 'htb'
self.data['variant_uid'] = ['Server', 'Workstation']
query_result = [
{
'name': 'rhel-7-workstation-htb-rpms',
}
]
expected_details = (self.details_base + self.details_good_repo +
self.details_variant + self.details_with_one_repo + "*** TEST MODE ***")
expected_query_add = {
'arch': self.data['arch'],
'variant_uid': self.data['variant_uid'],
}
expected_query = dict(self.expected_query)
expected_query.update(expected_query_add)
commit = False
testMethod = TestPulpClearRepos.test_details_no_commit_one_repo
self.check_details(PDCClientClassMock, PulpAdminConfigClassMock, expected_details,
expected_query, testMethod, query_result, commit)
@patch('releng_sop.pulp_clear_repos.PDCClient', autospec=True)
@patch('releng_sop.pulp_clear_repos.PulpAdminConfig', autospec=True)
def test_details_no_commit_more_repo(self, PulpAdminConfigClassMock, PDCClientClassMock):
"""Check details with two repos found and two arches, while not commiting."""
self.data['repo_family'] = 'htb'
self.data['arch'] = ['x86_64', 's390x']
query_result = [
{
'name': 'rhel-7-workstation-htb-rpms',
},
{
'name': 'rhel-7-server-htb-source-rpms',
}
]
expected_details = (self.details_base + self.details_good_repo +
self.details_arch + self.details_with_more_repo + "*** TEST MODE ***")
expected_query_add = {
'arch': self.data['arch'],
'variant_uid': self.data['variant_uid'],
}
expected_query = dict(self.expected_query)
expected_query.update(expected_query_add)
commit = False
testMethod = TestPulpClearRepos.test_details_no_commit_more_repo
self.check_details(PDCClientClassMock, PulpAdminConfigClassMock, expected_details,
expected_query, testMethod, query_result, commit)
@patch('releng_sop.pulp_clear_repos.PDCClient', autospec=True)
@patch('releng_sop.pulp_clear_repos.PulpAdminConfig', autospec=True)
def test_details_no_commit_no_repo(self, PulpAdminConfigClassMock, PDCClientClassMock):
"""Check details with no repo found and two variants and two arches, while not commiting."""
self.data['repo_family'] = 'ht'
self.data['arch'] = ['x86_64', 's390x']
self.data['variant_uid'] = ['Server', 'Workstation']
query_result = []
expected_details = (self.details_base + self.details_diff_repo +
self.details_arch + self.details_variant + self.details_no_repo +
"*** TEST MODE ***")
expected_query_add = {
'arch': self.data['arch'],
'variant_uid': self.data['variant_uid'],
}
expected_query = dict(self.expected_query_no_repo)
expected_query.update(expected_query_add)
commit = False
testMethod = TestPulpClearRepos.test_details_no_commit_no_repo
self.check_details(PDCClientClassMock, PulpAdminConfigClassMock, expected_details,
expected_query, testMethod, query_result, commit)
@patch('releng_sop.pulp_clear_repos.PDCClient', autospec=True)
@patch('releng_sop.pulp_clear_repos.PulpAdminConfig', autospec=True)
def test_details_with_commit_one_repo(self,
PulpAdminConfigClassMock, PDCClientClassMock):
"""Check details with one repo found and two variants and two arches, when commiting."""
self.data['repo_family'] = 'htb'
self.data['arch'] = ['x86_64', 's390x']
self.data['variant_uid'] = ['Server', 'Workstation']
query_result = [
{
'name': 'rhel-7-workstation-htb-rpms',
}
]
expected_details = (self.details_base + self.details_good_repo +
self.details_arch + self.details_variant + self.details_with_one_repo)
expected_query_add = {
'arch': self.data['arch'],
'variant_uid': self.data['variant_uid'],
}
expected_query = dict(self.expected_query)
expected_query.update(expected_query_add)
commit = True
testMethod = TestPulpClearRepos.test_details_with_commit_one_repo
self.check_details(PDCClientClassMock, PulpAdminConfigClassMock, expected_details,
expected_query, testMethod, query_result, commit)
@patch('releng_sop.pulp_clear_repos.PDCClient', autospec=True)
@patch('releng_sop.pulp_clear_repos.PulpAdminConfig', autospec=True)
def test_details_with_commit_more_repo(self, PulpAdminConfigClassMock, PDCClientClassMock):
"""Check details with two repos found and two variants, when commiting."""
self.data['repo_family'] = 'htb'
self.data['variant_uid'] = ['Server', 'Workstation']
query_result = [
{
'name': 'rhel-7-workstation-htb-rpms',
},
{
'name': 'rhel-7-server-htb-source-rpms',
}
]
expected_details = (self.details_base + self.details_good_repo +
self.details_variant + self.details_with_more_repo)
expected_query_add = {
'arch': self.data['arch'],
'variant_uid': self.data['variant_uid'],
}
expected_query = dict(self.expected_query)
expected_query.update(expected_query_add)
commit = True
testMethod = TestPulpClearRepos.test_details_with_commit_more_repo
self.check_details(PDCClientClassMock, PulpAdminConfigClassMock, expected_details,
expected_query, testMethod, query_result, commit)
@patch('releng_sop.pulp_clear_repos.PDCClient', autospec=True)
@patch('releng_sop.pulp_clear_repos.PulpAdminConfig', autospec=True)
def test_details_with_commit_no_repo(self, PulpAdminConfigClassMock, PDCClientClassMock):
"""Check details with no repo found and two arches, when commiting."""
self.data['repo_family'] = 'ht'
self.data['arch'] = ['x86_64', 's390x']
query_result = []
expected_details = (self.details_base + self.details_diff_repo +
self.details_arch + self.details_no_repo)
expected_query_add = {
'arch': self.data['arch'],
'variant_uid': self.data['variant_uid'],
}
expected_query = dict(self.expected_query_no_repo)
expected_query.update(expected_query_add)
commit = True
testMethod = TestPulpClearRepos.test_details_with_commit_no_repo
self.check_details(PDCClientClassMock, PulpAdminConfigClassMock, expected_details,
expected_query, testMethod, query_result, commit)
def check_get_cmd(self, PulpAdminConfigClassMock, expected, commit, testMethod, repos, password, addpassword):
"""Check the expected and actual."""
clear = PulpClearRepos(self.env, self.release, self.data['repo_family'],
self.data['variant_uid'], self.data['arch'])
clear.repos = repos
clear.pulp_password = password
pulpAdminConfig = PulpAdminConfigClassMock.return_value
pulpAdminConfig.name = 'pulp-test'
pulpAdminConfig.config_path = 'some_path.json'
client = pulpAdminConfig.__getitem__.return_value
client.__getitem__.return_value = 'admin'
actual = clear.get_cmd(add_password=addpassword, commit=commit)
self.assertEqual(actual, expected, testMethod.__doc__)
@patch('releng_sop.pulp_clear_repos.PulpAdminConfig', autospec=True)
def test_get_cmd_no_commit_no_repo(self, PulpAdminConfigClassMock):
"""Get command with no repo and password, while not commiting."""
repos = []
password = 'like'
expected = []
for repo in repos:
expected_cmd = ["echo"]
expected_cmd += "pulp-admin --config={config} --user={username} --password={passwd}".format(
config=self.env_spec['config_path'],
username=self.pulp_spec['user'],
passwd=password).split()
expected_cmd += "rpm repo remove rpm --filters='{filters}' --repo-id {repo}".format(
filters='{}',
repo=repo).split()
expected.append(expected_cmd)
commit = False
addpassword = True
testMethod = TestPulpClearRepos.test_get_cmd_no_commit_no_repo
self.check_get_cmd(PulpAdminConfigClassMock, expected, commit, testMethod, repos, password, addpassword)
@patch('releng_sop.pulp_clear_repos.PulpAdminConfig', autospec=True)
def test_get_cmd_with_commit_one_repo(self, PulpAdminConfigClassMock):
"""Get command with one repo and password, when commiting."""
repos = ['rhel-7-workstation-htb-rpms']
password = 'like'
expected = []
for repo in repos:
expected_cmd = "pulp-admin --config={config} --user={username} --password={passwd}".format(
config=self.env_spec['config_path'],
username=self.pulp_spec['user'],
passwd=password).split()
expected_cmd += "rpm repo remove rpm \
--filters='{filters}' --repo-id {repo}".format(
filters='{}',
repo=repo).split()
expected.append(expected_cmd)
commit = True
addpassword = True
testMethod = TestPulpClearRepos.test_get_cmd_with_commit_one_repo
self.check_get_cmd(PulpAdminConfigClassMock, expected, commit, testMethod, repos, password, addpassword)
@patch('releng_sop.pulp_clear_repos.PulpAdminConfig', autospec=True)
def test_get_cmd_with_commit_two_repo(self, PulpAdminConfigClassMock):
"""Get command with two repos when commiting."""
repos = ['rhel-7-workstation-htb-rpms', 'rhel-7-server-htb-source-rpms']
password = ''
expected = []
for repo in repos:
expected_cmd = "pulp-admin --config={config} --user={username} rpm repo remove rpm".format(
config=self.env_spec['config_path'],
username=self.pulp_spec['user']).split()
expected_cmd += "--filters='{filters}' --repo-id {repo}".format(
filters='{}',
repo=repo).split()
expected.append(expected_cmd)
commit = True
addpassword = False
testMethod = TestPulpClearRepos.test_get_cmd_with_commit_two_repo
self.check_get_cmd(PulpAdminConfigClassMock, expected, commit, testMethod, repos, password, addpassword)
class TestPulpClearReposParser(ParserTestBase, unittest.TestCase):
    """Set Arguments and Parser for Test generator."""

    # Scenarios consumed by the ParserTestBase test generator.  Each entry
    # names an option/positional whose help text is checked ('arg') and,
    # where applicable, argv lists exercising the default and explicitly-set
    # forms of that option.
    ARGUMENTS = {
        'envHelp': {
            'arg': '--env ENV',
            'env_default': ['rhel-7.1', 'htb'],
            'env_set': ['rhel-7.1', 'htb', '--commit', "--env", "some_env"],
        },
        'helpReleaseId': {
            'arg': 'RELEASE_ID',
        },
        'commitHelp': {
            'arg': '--commit',
            'commit_default': ['rhel-7.1', 'htb'],
            'commit_set': ['rhel-7.1', 'htb', '--commit'],
        },
        'helpRepoFamily': {
            'arg': 'REPO_FAMILY',
        },
        'helpVariant': {
            'arg': '--variant',
        },
        'helpArch': {
            'arg': '--arch',
        },
    }

    # Parser instance the generated tests run the argv scenarios through.
    PARSER = get_parser()
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
{
"content_hash": "d4685fba1bd92f591f3235811188edee",
"timestamp": "",
"source": "github",
"line_count": 442,
"max_line_length": 114,
"avg_line_length": 38.56334841628959,
"alnum_prop": 0.5881490173071282,
"repo_name": "release-engineering/releng-sop",
"id": "2127925fca6eb1545de033106d0f4a3b58d54663",
"size": "17089",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_pulp_clear_repos.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "151744"
},
{
"name": "Shell",
"bytes": "3468"
}
],
"symlink_target": ""
}
|
"""
This module contains the main logic.
"""
import pandas as pd
import numpy as np
import shapely.ops as so
import gtfs_kit as gk
from . import constants as cs
def get_duration(timestr1, timestr2, units="s"):
    """
    Return the duration of the time period between the first and second
    time string in the given units.
    Allowable units are 's' (seconds), 'min' (minutes), 'h' (hours).
    Assume ``timestr1 < timestr2``.

    Raises ``ValueError`` on an invalid ``units`` value.  (The previous
    ``assert``-based validation would have been silently stripped under
    ``python -O``.)
    """
    # Seconds per unit; also serves as the whitelist of valid units.
    seconds_per_unit = {"s": 1, "min": 60, "h": 3600}
    if units not in seconds_per_unit:
        raise ValueError(
            "Units must be one of {!s}".format(list(seconds_per_unit))
        )
    duration = gk.timestr_to_seconds(timestr2) - gk.timestr_to_seconds(timestr1)
    if units == "s":
        # Preserve the (integer) seconds value unscaled.
        return duration
    return duration / seconds_per_unit[units]
def build_stop_ids(shape_id):
    """
    Create a pair of stop IDs based on the given shape ID.
    """
    # One ID per end of the shape: index 0 (start) and 1 (end).
    prefix = cs.SEP.join(["stp", shape_id])
    return [prefix + cs.SEP + "0", prefix + cs.SEP + "1"]
def build_stop_names(shape_id):
    """
    Create a pair of stop names based on the given shape ID.
    """
    # NB: the template deliberately reproduces the original trailing space.
    template = "Stop {!s} on shape {!s} "
    return [template.format(i, shape_id) for i in (0, 1)]
def build_agency(pfeed):
    """
    Given a ProtoFeed, return a DataFrame representing ``agency.txt``
    """
    # Pull the three agency fields out of the first (only) metadata row
    # and wrap them in a one-row DataFrame.
    meta = pfeed.meta
    record = {
        key: meta[key].iat[0]
        for key in ["agency_name", "agency_url", "agency_timezone"]
    }
    return pd.DataFrame(record, index=[0])
def build_calendar_etc(pfeed):
    """
    Given a ProtoFeed, return a DataFrame representing ``calendar.txt``
    and a dictionary of the form <service window ID> -> <service ID>,
    respectively.
    """
    weekdays = [
        "monday",
        "tuesday",
        "wednesday",
        "thursday",
        "friday",
        "saturday",
        "sunday",
    ]

    def service_id(bits):
        # Encode the active-day pattern directly into the service ID,
        # e.g. (1, 1, 1, 1, 1, 0, 0) -> 'srv1111100'.
        return "srv" + "".join(str(b) for b in bits)

    # Map each service window to the service ID derived from its
    # days-active pattern, collecting the distinct patterns as we go.
    service_by_window = {}
    patterns = set()
    for _, window in pfeed.service_windows.copy().iterrows():
        bits = tuple(window[weekdays].tolist())
        service_by_window[window["service_window_id"]] = service_id(bits)
        patterns.add(bits)

    # One calendar row per distinct pattern, all sharing the feed's
    # start and end dates.
    start_date = pfeed.meta["start_date"].iat[0]
    end_date = pfeed.meta["end_date"].iat[0]
    rows = [
        [service_id(bits)] + list(bits) + [start_date, end_date]
        for bits in patterns
    ]
    calendar = pd.DataFrame(
        rows, columns=["service_id"] + weekdays + ["start_date", "end_date"]
    )
    return calendar, service_by_window
def build_routes(pfeed):
    """
    Given a ProtoFeed, return a DataFrame representing ``routes.txt``.
    """
    # Distinct route descriptions from the frequencies table; shape_id is
    # kept only for the de-duplication step and dropped afterwards.
    cols = ["route_short_name", "route_long_name", "route_type", "shape_id"]
    f = pfeed.frequencies.filter(cols).drop_duplicates().copy()
    # Derive route IDs from the short names.
    f["route_id"] = "r" + f["route_short_name"].map(str)
    return f.drop(["shape_id"], axis=1)
def build_shapes(pfeed):
    """
    Given a ProtoFeed, return DataFrame representing ``shapes.txt``.
    Only use shape IDs that occur in both ``pfeed.shapes`` and
    ``pfeed.frequencies``.
    Create reversed shapes where routes traverse shapes in both
    directions.
    """
    def point_rows(shape_id, coords):
        # Expand one shape geometry into shapes.txt point rows.
        return [[shape_id, i, lon, lat] for i, (lon, lat) in enumerate(coords)]

    rows = []
    for shape, geom in pfeed.shapes[["shape_id", "geometry"]].itertuples(index=False):
        if shape not in pfeed.shapes_extra:
            # Shape not referenced by any frequency entry; skip it.
            continue
        direction = pfeed.shapes_extra[shape]
        if direction == 2:
            # Traversed both ways: emit the forward shape ("-1") and a
            # reversed copy ("-0").
            rows += point_rows(shape + "-1", geom.coords)
            rows += point_rows(shape + "-0", list(reversed(geom.coords)))
        else:
            # Single direction; encode it in the shape ID.
            rows += point_rows("{}{}{}".format(shape, cs.SEP, direction), geom.coords)
    return pd.DataFrame(
        rows, columns=["shape_id", "shape_pt_sequence", "shape_pt_lon", "shape_pt_lat"]
    )
def build_stops(pfeed, shapes=None):
    """
    Given a ProtoFeed, return a DataFrame representing ``stops.txt``.
    If ``pfeed.stops`` is not ``None``, then return that.
    Otherwise, require built shapes output by :func:`build_shapes`,
    create one stop at the beginning (the first point) of each shape
    and one at the end (the last point) of each shape,
    and drop stops with duplicate coordinates.
    Note that this will yield one stop for shapes that are loops.
    """
    # User-supplied stops win outright.
    if pfeed.stops is not None:
        return pfeed.stops.copy()

    if shapes is None:
        raise ValueError("Must input shapes built by build_shapes()")

    geo_shapes = gk.geometrize_shapes_0(shapes)
    rows = []
    for shape, geom in geo_shapes[["shape_id", "geometry"]].itertuples(index=False):
        ids = build_stop_ids(shape)
        names = build_stop_names(shape)
        # Interpolate at normalized fractions 0 (start) and 1 (end).
        for i in (0, 1):
            lon, lat = geom.interpolate(i, normalized=True).coords[0]
            rows.append([ids[i], names[i], lon, lat])
    return pd.DataFrame(
        rows, columns=["stop_id", "stop_name", "stop_lon", "stop_lat"]
    ).drop_duplicates(subset=["stop_lon", "stop_lat"])
def build_trips(pfeed, routes, service_by_window):
    """
    Given a ProtoFeed and its corresponding routes (DataFrame),
    service-by-window (dictionary), return a DataFrame representing
    ``trips.txt``.
    Trip IDs encode route, direction, and service window information
    to make it easy to compute stop times later.
    """
    # Join routes with their frequencies and service windows.
    combined = (
        routes[["route_id", "route_short_name"]]
        .merge(pfeed.frequencies)
        .merge(pfeed.service_windows)
    )

    # For each joined row, emit trips at the specified frequency in the
    # specified direction(s).
    rows = []
    for _, r in combined.iterrows():
        frequency = r["frequency"]
        if not frequency:
            # No trips during this service window
            continue
        start, end = r[["start_time", "end_time"]].values
        # Rounding down occurs here if the duration isn't integral
        # (bad input)
        num_trips = int(frequency * get_duration(start, end, "h"))
        service = service_by_window[r["service_window_id"]]
        # Direction 2 means the route runs both ways.
        directions = [0, 1] if r["direction"] == 2 else [r["direction"]]
        for direction in directions:
            # Warning: this shape-ID-making logic needs to match that
            # in ``build_shapes``
            shid = "{}{}{}".format(r["shape_id"], cs.SEP, direction)
            for i in range(num_trips):
                trip_id = cs.SEP.join(
                    [
                        "t",
                        r["route_id"],
                        r["service_window_id"],
                        start,
                        str(direction),
                        str(i),
                    ]
                )
                rows.append([r["route_id"], trip_id, direction, shid, service])
    return pd.DataFrame(
        rows, columns=["route_id", "trip_id", "direction_id", "shape_id", "service_id"]
    )
def buffer_side(linestring, side, buffer):
    """
    Given a Shapely LineString, a side of the LineString
    (string; 'left' = left hand side of LineString,
    'right' = right hand side of LineString, or
    'both' = both sides), and a buffer size in the distance units of
    the LineString, buffer the LineString on the given side by
    the buffer size and return the resulting Shapely polygon.
    """
    result = linestring.buffer(buffer, cap_style=2)
    if side not in ["left", "right"] or buffer <= 0:
        # 'both' (or a degenerate buffer): the symmetric buffer is the answer.
        return result

    # Split the full buffer in half across the linestring with a tiny
    # sliver buffer, then keep the polygon on the requested side.
    eps = min(buffer / 2, 0.001)
    sliver = linestring.buffer(eps, cap_style=3)
    pieces = list(so.polygonize(result.difference(sliver)))
    chosen = pieces[0] if side == "left" else pieces[-1]
    # Re-buffer slightly so the result still covers the original linestring.
    return chosen.buffer(1.1 * eps)
def get_nearby_stops(geo_stops, linestring, side, buffer=cs.BUFFER):
    """
    Given a GeoDataFrame of stops, a Shapely LineString in the
    same coordinate system, a side of the LineString
    (string; 'left' = left hand side of LineString,
    'right' = right hand side of LineString, or
    'both' = both sides), and a buffer in the distance units of that
    coordinate system, do the following.
    Return a GeoDataFrame of all the stops that lie within
    ``buffer`` distance units to the ``side`` of the LineString.
    """
    # Stops intersecting the one-sided buffer polygon.
    zone = buffer_side(linestring, side, buffer)
    return geo_stops.loc[geo_stops.intersects(zone)].copy()
def build_stop_times(pfeed, routes, shapes, stops, trips, buffer=cs.BUFFER):
    """
    Given a ProtoFeed and its corresponding routes (DataFrame),
    shapes (DataFrame), stops (DataFrame), trips (DataFrame),
    return DataFrame representing ``stop_times.txt``.
    Includes the optional ``shape_dist_traveled`` column.
    Don't make stop times for trips with no nearby stops.
    """
    # Get the table of trips and add frequency and service window details.
    # NB: ``routes`` (and later ``stops``/``trips``) are rebound locally;
    # the caller's DataFrames are not mutated.
    routes = routes.filter(["route_id", "route_short_name"]).merge(
        pfeed.frequencies.drop(["shape_id"], axis=1)
    )
    # The service window ID is the third cs.SEP-separated field of the
    # trip ID (as written by ``build_trips``).
    trips = trips.assign(
        service_window_id=lambda x: x.trip_id.map(lambda y: y.split(cs.SEP)[2])
    ).merge(routes)
    # Get the geometries of ``shapes`` and not ``pfeed.shapes``
    geometry_by_shape = dict(
        gk.geometrize_shapes_0(shapes, use_utm=True)
        .filter(["shape_id", "geometry"])
        .values
    )
    # Save on distance computations by memoizing:
    # shape ID -> stop ID -> distance (km) along the shape.
    dist_by_stop_by_shape = {shape: {} for shape in geometry_by_shape}

    def compute_stops_dists_times(geo_stops, linestring, shape, start_time, end_time):
        """
        Given a GeoDataFrame of stops on one side of a given Shapely
        LineString with given shape ID, compute distances and departure
        times of a trip traversing the LineString from start to end
        at the given start and end times (in seconds past midnight)
        and stopping at the stops encountered along the way.
        Do not assume that the stops are ordered by trip encounter.
        Return three lists of the same length: the stop IDs in order
        that the trip encounters them, the shape distances traveled
        along distances at the stops, and the times the stops are
        encountered, respectively.
        """
        g = geo_stops.copy()
        dists_and_stops = []
        for i, stop in enumerate(g["stop_id"].values):
            # Reuse a memoized distance when available.
            if stop in dist_by_stop_by_shape[shape]:
                d = dist_by_stop_by_shape[shape][stop]
            else:
                d = gk.get_segment_length(linestring, g.geometry.iat[i]) / 1000  # km
                dist_by_stop_by_shape[shape][stop] = d
            dists_and_stops.append((d, stop))
        # Sort stops into trip-encounter order by distance along the shape.
        dists, stops = zip(*sorted(dists_and_stops))
        D = linestring.length / 1000
        # Sanity check against wildly wrong projected distances.
        dists_are_reasonable = all([dist < D + 100 for dist in dists])
        if not dists_are_reasonable:
            # Assume equal distances between stops :-(
            n = len(stops)
            delta = D / (n - 1)
            dists = [i * delta for i in range(n)]
        # Compute times using distances, start and end stop times,
        # and linear interpolation
        t0, t1 = start_time, end_time
        d0, d1 = dists[0], dists[-1]
        # Interpolate
        times = np.interp(dists, [d0, d1], [t0, t1])
        return stops, dists, times

    # Iterate through trips and set stop times based on stop ID
    # and service window frequency.
    # Remember that every trip has a valid shape ID.
    # Gather stops geographically from ``stops``.
    rows = []
    geo_stops = gk.geometrize_stops_0(stops, use_utm=True)
    # Look on the side of the traffic side of street for this timezone
    side = cs.TRAFFIC_BY_TIMEZONE[pfeed.meta.agency_timezone.iat[0]]
    for index, row in trips.iterrows():
        shape = row["shape_id"]
        geom = geometry_by_shape[shape]
        # NB: this rebinds ``stops``, shadowing the function parameter.
        stops = get_nearby_stops(geo_stops, geom, side, buffer=buffer)
        # Don't make stop times for trips without nearby stops
        if stops.empty:
            continue
        length = geom.length / 1000  # km
        speed = row["speed"]  # km/h
        duration = int((length / speed) * 3600)  # seconds
        frequency = row["frequency"]
        if not frequency:
            # No stop times for this trip/frequency combo
            continue
        headway = 3600 / frequency  # seconds
        trip = row["trip_id"]
        # Unpack the trip-ID fields written by ``build_trips``.
        __, route, window, base_timestr, direction, i = trip.split(cs.SEP)
        direction = int(direction)
        base_time = gk.timestr_to_seconds(base_timestr)
        # The i-th trip of the window departs i headways after the base time.
        start_time = base_time + headway * int(i)
        end_time = start_time + duration
        stops, dists, times = compute_stops_dists_times(
            stops, geom, shape, start_time, end_time
        )
        # Arrival time equals departure time at every stop.
        new_rows = [
            [trip, stop, j, time, time, dist]
            for j, (stop, time, dist) in enumerate(zip(stops, times, dists))
        ]
        rows.extend(new_rows)
    g = pd.DataFrame(
        rows,
        columns=[
            "trip_id",
            "stop_id",
            "stop_sequence",
            "arrival_time",
            "departure_time",
            "shape_dist_traveled",
        ],
    )
    # Convert seconds back to time strings
    g[["arrival_time", "departure_time"]] = g[
        ["arrival_time", "departure_time"]
    ].applymap(lambda x: gk.timestr_to_seconds(x, inverse=True))
    return g
def build_feed(pfeed, buffer=cs.BUFFER):
    """
    Convert the given ProtoFeed into a GTFS Feed (with distance units
    in kilometers), dropping unused stops etc. at the end.
    """
    # Build the GTFS tables in dependency order.
    agency = build_agency(pfeed)
    calendar, service_by_window = build_calendar_etc(pfeed)
    routes = build_routes(pfeed)
    shapes = build_shapes(pfeed)
    stops = build_stops(pfeed, shapes)
    trips = build_trips(pfeed, routes, service_by_window)
    stop_times = build_stop_times(pfeed, routes, shapes, stops, trips, buffer=buffer)
    # Assemble the Feed and remove unused stops etc.
    feed = gk.Feed(
        agency=agency,
        calendar=calendar,
        routes=routes,
        shapes=shapes,
        stops=stops,
        stop_times=stop_times,
        trips=trips,
        dist_units="km",
    )
    return feed.drop_zombies()
|
{
"content_hash": "6ac887761291fc2d83ff85b2dae21c3f",
"timestamp": "",
"source": "github",
"line_count": 442,
"max_line_length": 88,
"avg_line_length": 34.73076923076923,
"alnum_prop": 0.5879747247736303,
"repo_name": "araichev/make_gtfs",
"id": "9b8ce04b11a9e2f9cd4f998ffb31ac854ae1b0f3",
"size": "15351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "make_gtfs/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "271144"
},
{
"name": "Python",
"bytes": "27805"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
from django.http import HttpResponse
from search import do_search
from time import clock
# Number of search hits displayed per results page.
PAGE_SIZE = 10
def home(request):
    """Render the static home page."""
    return render(request, 'home.html')
def search(request):
    """Run a search for the ``q`` GET parameter and render the results page.

    Returns a plain-text response when the form is submitted empty.
    """
    if 'q' in request.GET:
        q = request.GET['q']
        # Page number from the query string; kept as a string here and only
        # converted to int for ``nextpage`` below.
        if 'page' in request.GET:
            page = request.GET['page']
        else:
            page = 1
        # NOTE(review): ``time.clock`` was removed in Python 3.8; this module
        # appears to target Python 2 -- confirm before upgrading.
        start = clock()
        search_res = do_search(q, page)
        total_hits = search_res["total"]
        results = [each["_source"] for each in search_res["hits"]]
        end = clock()
        # NOTE(review): ``total_hits / PAGE_SIZE`` relies on Python 2 integer
        # division; under Python 3 ``total_page`` becomes a float.
        return render(request, 'res_search.html', {'results': results,
                                                   'query': q,
                                                   'count': total_hits,
                                                   'time': end - start,
                                                   'page': page,
                                                   'total_page': total_hits / PAGE_SIZE,
                                                   'host': request.META['SERVER_NAME'],
                                                   'port': request.META['SERVER_PORT'],
                                                   'nextpage': int(page) + 1})
    else:
        message = 'You submitted an empty form.'
        return HttpResponse(message)
|
{
"content_hash": "79be2e5ad0d839ec1c43baf4dc2a23d9",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 88,
"avg_line_length": 36.945945945945944,
"alnum_prop": 0.4272128749085589,
"repo_name": "Richardlihui/xundao",
"id": "a8bf17635b39be0d1a81146fa414e06b4b576e3f",
"size": "1382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elastic/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3193"
},
{
"name": "HTML",
"bytes": "5655"
},
{
"name": "Python",
"bytes": "6906"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
# Map of Shibboleth attribute headers to (required, user-attribute) pairs.
# At a minimum you will need username.
default_shib_attributes = {
    "Shibboleth-eppn": (True, "username"),
}
SHIB_ATTRIBUTE_MAP = getattr(settings, 'SHIBBOLETH_ATTRIBUTE_MAP', default_shib_attributes)

# Set to True if you are testing and want to insert sample headers.
SHIB_MOCK_HEADERS = getattr(settings, 'SHIBBOLETH_MOCK_HEADERS', False)

# A LOGIN_URL is mandatory: fail fast at import time when it is missing.
LOGIN_URL = getattr(settings, 'LOGIN_URL', None)
if not LOGIN_URL:
    raise ImproperlyConfigured("A LOGIN_URL is required. Specify in settings.py")

# Optional logout parameters.
# This should look like: https://sso.school.edu/idp/logout.jsp?return=%s
# The return url variable will be replaced in the LogoutView.
LOGOUT_URL = getattr(settings, 'SHIBBOLETH_LOGOUT_URL', None)

# LOGOUT_REDIRECT_URL specifies a default logout page that will always be
# used when users logout from Shibboleth.
LOGOUT_REDIRECT_URL = getattr(settings, 'SHIBBOLETH_LOGOUT_REDIRECT_URL', None)

# Session key used to force re-authentication. Probably no need to change this.
LOGOUT_SESSION_KEY = getattr(settings, 'SHIBBOLETH_FORCE_REAUTH_SESSION_KEY', 'shib_force_reauth')
|
{
"content_hash": "492ada3e0b42ddbf96b3f55116a95746",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 98,
"avg_line_length": 40.827586206896555,
"alnum_prop": 0.7508445945945946,
"repo_name": "UCL-RITS/django-shibboleth-remoteuser",
"id": "e64b19fc1a1a86408bf6d1f5186b6ba7300bb958",
"size": "1184",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "shibboleth/app_settings.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "371"
},
{
"name": "Python",
"bytes": "23786"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class DataDisk(Model):
    """Describes a data disk.
    :param lun: The logical unit number.
    :type lun: int
    :param name: The disk name.
    :type name: str
    :param vhd: The virtual hard disk.
    :type vhd: :class:`VirtualHardDisk
     <azure.mgmt.compute.compute.v2016_03_30.models.VirtualHardDisk>`
    :param image: The source user image virtual hard disk. This virtual hard
     disk will be copied before using it to attach to the virtual machine. If
     SourceImage is provided, the destination virtual hard disk must not exist.
    :type image: :class:`VirtualHardDisk
     <azure.mgmt.compute.compute.v2016_03_30.models.VirtualHardDisk>`
    :param caching: The caching type. Possible values include: 'None',
     'ReadOnly', 'ReadWrite'
    :type caching: str or :class:`CachingTypes
     <azure.mgmt.compute.compute.v2016_03_30.models.CachingTypes>`
    :param create_option: The create option. Possible values include:
     'fromImage', 'empty', 'attach'
    :type create_option: str or :class:`DiskCreateOptionTypes
     <azure.mgmt.compute.compute.v2016_03_30.models.DiskCreateOptionTypes>`
    :param disk_size_gb: The initial disk size in GB for blank data disks, and
     the new desired size for resizing existing OS and data disks.
    :type disk_size_gb: int
    """

    # Fields the service requires when this model is (de)serialized.
    _validation = {
        'lun': {'required': True},
        'name': {'required': True},
        'vhd': {'required': True},
        'create_option': {'required': True},
    }

    # Maps Python attribute names to wire (JSON) keys and msrest types.
    # NOTE(review): msrest may rely on this mapping's insertion order during
    # serialization -- do not reorder.
    _attribute_map = {
        'lun': {'key': 'lun', 'type': 'int'},
        'name': {'key': 'name', 'type': 'str'},
        'vhd': {'key': 'vhd', 'type': 'VirtualHardDisk'},
        'image': {'key': 'image', 'type': 'VirtualHardDisk'},
        'caching': {'key': 'caching', 'type': 'CachingTypes'},
        'create_option': {'key': 'createOption', 'type': 'DiskCreateOptionTypes'},
        'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
    }

    def __init__(self, lun, name, vhd, create_option, image=None, caching=None, disk_size_gb=None):
        # NOTE(review): no super().__init__() call is made here -- confirm
        # this matches other msrest-generated models before changing.
        self.lun = lun
        self.name = name
        self.vhd = vhd
        self.image = image
        self.caching = caching
        self.create_option = create_option
        self.disk_size_gb = disk_size_gb
|
{
"content_hash": "f897788af64dc70c2ae9300bcaded9b3",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 99,
"avg_line_length": 40.80357142857143,
"alnum_prop": 0.6315098468271335,
"repo_name": "SUSE/azure-sdk-for-python",
"id": "e5c703e1bdc6de65fcc83b6f908a241b88240986",
"size": "2759",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-compute/azure/mgmt/compute/compute/v2016_03_30/models/data_disk.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9090161"
}
],
"symlink_target": ""
}
|
import unittest
import numpy as np
from .. import permutation_test as pt
from nipy.algorithms.graph import wgraph_from_3d_grid
# Deliberately tiny permutation/draw counts to keep these tests fast.
nperms = 2
ndraws = 10
def make_data(n=10, mask_shape=(10, 10, 10), axis=0):
    """Generate random test data and the voxel coordinates of a full mask.

    Returns ``(data, vardata, XYZ)`` where ``data`` and ``vardata`` have
    shape ``(n, p)`` -- or ``(p, n)`` when ``axis == 1`` -- with ``p`` the
    number of voxels, and ``XYZ`` is the ``len(mask_shape) x p`` coordinate
    array.
    """
    mask = np.zeros(mask_shape, int)
    # Coordinates of every voxel (the mask is entirely zero).
    XYZ = np.array(np.where(mask == 0))
    p = XYZ.shape[1]
    data = np.random.randn(n, p)
    # Squaring guarantees non-negative variance estimates.
    vardata = np.random.randn(n, p) ** 2
    if axis == 1:
        return data.T, vardata.T, XYZ
    return data, vardata, XYZ
class test_permutation_test(unittest.TestCase):
    """Smoke tests for rfx and mfx permutation-test calibration.

    Fixes over the original: array indices are now ints
    (``P.random_Tvalues[P.ndraws * 0.95]`` and the ``shape[1] / 2`` slices
    used float indexing, which fails on Python 3 / modern numpy), and
    ``test_twosample`` builds its region labels the same way as the
    one-sample tests instead of slice-multiplying a one-element list.
    """

    def test_onesample(self):
        data, vardata, XYZ = make_data()
        # rfx calibration
        P = pt.permutation_test_onesample(data, XYZ, ndraws=ndraws)
        # Cluster-forming thresholds: the empirical 95th percentile, and the
        # median paired with a fixed diameter.
        c = [(P.random_Tvalues[int(P.ndraws * 0.95)], None),
             (P.random_Tvalues[int(P.ndraws * 0.5)], 18.)]
        # Two region labels: 1 for the first half of voxels, 10 for the rest.
        r = np.ones(data.shape[1], int)
        r[data.shape[1] // 2:] *= 10
        # mfx calibration
        P = pt.permutation_test_onesample(
            data, XYZ, vardata=vardata, stat_id="student_mfx", ndraws=ndraws)
        p_values, cluster_results, region_results = P.calibrate(
            nperms=nperms, clusters=c, regions=[r])

    def test_onesample_graph(self):
        data, vardata, XYZ = make_data()
        G = wgraph_from_3d_grid(XYZ.T)
        # rfx calibration
        P = pt.permutation_test_onesample_graph(data, G, ndraws=ndraws)
        c = [(P.random_Tvalues[int(P.ndraws * 0.95)], None)]
        r = np.ones(data.shape[1], int)
        r[data.shape[1] // 2:] *= 10
        # mfx calibration
        P = pt.permutation_test_onesample_graph(
            data, G, vardata=vardata, stat_id="student_mfx", ndraws=ndraws)
        p_values, cluster_results, region_results = P.calibrate(
            nperms=nperms, clusters=c, regions=[r])

    def test_twosample(self):
        data, vardata, XYZ = make_data(n=20)
        data1, vardata1, data2, vardata2 = (
            data[:10], vardata[:10], data[10:], vardata[10:])
        # rfx calibration
        P = pt.permutation_test_twosample(data1, data2, XYZ, ndraws=ndraws)
        c = [(P.random_Tvalues[int(P.ndraws * 0.95)], None),
             (P.random_Tvalues[int(P.ndraws * 0.5)], 10)]
        # Region labels built like the one-sample tests.  (Previously this
        # was a one-element list of arrays, so the slice-multiply mangled
        # the list instead of labelling the second half of the voxels.)
        r = np.ones(data.shape[1], int)
        r[data.shape[1] // 2:] *= 10
        # mfx calibration
        P = pt.permutation_test_twosample(
            data1, data2, XYZ, vardata1=vardata1, vardata2=vardata2,
            stat_id="student_mfx", ndraws=ndraws)
        p_values, cluster_results, region_results = P.calibrate(
            nperms=nperms, clusters=c, regions=[r])
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
{
"content_hash": "ad18e7a2a72799c24e6614a635845241",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 136,
"avg_line_length": 40.309859154929576,
"alnum_prop": 0.6149545772187281,
"repo_name": "bthirion/nipy",
"id": "da57e345515522c6bb12e51deeb3fd143843d217",
"size": "2976",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nipy/labs/group/tests/test_permutation_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6381240"
},
{
"name": "C++",
"bytes": "6189"
},
{
"name": "CSS",
"bytes": "8170"
},
{
"name": "M",
"bytes": "560"
},
{
"name": "Matlab",
"bytes": "4948"
},
{
"name": "Python",
"bytes": "3068962"
},
{
"name": "TeX",
"bytes": "238"
}
],
"symlink_target": ""
}
|
import time
def find_closest(look_for, target_data):
    """Return the element of ``target_data`` closest in value to ``look_for``.

    Ties are broken in favour of the earliest element, matching the original
    scan order.

    Fixes over the original implementation: differences larger than the old
    hard-coded 9999999 sentinel no longer silently misbehave, and an empty
    ``target_data`` now raises ``ValueError`` (via ``min``) instead of an
    obscure ``UnboundLocalError``.
    """
    # ``min`` scans once and keeps the first element with the smallest
    # absolute difference -- same tie-breaking as the original loop.
    return min(target_data, key=lambda value: abs(value - look_for))
def time2secs(time_string):
    """Convert an ``'HH:MM:SS'`` string into a total number of seconds."""
    hours, minutes, secs = (int(part) for part in time_string.split(':'))
    return hours * 3600 + minutes * 60 + secs
def secs2time(seconds):
    """Format a number of seconds as an ``'HH:MM:SS'`` string."""
    # gmtime decomposes the seconds; strftime renders the clock fields.
    clock_fields = time.gmtime(seconds)
    return time.strftime('%H:%M:%S', clock_fields)
|
{
"content_hash": "72f6cd35c541abc03f1f682d4accae5d",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 61,
"avg_line_length": 27.066666666666666,
"alnum_prop": 0.5665024630541872,
"repo_name": "tdean1995/HFPythonSandbox",
"id": "f7f3d9fa4822c626cc3ed2ceaac1567d407ca197",
"size": "813",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "hfpython_code/hfpy_code/chapter11/find_it.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5893"
},
{
"name": "Python",
"bytes": "103816"
}
],
"symlink_target": ""
}
|
import matplotlib.pyplot as plt
import pandas as pd
import itertools as it
import numpy as np
import pymc3 as pm
import sys
from pymc3.variational.callbacks import CheckParametersConvergence
def FitMyModel(trainDM,PredDM):
    """Fit a Bernoulli latent-GP model and sample posterior predictions.

    ``trainDM`` is a (response, design-matrix) pair -- grounded by the
    unpacking into ``Ydf``/``TXdf`` below -- and ``PredDM`` is the
    prediction design matrix.  Returns ``(pred_samples, trace)``.
    """
    with pm.Model() as model:
        # partition dataframes df
        Ydf = trainDM[0]
        TXdf = trainDM[1]
        # NOTE(review): PXdf is assigned but never used below.
        PXdf = PredDM
        ## Parameters for linear predictor
        #b0 = pm.Normal('b0',mu=0,sd=10)
        #dum_names = filter(lambda col : str(col).startswith('inegiv5name'),TXdf)
        #dumsdf = TXdf[dum_names]
        #dumshape = dumscols.shape
        #coordsdf = TXdf[['Longitude','Latitude']]
        # Create vectors for dumi vars
        #drvs = map(lambda col : pm.Normal(col,mu=0,sd=1.5),dum_names)
        ## Create theano vector
        # One regression coefficient per design-matrix column.
        dimX = len(TXdf.columns)
        b = pm.Normal('b',mu=0,sd=1.5,shape=dimX)
        #mk = pm.math.matrix_dot(TXdf.values,b.transpose())
        ## The latent function
        # Column positions of the spatial coordinates, used as the
        # active dimensions of the spatial covariance below.
        x_index = TXdf.columns.get_loc("Longitude")
        y_index = TXdf.columns.get_loc("Latitude")
        ## Building the covariance structure
        tau = pm.HalfNormal('tau',sd=10)
        sigma = pm.HalfNormal('sigma',sd=10)
        #phi = pm.Uniform('phi',0,15)
        phi = pm.HalfNormal('phi',sd=6)
        # Nugget (constant) term plus a scaled Matern-3/2 kernel over the
        # Longitude/Latitude columns.
        Tau = pm.gp.cov.Constant(tau)
        cov = (sigma * pm.gp.cov.Matern32(2,phi,active_dims=[x_index,y_index])) + Tau
        # Linear mean function with the regression coefficients.
        mean_f = pm.gp.mean.Linear(coeffs=b)
        gp = pm.gp.Latent(mean_func=mean_f,cov_func=cov)
        f = gp.prior("latent_field", X=TXdf.values,reparameterize=False)
        # Bernoulli likelihood with the latent field as the logit.
        yy = pm.Bernoulli("yy",logit_p=f,observed=Ydf.values)
        #trace = pm.fit(method='advi', callbacks=[CheckParametersConvergence()],n=15000)
        # NOTE(review): only 15 draws -- presumably a smoke-test setting;
        # confirm before using for real inference.
        trace = pm.sample(15,init='adapt_diag')
        #trace = trace.sample(draws=5000)
        # Remove any column that doesnt appear in the training data
        ValidPreds = PredDM[TXdf.columns]
        PredX = ValidPreds.values
        # Posterior-predictive samples of the latent field at the
        # prediction locations.
        f_star = gp.conditional("f_star", PredX)
        pred_samples = pm.sample_ppc(trace, vars=[f_star], samples=100)
    return pred_samples,trace
# --- Script driver: load training/prediction datasets, build design
# --- matrices, ready for FitMyModel above.
sys.path.append('/home/hpc/28/escamill/spystats')
from spystats import utilities as ut
#train_path = '/storage/users/escamill/presence-only-model/input/train'
train_path = '/mnt/data1/outputs/presence_only_models/data/burseras'
train_dataset = ut.loadDataset(train_path)
#train_path = '/outputs/presence_only_models/data/root'
#train_dataset = ut.loadDataset(train_path)
## Predictors
#pred_path = '/storage/users/escamill/presence-only-model/input/pred'
pred_path = '/mnt/data1/outputs/presence_only_models/predictors/datasetp2'
pred_dataset = ut.loadDataset(pred_path)
### PATCH, the thing is taking backwards the order of the lists of files, because of the name
#pred_dataset.reverse()
# NOTE(review): on Python 3, map() is a lazy iterator consumed exactly once
# by list() below — do not iterate prediction_dataset_dic twice.
prediction_dataset_dic= map(lambda p : ut.preparePredictors(p),pred_dataset)
# Index of the dataset slice to model in this run.
i = 4
datatrain = list(train_dataset)[i]
#Y = datatrain.Burseraceae
#Y = datatrain.Burseraceae
datapred = list(prediction_dataset_dic)[i]
## Assign categorical values
datatrain.name = datatrain.name.astype('category')
datapred['full'].name = datapred['full'].name.astype('category')
datapred['clean'].name = datapred['clean'].name.astype('category')
from patsy import dmatrices,dmatrix
# Design matrices: response LUCA regressed on coordinates plus covariates.
TM = dmatrices('LUCA ~ Longitude + Latitude + Q("Dist.to.road_m") + Population_m ',datatrain,return_type="dataframe")
#TM = dmatrices('Burseraceae ~ Longitude + Latitude + DistanceToRoadMex_mean + WorldPopLatam2010_mean + inegiv5name',datatrain)
PM = dmatrix('Longitude + Latitude + Q("Dist.to.road_m") + Population_m',datapred['clean'],return_type='dataframe')
#PM = dmatrix('Longitude + Latitude + Q("Dist.to.road_m") + Population_m + name',datapred['clean'])
|
{
"content_hash": "28587e56425da9a0c066abcc749a1922",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 127,
"avg_line_length": 37.277227722772274,
"alnum_prop": 0.6735723771580345,
"repo_name": "molgor/spystats",
"id": "d96ddf09d9741fc42b9770b4c400d667b49f1b76",
"size": "3822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "biospytial_models/testmodel.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "1026"
},
{
"name": "Dockerfile",
"bytes": "905"
},
{
"name": "Jupyter Notebook",
"bytes": "62905486"
},
{
"name": "Python",
"bytes": "135774"
},
{
"name": "Shell",
"bytes": "705"
}
],
"symlink_target": ""
}
|
import argparse
from pyminutiaeviewer import gui
# CLI entry: with -d/--draw-minutiae the script validates arguments for a
# headless draw; with no arguments it launches the interactive GUI.
parser = argparse.ArgumentParser(description='Py Minutiae Viewer')
parser.add_argument('-d', '--draw-minutiae', nargs='+', dest='draw_minutiae',
                    metavar=('FINGERPRINT_IMAGE', 'MINUTIAE_FILE'),
                    help='Draws minutiae on to the FINGERPRINT_IMAGE, needs the output-image flag to be set. '
                         'If no MINUTIAE_FILE is set then a .min file with the same names as the FINGERPRINT_IMAGE '
                         'is assumed.')
parser.add_argument('-o', '--output-image', nargs=1, dest='output_image',
                    metavar='OUTPUT_IMAGE',
                    help='The location to save the output image.')
args = parser.parse_args()
if args.draw_minutiae is not None:
    if args.output_image is None:
        parser.error('Missing output image, set --output-image.')
    # NOTE(review): after validation nothing is drawn — the headless draw
    # path appears to be unimplemented (only this `pass`).
    pass
else:
    # No CLI work requested: start the Tk GUI main loop.
    gui.Root().mainloop()
|
{
"content_hash": "f40ac9984cbf5f4671fa458f88b69324",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 116,
"avg_line_length": 41.90909090909091,
"alnum_prop": 0.6193058568329718,
"repo_name": "IgniparousTempest/py-minutiae-viewer",
"id": "a96f3a872ef518140ef6869713d017010eef90f5",
"size": "922",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py-minutiae-viewer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "48308"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
from django.core.checks import register, Tags
from django.utils.translation import gettext_lazy as _
class TourismConfig(AppConfig):
    """Django app config for geotrek.tourism; registers a security check."""
    name = 'geotrek.tourism'
    verbose_name = _("Tourism")

    def ready(self):
        # Imported here (not at module level) because forms need app
        # registry readiness.
        from .forms import TouristicContentForm, TouristicEventForm

        def check_hidden_fields_settings(app_configs, **kwargs):
            # Aggregate hidden-field configuration errors from both forms.
            errors = []
            for form_class in (TouristicContentForm, TouristicEventForm):
                errors.extend(form_class.check_fields_to_hide())
            return errors

        register(check_hidden_fields_settings, Tags.security)
|
{
"content_hash": "ac2e349d3cfabbf64ad86baef9b7d875",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 68,
"avg_line_length": 34.8421052631579,
"alnum_prop": 0.7009063444108762,
"repo_name": "makinacorpus/Geotrek",
"id": "dbd5dd2beedfe63009b1aec63f7edc466a4b9c28",
"size": "662",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "geotrek/tourism/apps.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "30638"
},
{
"name": "HTML",
"bytes": "141008"
},
{
"name": "JavaScript",
"bytes": "184508"
},
{
"name": "Makefile",
"bytes": "4170"
},
{
"name": "PLpgSQL",
"bytes": "85546"
},
{
"name": "Python",
"bytes": "2768434"
},
{
"name": "Shell",
"bytes": "18090"
}
],
"symlink_target": ""
}
|
import argparse
import re
import os
import shutil
import sqlite3
import subprocess
import sys
import tempfile
import zipfile
def extract_namespaces(source_path, base_namespace='', depth=1):
    """Recursively collect dotted namespace names from a directory tree.

    Only directories are considered. Names with a single component
    (depth 1) are deliberately skipped; 'a/b/c' yields {'a.b', 'a.b.c'}.
    """
    found = set()
    for child in os.listdir(source_path):
        child_path = os.path.join(source_path, child)
        if not os.path.isdir(child_path):
            continue
        qualified = '{0}{1}'.format(base_namespace, child)
        if depth > 1:
            found.add(qualified)
        found |= extract_namespaces(
            child_path, base_namespace=qualified + '.', depth=depth + 1
        )
    return found
def convert_to_jar(dex2jar_path, directory):
    """Run dex2jar on classes.dex inside `directory`; return the jar path.

    Raises subprocess.CalledProcessError on conversion failure and
    subprocess.TimeoutExpired after 60 seconds.
    """
    dex_file = os.path.join(directory, 'classes.dex')
    jar_file = os.path.join(directory, 'classes.jar')
    command = [dex2jar_path, '-e', '/dev/null', '-o', jar_file, dex_file]
    output = subprocess.check_output(command, stderr=subprocess.STDOUT, timeout=60)
    # dex2jar can exit 0 even when it fails, so detect errors in its output.
    if b'Detail Error Information in File' in output:
        raise subprocess.CalledProcessError(1, command)
    return jar_file
def process_apk(config, apk_path):
    """Extract the namespaces used by a single APK.

    Unpacks classes.dex, converts it to a jar with dex2jar, unpacks the
    jar, and reads namespaces from the resulting directory layout.
    Returns a set of namespaces, or an empty list if conversion fails.
    """
    # All intermediate files live in a throwaway directory under the
    # configured working directory.
    with tempfile.TemporaryDirectory(dir=config.working_directory) as tmp_dir:
        with zipfile.ZipFile(apk_path) as apk_zip:
            print('==> Extracting classes.dex... ', end='')
            apk_zip.extract('classes.dex', path=tmp_dir)
            print('done.')
        try:
            print('==> Converting classes.dex to classes.jar...', end='')
            jar_path = convert_to_jar(config.dex2jar_path, tmp_dir)
            print('done.')
        except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
            # Best-effort: a broken APK is reported and skipped, not fatal.
            print('failed.\n[ERROR] Failed to convert dex into jar.')
            return []
        with zipfile.ZipFile(jar_path) as jar_zip:
            print('==> Extracting JAR classes... ', end='')
            jar_zip.extractall(path=tmp_dir)
            print('done.')
        return extract_namespaces(tmp_dir)
def process_apks(config):
    """Scan config.path for APKs and record each one's namespaces in the DB.

    Filenames must look like `<id>[-<build>]-<YYYY_MM_DD>.apk`; anything
    else is reported and skipped.
    """
    # Raw string fix: '\d' in a plain literal is an invalid escape sequence
    # (SyntaxWarning on modern CPython, planned to become an error).
    apk_regex = re.compile(r'^([^-]+)-?(\d*)?-(\d{4}_\d{2}_\d{2})\.apk$',
                           re.IGNORECASE)
    items = os.listdir(config.path)
    total = len(items)
    # enumerate replaces the original manual `current` counter.
    for current, name in enumerate(items, start=1):
        apk_path = os.path.join(config.path, name)
        print(':: Processing {0} of {1}: '.format(current, total), end='')
        match = apk_regex.match(name)
        if os.path.isfile(apk_path) and match:
            insert_apk(config, match)
            print(match.group(1))
            namespaces = process_apk(config, apk_path)
            insert_namespaces(config, match.group(1), namespaces)
        else:
            print('{0} is not an APK'.format(name))
def insert_namespaces(config, apk_id, namespaces):
    """Insert one `namespaces` row per namespace for the given APK id.

    Resolves the original TODO: a single executemany replaces the
    per-row execute loop.
    """
    cur = config.db.cursor()
    cur.executemany('''
        INSERT INTO namespaces
        (apk_id, body)
        VALUES(?, ?)
    ''', [(apk_id, namespace) for namespace in namespaces])
    cur.close()
def insert_apk(config, match):
    """Record one APK row from a filename regex match.

    group(1) is the app id, group(3) the date, group(0) the full filename.
    """
    apk_id = match.group(1)
    apk_date = match.group(3)
    filename = match.group(0)
    cur = config.db.cursor()
    cur.execute('''
        INSERT INTO apks
        (id, date, filename)
        VALUES(?, ?, ?)
    ''', (apk_id, apk_date, filename))
    cur.close()
def init_database(config):
    """Open the SQLite DB named by config.db_filename and create tables.

    Stores the open connection on config.db.
    """
    ddl_statements = (
        '''
        CREATE TABLE IF NOT EXISTS apks
        (
            id text,
            date text,
            filename text
        )
        ''',
        '''
        CREATE TABLE IF NOT EXISTS namespaces
        (
            apk_id text,
            body text,
            FOREIGN KEY(apk_id) REFERENCES apks(id)
        )
        ''',
    )
    config.db = sqlite3.connect(config.db_filename)
    cursor = config.db.cursor()
    for statement in ddl_statements:
        cursor.execute(statement)
    cursor.close()
# Shared message template for the path-validating argparse types below.
err_msg = '{0} is not a valid path'


def dir(argument):
    """argparse type: accept only a path that exists on disk.

    (Name shadows the builtin, but it is referenced as `type=dir` by the
    argument parser, so it is kept for compatibility.)
    """
    if os.path.exists(argument):
        return argument
    raise argparse.ArgumentTypeError(err_msg.format(argument))
def file(argument):
    """argparse type for an output file: the file itself need not exist,
    but when it does not, its parent directory must.
    """
    absolute = os.path.abspath(argument)
    if not os.path.exists(absolute):
        parent = os.path.dirname(absolute)
        if not os.path.exists(parent):
            raise argparse.ArgumentTypeError(err_msg.format(argument))
    return argument
def executable(argument):
    """argparse type: accept a command found on PATH or an existing path."""
    if shutil.which(argument) or os.path.exists(argument):
        return argument
    raise argparse.ArgumentTypeError(err_msg.format(argument))
def parse_arguments():
    """Build the CLI parser and return parsed arguments.

    Prints help and exits with status 1 when invoked without arguments.
    """
    arg_parser = argparse.ArgumentParser(
        description='Script to unpackage, extract, and analyze a directory '
                    'of APKs in order to determine the usage of namespaces.'
    )
    arg_parser.add_argument(
        'path', type=dir,
        help='Path to the directory with the APKs.'
    )
    arg_parser.add_argument(
        '--database', dest='db_filename', default='results.sqlite',
        metavar='PATH', type=file,
        help='Name of the resulting SQLite database filename.'
    )
    arg_parser.add_argument(
        '--dex2jar', dest='dex2jar_path', default='dex2jar',
        metavar='PATH', type=executable,
        help='Path to the `dex2jar\' executable.'
    )
    arg_parser.add_argument(
        '--working-dir', dest='working_directory', default='/tmp',
        metavar='PATH', type=dir,
        help='Path to the working directory in which to '
             'extract and analyze each APK.'
    )
    if len(sys.argv) < 2:
        arg_parser.print_help()
        sys.exit(1)
    return arg_parser.parse_args()
def main():
    """Entry point: parse CLI args, set up the DB, process all APKs,
    then commit and close the connection."""
    config = parse_arguments()
    init_database(config)
    process_apks(config)
    # Single commit at the end: all inserts are one transaction.
    config.db.commit()
    config.db.close()


if __name__ == '__main__':
    main()
|
{
"content_hash": "c265acc6295a58251c75eb5b54eab66e",
"timestamp": "",
"source": "github",
"line_count": 209,
"max_line_length": 79,
"avg_line_length": 27.60287081339713,
"alnum_prop": 0.5917836713468538,
"repo_name": "craigcabrey/apk-namespace-analyzer",
"id": "efec82c624f844f4f9d2a95fdaec710bdde77c3e",
"size": "5793",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "namespace-analyzer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5793"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# Make the documentation root importable for autodoc.
sys.path.insert(0, os.path.abspath('.'))

# -- General configuration -----------------------------------------------

# Pull API documentation from docstrings, in source order.
extensions = ['sphinx.ext.autodoc']
autodoc_member_order = 'bysource'

# Templates, source suffix and the master toctree document.
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'

# Project metadata.
project = u'Dropbox'
copyright = u'2011, Dropbox, Inc.'

# Directories ignored when looking for source files.
exclude_patterns = ['_build']

# Pygments (syntax highlighting) style.
pygments_style = 'sphinx'

# -- Options for HTML output ---------------------------------------------

html_theme = 'default'
# Convert quotes and dashes to typographically correct entities.
html_use_smartypants = True
# Output file base name for the HTML help builder.
htmlhelp_basename = 'dropboxdoc'

# -- Options for LaTeX output --------------------------------------------

# (source start file, target name, title, author, documentclass).
latex_documents = [
    ('index', 'dropbox.tex', u'dropbox Documentation',
     u'Dropbox, Inc.', 'manual'),
]

# -- Options for manual page output --------------------------------------

# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'dropbox', u'dropbox Documentation',
     [u'Dropbox, Inc.'], 1)
]
|
{
"content_hash": "711ecdccf587eae561af1bde73b7c121",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 80,
"avg_line_length": 32.57142857142857,
"alnum_prop": 0.7055353901996371,
"repo_name": "architv/dropbox-api-dev",
"id": "c455e0eda0f2158685b985eb589a1a24e0c1b999",
"size": "7030",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "17354"
},
{
"name": "JavaScript",
"bytes": "54458"
},
{
"name": "Python",
"bytes": "147030"
}
],
"symlink_target": ""
}
|
"""Beta API server implementation."""
import threading
from grpc._links import service
from grpc.beta import interfaces
from grpc.framework.core import implementations as _core_implementations
from grpc.framework.crust import implementations as _crust_implementations
from grpc.framework.foundation import logging_pool
from grpc.framework.interfaces.base import base
from grpc.framework.interfaces.links import utilities
_DEFAULT_POOL_SIZE = 8
_DEFAULT_TIMEOUT = 300
_MAXIMUM_TIMEOUT = 24 * 60 * 60
def _set_event():
event = threading.Event()
event.set()
return event
class _GRPCServicer(base.Servicer):
    """Wraps a servicer, annotating unadorned NoSuchMethodError.

    A NoSuchMethodError carrying no code/details is re-raised with
    UNIMPLEMENTED status; any other NoSuchMethodError propagates as-is.
    """

    def __init__(self, delegate):
        self._delegate = delegate

    def service(self, group, method, context, output_operator):
        try:
            return self._delegate.service(group, method, context, output_operator)
        except base.NoSuchMethodError as e:
            if e.code is None and e.details is None:
                # Delegate gave no status: translate to UNIMPLEMENTED.
                raise base.NoSuchMethodError(
                    interfaces.StatusCode.UNIMPLEMENTED,
                    'Method "%s" of service "%s" not implemented!' % (method, group))
            else:
                raise
class _Server(interfaces.Server):
    """Beta-API server bridging a gRPC transport link and a core end link.

    All state transitions are guarded by self._lock. Lifecycle:
    not started (_end_link is None) -> started -> stopping
    (_stop_events holds waiter events) -> stopped (_end_link None again).
    """

    def __init__(
            self, implementations, multi_implementation, pool, pool_size,
            default_timeout, maximum_timeout, grpc_link):
        self._lock = threading.Lock()
        self._implementations = implementations
        self._multi_implementation = multi_implementation
        # Pool supplied by the caller; when None we create and own one.
        self._customer_pool = pool
        self._pool_size = pool_size
        self._default_timeout = default_timeout
        self._maximum_timeout = maximum_timeout
        self._grpc_link = grpc_link
        self._end_link = None
        self._stop_events = None
        self._pool = None

    def _start(self):
        # Build the servicer/end link and join both links; ValueError if
        # the server is already running.
        with self._lock:
            if self._end_link is not None:
                raise ValueError('Cannot start already-started server!')
            if self._customer_pool is None:
                self._pool = logging_pool.pool(self._pool_size)
                assembly_pool = self._pool
            else:
                assembly_pool = self._customer_pool
            servicer = _GRPCServicer(
                _crust_implementations.servicer(
                    self._implementations, self._multi_implementation, assembly_pool))
            self._end_link = _core_implementations.service_end_link(
                servicer, self._default_timeout, self._maximum_timeout)
            self._grpc_link.join_link(self._end_link)
            self._end_link.join_link(self._grpc_link)
            self._grpc_link.start()
            self._end_link.start()

    def _dissociate_links_and_shut_down_pool(self):
        # Caller must hold self._lock. Detaches both links and shuts down
        # the pool only if we created it ourselves.
        self._grpc_link.end_stop()
        self._grpc_link.join_link(utilities.NULL_LINK)
        self._end_link.join_link(utilities.NULL_LINK)
        self._end_link = None
        if self._pool is not None:
            self._pool.shutdown(wait=True)
            self._pool = None

    def _stop_stopping(self):
        # Complete a graceful stop already in progress and wake every
        # waiter registered via stop(grace). Caller holds self._lock.
        self._dissociate_links_and_shut_down_pool()
        for stop_event in self._stop_events:
            stop_event.set()
        self._stop_events = None

    def _stop_started(self):
        # Immediate stop from the running state (no grace period).
        # Caller holds self._lock.
        self._grpc_link.begin_stop()
        self._end_link.stop(0).wait()
        self._dissociate_links_and_shut_down_pool()

    def _foreign_thread_stop(self, end_stop_event, stop_events):
        # Helper-thread body: wait for the end link to finish stopping,
        # then finalize only if this stop attempt is still the current one.
        end_stop_event.wait()
        with self._lock:
            if self._stop_events is stop_events:
                self._stop_stopping()

    def _schedule_stop(self, grace):
        # Begin (or join) a graceful stop; returns an event that is set
        # once the stop completes.
        with self._lock:
            if self._end_link is None:
                # Not running: nothing to stop, hand back a set event.
                return _set_event()
            server_stop_event = threading.Event()
            if self._stop_events is None:
                self._stop_events = [server_stop_event]
                self._grpc_link.begin_stop()
            else:
                self._stop_events.append(server_stop_event)
            end_stop_event = self._end_link.stop(grace)
            end_stop_thread = threading.Thread(
                target=self._foreign_thread_stop,
                args=(end_stop_event, self._stop_events))
            end_stop_thread.start()
            return server_stop_event

    def _stop_now(self):
        # Synchronous stop regardless of whether a graceful stop started.
        with self._lock:
            if self._end_link is not None:
                if self._stop_events is None:
                    self._stop_started()
                else:
                    self._stop_stopping()

    def add_insecure_port(self, address):
        # Ports may only be added before the server starts.
        with self._lock:
            if self._end_link is None:
                return self._grpc_link.add_port(address, None)
            else:
                raise ValueError('Can\'t add port to serving server!')

    def add_secure_port(self, address, server_credentials):
        # Ports may only be added before the server starts.
        with self._lock:
            if self._end_link is None:
                return self._grpc_link.add_port(
                    address, server_credentials._low_credentials)  # pylint: disable=protected-access
            else:
                raise ValueError('Can\'t add port to serving server!')

    def start(self):
        self._start()

    def stop(self, grace):
        # Positive grace: asynchronous graceful stop; otherwise stop now
        # and return an already-set event.
        if 0 < grace:
            return self._schedule_stop(grace)
        else:
            self._stop_now()
            return _set_event()

    def __enter__(self):
        self._start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._stop_now()
        return False

    def __del__(self):
        self._stop_now()
def server(
        implementations, multi_implementation, request_deserializers,
        response_serializers, thread_pool, thread_pool_size, default_timeout,
        maximum_timeout):
    """Create a _Server wired to a fresh service link, applying defaults
    for any of pool size / default timeout / maximum timeout left as None.
    """
    effective_pool_size = (
        _DEFAULT_POOL_SIZE if thread_pool_size is None else thread_pool_size)
    effective_default_timeout = (
        _DEFAULT_TIMEOUT if default_timeout is None else default_timeout)
    effective_maximum_timeout = (
        _MAXIMUM_TIMEOUT if maximum_timeout is None else maximum_timeout)
    grpc_link = service.service_link(request_deserializers, response_serializers)
    return _Server(
        implementations, multi_implementation, thread_pool,
        effective_pool_size, effective_default_timeout,
        effective_maximum_timeout, grpc_link)
|
{
"content_hash": "095a18936e241800916d7a774665872d",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 93,
"avg_line_length": 30.677777777777777,
"alnum_prop": 0.6608113002535313,
"repo_name": "miselin/grpc",
"id": "eb0aadb42f975a894ae1633f6d215910ce220967",
"size": "7051",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/python/grpcio/grpc/beta/_server.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "12656"
},
{
"name": "C",
"bytes": "4911267"
},
{
"name": "C#",
"bytes": "1107955"
},
{
"name": "C++",
"bytes": "1361012"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "282482"
},
{
"name": "M4",
"bytes": "34062"
},
{
"name": "Makefile",
"bytes": "591839"
},
{
"name": "Objective-C",
"bytes": "274724"
},
{
"name": "PHP",
"bytes": "128996"
},
{
"name": "Protocol Buffer",
"bytes": "102293"
},
{
"name": "Python",
"bytes": "1586857"
},
{
"name": "Ruby",
"bytes": "498986"
},
{
"name": "Shell",
"bytes": "49986"
},
{
"name": "Swift",
"bytes": "5279"
}
],
"symlink_target": ""
}
|
import numpy as np
import sys
sys.path.append("src")
import yamcmcpp
class Theta(yamcmcpp.Parameter):
    """Parameter theta = (mu, sigsqr) for a normal model, with data and
    normal/inverse-gamma-style prior hyperparameters attached."""

    def __init__(self, track, name, temperature=1.0):
        yamcmcpp.Parameter.__init__(self, track, name, temperature)
        self._data = None
        self._ndata = 0

    def setData(self, data):
        # Data the parameter's starting value is estimated from.
        self._data = data
        self._ndata = len(data)

    def setPrior(self, pmean, pvar, palpha, pbeta):
        # Prior hyperparameters (presumably mean/variance for mu and
        # shape/scale for sigsqr — confirm against yamcmcpp).
        self._pmean = pmean
        self._pvar = pvar
        self._palpha = palpha
        self._pbeta = pbeta

    def StartingValue(self):
        """Return [sample mean, sample variance] of the attached data.

        Bug fix: the original called the non-existent np.variance on the
        never-defined self._variance, and passed it as np.array's second
        positional argument — which is dtype, not a second element.
        """
        return np.array([np.mean(self._data), np.var(self._data)])
def test_ensemble_mcmc():
    # Smoke test: simulate normal data and wire up the yamcmcpp sampler
    # components. NOTE(review): Python 2 code (print statements).
    RandGen = yamcmcpp.RandomGenerator()
    true_mean = 3.4
    true_var = 2.3
    nsample = 1000
    simulated_data = []
    for i in range(nsample):
        simulated_data.append(RandGen.normal(true_mean, np.sqrt(true_var)))
    simulated_data = np.array(simulated_data)
    print simulated_data
    theta = Theta(True, "theta = (mu,sigsqr)")
    theta.setData(simulated_data);
    # Diffuse prior hyperparameters.
    prior_mean = 0.0
    prior_variance = 1.0e6
    prior_alpha = 0.0
    prior_beta = 0.0
    theta.setPrior(prior_mean, prior_variance, prior_alpha, prior_beta)
    print theta
    # Small isotropic proposal covariance for the 2-d parameter.
    covariance = np.identity((2))
    covariance *= 1e-2
    print covariance
    thetaProp = yamcmcpp.NormalProposal(1.0)
    print thetaProp
    burnin = 100
    thin = 1
    normalSampler = yamcmcpp.Sampler(nsample, burnin, thin)
    print normalSampler
if __name__ == "__main__":
    test_ensemble_mcmc()
|
{
"content_hash": "f7b2917d580e2d9e5b2998fe61f81e9c",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 75,
"avg_line_length": 25.60655737704918,
"alnum_prop": 0.6190781049935979,
"repo_name": "brandonckelly/yamcmcpp",
"id": "7b4049f1bdcbd1f614bfcd4cf5709f652ed5ccac",
"size": "1562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yamcmc++/yamcmc_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "121546"
},
{
"name": "Python",
"bytes": "22214"
}
],
"symlink_target": ""
}
|
""" Dummy Module
"""
from zope.interface import moduleProvides
from zope.interface.tests.idummy import IDummyModule
moduleProvides(IDummyModule)
def bar(baz):
    """No-op stub for the interface method declared by IDummyModule."""
    # Note: no 'self', because the module provides the interface directly.
    pass
|
{
"content_hash": "5c425e2a832e04a6f2fc0e128b5c7b6f",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 75,
"avg_line_length": 25.6,
"alnum_prop": 0.73046875,
"repo_name": "timkrentz/SunTracker",
"id": "29a2a18f3c63cf9740d61b3056976c7757ddf567",
"size": "911",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "IMU/VTK-6.2.0/ThirdParty/ZopeInterface/zope/interface/tests/dummy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Arduino",
"bytes": "185699"
},
{
"name": "Assembly",
"bytes": "38582"
},
{
"name": "Batchfile",
"bytes": "110"
},
{
"name": "C",
"bytes": "48362836"
},
{
"name": "C++",
"bytes": "70478135"
},
{
"name": "CMake",
"bytes": "1755036"
},
{
"name": "CSS",
"bytes": "147795"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "190912"
},
{
"name": "Groff",
"bytes": "66799"
},
{
"name": "HTML",
"bytes": "295090"
},
{
"name": "Java",
"bytes": "203238"
},
{
"name": "JavaScript",
"bytes": "1146098"
},
{
"name": "Lex",
"bytes": "47145"
},
{
"name": "Makefile",
"bytes": "5461"
},
{
"name": "Objective-C",
"bytes": "74727"
},
{
"name": "Objective-C++",
"bytes": "265817"
},
{
"name": "Pascal",
"bytes": "3407"
},
{
"name": "Perl",
"bytes": "178176"
},
{
"name": "Prolog",
"bytes": "4556"
},
{
"name": "Python",
"bytes": "16497901"
},
{
"name": "Shell",
"bytes": "48835"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Tcl",
"bytes": "1955829"
},
{
"name": "Yacc",
"bytes": "180651"
}
],
"symlink_target": ""
}
|
import grpc
import csv
#from tables import Table
from enum import IntEnum
from dynagatewaytypes import datatypes_pb2
from dynagatewaytypes import enums_pb2
from dynagatewaytypes import general_types_pb2
from dynagatewaytypes import authentication_pb2_grpc
from dynagatewaytypes import authentication_pb2
from dynagatewaytypes import action_pb2_grpc
from dynagatewaytypes import action_pb2
from dynagatewaytypes import topology_pb2_grpc
from dynagatewaytypes import topology_pb2
from dynagatewaytypes import label_pb2_grpc
from dynagatewaytypes import label_pb2
from dynagatewaytypes import instance_pb2_grpc
from dynagatewaytypes import instance_pb2
from dynagatewaytypes import query_pb2_grpc
from dynagatewaytypes import query_pb2
from dynagatewaytypes import networkquery_pb2_grpc
from dynagatewaytypes import networkquery_pb2
class Service(IntEnum):
    """Index of each gateway stub within Client._services."""
    ACTION_SERVICE = 0
    TOPOLOGY_SERVICE = 1
    LABEL_SERVICE = 2
    INSTANCE_SERVICE = 3
    QUERY_SERVICE = 4
    NETWORK_QUERY_SERVICE = 5
class Client:
    """gRPC client for the Dyna gateway.

    Opens an insecure channel to host:port, authenticates via one of the
    *_login methods, and exposes per-domain stubs through service().
    Usable as a context manager; the channel is closed on exit.
    """

    def __init__(self, host, port):
        self._channel = grpc.insecure_channel('{0}:{1}'.format(host, port))
        self._authservice = authentication_pb2_grpc.AuthenticateServiceStub(self._channel)
        # One stub per Service enum value, indexed by that value.
        self._services = [None]*6
        self._services[Service.ACTION_SERVICE] = action_pb2_grpc.ActionServiceStub(self._channel)
        self._services[Service.TOPOLOGY_SERVICE] = topology_pb2_grpc.TopologyServiceStub(self._channel)
        self._services[Service.LABEL_SERVICE] = label_pb2_grpc.LabelServiceStub(self._channel)
        self._services[Service.INSTANCE_SERVICE] = instance_pb2_grpc.InstanceServiceStub(self._channel)
        self._services[Service.QUERY_SERVICE] = query_pb2_grpc.QueryServiceStub(self._channel)
        self._services[Service.NETWORK_QUERY_SERVICE] = networkquery_pb2_grpc.NetworkServiceStub(self._channel)
        self._token = None
        self._metadata = []

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def close(self):
        """Close the underlying gRPC channel."""
        self._channel.close()

    def _get_token(self, login_req):
        """Shared login flow for the three *_login methods.

        Fetches a token and caches the authorization metadata. Returns
        True on success; on grpc.RpcError prints the error and returns
        False (preserving the original best-effort behavior).
        """
        try:
            token_resp = self._authservice.GetToken(login_req)
        except grpc.RpcError as err:
            print(err)
            return False
        self._token = token_resp.token
        self._metadata = [('authorization', 'Bearer {0}'.format(self._token))]
        return True

    def user_login(self, username, password):
        """Authenticate with a username/password pair."""
        return self._get_token(authentication_pb2.GetTokenReq(
            user = authentication_pb2.UserAuth(
                user_name = username,
                password = password
            )
        ))

    def service_login(self, client_id, secret):
        """Authenticate as a service account with client id and secret."""
        return self._get_token(authentication_pb2.GetTokenReq(
            service = authentication_pb2.ServiceAuth(
                client_id = client_id,
                secret = secret
            )
        ))

    def bearer_login(self, bearer_token):
        """Authenticate with an existing bearer token."""
        return self._get_token(authentication_pb2.GetTokenReq(
            bearer = authentication_pb2.BearerToken(
                token = bearer_token
            )
        ))

    def call(self, service_func, arg):
        """Invoke a stub method with the cached auth metadata attached."""
        return service_func(arg, metadata=self._metadata)

    def service(self, service):
        """Return the stub for the given Service enum value."""
        return self._services[service]
|
{
"content_hash": "6925f2be9c870f18d2bbcfbaa216c6c6",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 111,
"avg_line_length": 34.99090909090909,
"alnum_prop": 0.6625097427903351,
"repo_name": "Dynactionize/Dyna-Python",
"id": "c9e8bf895cf17f30016a938ebad9adabdef9c7dc",
"size": "3849",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dynapython/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4715"
},
{
"name": "Shell",
"bytes": "287"
}
],
"symlink_target": ""
}
|
import os
import shutil
import tempfile
from subprocess import call
def test_start_new_project():
    """
    Test creating a new project with project bootstrap command.

    Runs `arctic start`, then Django `check` and `migrate` inside the
    generated project, and always removes the temporary project dir.
    """
    project_name = 'test_arctic_project'
    project_path = tempfile.mkdtemp()
    manage_py_bin = os.path.join(project_path, 'manage.py')
    # remove DJANGO_SETTINGS_MODULE from env or arctic command will try
    # loading 'example.settings' module. Use a default so the pop does not
    # raise KeyError when the variable is not set in the first place.
    env = os.environ.copy()
    env.pop('DJANGO_SETTINGS_MODULE', None)
    try:
        args = ['arctic', 'start', project_name, project_path]
        retcode = call(args, env=env)
        assert retcode == 0
        assert os.path.exists(manage_py_bin)
        # run Django system checks of generated project
        args = [manage_py_bin, 'check']
        retcode = call(args, env=env)
        assert retcode == 0
        # run Django migrations of generated project
        args = [manage_py_bin, 'migrate']
        retcode = call(args, env=env)
        assert retcode == 0
    finally:
        # cleanup test data even when an assertion above fails
        shutil.rmtree(project_path)
|
{
"content_hash": "83c10281b99bdc134df1302d040b6e94",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 79,
"avg_line_length": 27.27027027027027,
"alnum_prop": 0.6699702675916749,
"repo_name": "dgbc/django-arctic",
"id": "47d5a896cad677d6e35424abba21fb63c2234dca",
"size": "1009",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_commands/test_start_command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "65213"
},
{
"name": "HTML",
"bytes": "59271"
},
{
"name": "JavaScript",
"bytes": "32924"
},
{
"name": "Python",
"bytes": "152886"
}
],
"symlink_target": ""
}
|
from helper import *
# NOTE(review): `ev` is not referenced anywhere in this file's visible code;
# presumably a float-comparison epsilon used by callers — confirm before
# removing.
ev = 0.00000001
class Q:
    """Binary search tree ordered by compare_by_y (from helper).

    Keys compare via compare_by_y(a, b) -> -1/0/1; equal keys accumulate
    their values with `+=`. An "empty" tree is a node with key None.
    """

    def __init__(self, key, value):
        self.key = key
        self.value = value
        self.left = None
        self.right = None

    def find(self, key):
        """Return True if `key` is in the tree, False otherwise."""
        if self.key is None:
            return False
        c = compare_by_y(key, self.key)
        if c == 0:
            return True
        elif c == -1:
            # Bug fix: the recursive results were previously dropped
            # (no `return`), so subtree hits reported None.
            if self.left:
                return self.left.find(key)
            return False
        else:
            if self.right:
                return self.right.find(key)
            return False

    def insert(self, key, value):
        """Insert `key`; if it already exists, add `value` to its value."""
        if self.key is None:
            self.key = key
            self.value = value
            # Bug fix: without this return the code fell through, compared
            # the key against itself (c == 0) and doubled the value.
            return
        c = compare_by_y(key, self.key)
        if c == 0:
            self.value += value
        elif c == -1:
            if self.left is None:
                self.left = Q(key, value)
            else:
                self.left.insert(key, value)
        else:
            if self.right is None:
                self.right = Q(key, value)
            else:
                self.right.insert(key, value)

    # must return key AND value
    def get_and_del_min(self, parent=None):
        """Remove the minimum node and return its (key, value) pair."""
        if self.left is not None:
            return self.left.get_and_del_min(self)
        else:
            k = self.key
            v = self.value
            if parent:
                parent.left = self.right
            else:
                # i.e. is root node: splice the right child into the root
                if self.right:
                    self.key = self.right.key
                    self.value = self.right.value
                    self.left = self.right.left
                    self.right = self.right.right
                else:
                    self.key = None
            return k, v

    def print_tree(self):
        """In-order dump of keys and values (debug helper)."""
        if self.left:
            self.left.print_tree()
        print(self.key)
        print(self.value)
        if self.right:
            self.right.print_tree()
|
{
"content_hash": "f4180ec3f052be94edaaf3af7abbc787",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 41,
"avg_line_length": 19.2,
"alnum_prop": 0.6111111111111112,
"repo_name": "Maplenormandy/list-62x",
"id": "41f51aab1a29344c31e8e50cb376ec9d04cd6790",
"size": "1704",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/twocamera/Q.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "7465"
},
{
"name": "Makefile",
"bytes": "70"
},
{
"name": "Matlab",
"bytes": "2934"
},
{
"name": "OpenEdge ABL",
"bytes": "5339834"
},
{
"name": "Python",
"bytes": "85302"
}
],
"symlink_target": ""
}
|
from collections import OrderedDict
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon import tabs
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.dashboards.project.networks.tabs import OverviewTab
from openstack_dashboard.dashboards.project.networks import views as user_views
from openstack_dashboard.utils import filters
from openstack_dashboard.dashboards.admin.networks.agents import tabs \
as agents_tabs
from openstack_dashboard.dashboards.admin.networks \
import forms as project_forms
from openstack_dashboard.dashboards.admin.networks.ports \
import tables as ports_tables
from openstack_dashboard.dashboards.admin.networks.subnets \
import tables as subnets_tables
from openstack_dashboard.dashboards.admin.networks \
import tables as networks_tables
class IndexView(tables.DataTableView):
    """Admin table listing every Neutron network across all projects."""

    table_class = networks_tables.NetworksTable
    template_name = 'admin/networks/index.html'
    page_title = _("Networks")

    @memoized.memoized_method
    def _get_tenant_list(self):
        # Map tenant id -> tenant so get_data() can label each network
        # with its owning project's name.
        try:
            tenants, has_more = api.keystone.tenant_list(self.request)
        except Exception:
            tenants = []
            msg = _("Unable to retrieve information about the "
                    "networks' projects.")
            exceptions.handle(self.request, msg)
        return OrderedDict((tenant.id, tenant) for tenant in tenants)

    def _get_agents_data(self, network):
        # Number of DHCP agents hosting this network, or _("Unknown") when
        # the extension is unsupported or the agent list cannot be fetched.
        data = _("Unknown")
        try:
            if api.neutron.is_extension_supported(self.request,
                                                  'dhcp_agent_scheduler'):
                # This method is called for each network. If agent-list cannot
                # be retrieved, we will see many pop-ups. So the error message
                # will be popup-ed in get_data() below.
                hosting_agents = api.neutron.list_dhcp_agent_hosting_networks(
                    self.request, network)
                data = len(hosting_agents)
        except Exception:
            msg = _('Unable to list dhcp agents hosting network.')
            exceptions.handle(self.request, msg)
        return data

    def get_data(self):
        try:
            networks = api.neutron.network_list(self.request)
        except Exception:
            networks = []
            msg = _('Network list can not be retrieved.')
            exceptions.handle(self.request, msg)
        if networks:
            self.exception = False
            tenant_dict = self._get_tenant_list()
            for network in networks:
                # Set tenant name
                owner = tenant_dict.get(network.tenant_id, None)
                network.tenant_name = getattr(owner, 'name', None)
                network.num_agents = self._get_agents_data(network.id)
        return networks
class CreateView(forms.ModalFormView):
    """Modal form for creating a network from the admin dashboard."""
    form_class = project_forms.CreateNetwork
    template_name = 'admin/networks/create.html'
    success_url = reverse_lazy('horizon:admin:networks:index')
    page_title = _("Create Network")
class UpdateView(user_views.UpdateView):
    """Admin variant of the network update form.

    Reuses the project-dashboard UpdateView, swapping in the admin form,
    template and URLs.
    """

    form_class = project_forms.UpdateNetwork
    template_name = 'admin/networks/update.html'
    success_url = reverse_lazy('horizon:admin:networks:index')
    submit_url = "horizon:admin:networks:update"

    def get_initial(self):
        # Seed the form from the network fetched by the parent view.
        network = self._get_object()
        initial = {
            'network_id': network['id'],
            'tenant_id': network['tenant_id'],
            'name': network['name'],
            'admin_state': network['admin_state_up'],
            'shared': network['shared'],
            'external': network['router__external'],
        }
        return initial
class NetworkDetailsTabs(tabs.TabGroup):
    """Tab group for the admin network detail page."""
    slug = "network_tabs"
    # Reuses the project-dashboard OverviewTab plus the admin subnet,
    # port, and DHCP-agent tables.
    tabs = (OverviewTab, subnets_tables.SubnetsTab, ports_tables.PortsTab,
            agents_tabs.DHCPAgentsTab, )
    sticky = True
class DetailView(tabs.TabbedTableView):
    """Tabbed detail page for one network (overview/subnets/ports/agents)."""

    tab_group_class = NetworkDetailsTabs
    template_name = 'horizon/common/_detail.html'
    page_title = '{{ network.name | default:network.id }}'

    @memoized.memoized_method
    def _get_data(self):
        """Fetch the network, or redirect to the index on failure."""
        # Bug fix: read the id before the try block. Previously it was
        # bound inside the try, so if the lookup raised, the error message
        # below hit an unbound ``network_id`` and raised NameError instead
        # of reporting cleanly.
        network_id = self.kwargs['network_id']
        try:
            network = api.neutron.network_get(self.request, network_id)
            network.set_id_as_name_if_empty(length=0)
        except Exception:
            network = None
            msg = _('Unable to retrieve details for network "%s".') \
                % (network_id)
            exceptions.handle(self.request, msg,
                              redirect=self.get_redirect_url())
        return network

    @staticmethod
    def get_redirect_url():
        return reverse_lazy('horizon:admin:networks:index')

    def get_context_data(self, **kwargs):
        context = super(DetailView, self).get_context_data(**kwargs)
        network = self._get_data()
        context["network"] = network
        # Row actions for the detail page header come from the index table.
        table = networks_tables.NetworksTable(self.request)
        context["url"] = self.get_redirect_url()
        context["actions"] = table.render_row_actions(network)
        choices = networks_tables.DISPLAY_CHOICES
        network.admin_state_label = (
            filters.get_display_label(choices, network.admin_state))
        return context
|
{
"content_hash": "eed4e5fa1016ea96a5e14c01b8cbeb42",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 79,
"avg_line_length": 37.15068493150685,
"alnum_prop": 0.6323746312684366,
"repo_name": "wolverineav/horizon",
"id": "831030d8221c8cbd39680405c0770cdfb815e042",
"size": "6032",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/admin/networks/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "182861"
},
{
"name": "HTML",
"bytes": "547294"
},
{
"name": "JavaScript",
"bytes": "1954942"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "5103444"
},
{
"name": "Shell",
"bytes": "19593"
}
],
"symlink_target": ""
}
|
"""
This script tests the steps of the promoter workflow.
- Checks the dlrn API that the hash under test has been promoted
to the promotion target
- Checks that containers with that hash are pushed to repo 2
- Checks that images are uploaded with that hash and linked to
promotion target
- Checks the promoter logs for expected strings
"""
import argparse
import logging
import os
import re
import stat
import dlrnapi_client
try:
import urllib2 as url_lib
except ImportError:
import urllib.request as url_lib
import yaml
from dlrn_hash import DlrnHash
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger("promoter-integration-checks")
log.setLevel(logging.DEBUG)
def check_dlrn_promoted_hash(stage_info=None, **kwargs):
    """
    Check that the supposed hash has been promoted to
    promotion_target as recorded in DLRN.
    :param stage_info: a dictionary containing parameter of the staging env
    :param kwargs: additional parameter for non-staged executions
    :return: None
    :raises NotImplementedError: when called without stage_info (the
        production-server branch is not implemented)
    """
    if stage_info is None:
        # We would be checking a production server here, but that branch
        # was never implemented; previously the code fell through to the
        # staging-only variables below and died with a NameError.
        # TODO(gcerami) implement this branch
        raise NotImplementedError(
            "checking a production server is not implemented")
    # We are checking a stage
    api_url = stage_info['dlrn']['server']['api_url']
    promotion_target = stage_info['dlrn']['promotion_target']
    candidate_commit = \
        stage_info['dlrn']['promotions']['promotion_candidate']
    candidate_hash = DlrnHash(source=candidate_commit)
    api_client = dlrnapi_client.ApiClient(host=api_url)
    dlrn_client = dlrnapi_client.DefaultApi(api_client=api_client)
    params = dlrnapi_client.PromotionQuery()
    params.limit = 1
    params.promote_name = promotion_target
    try:
        api_response = dlrn_client.api_promotions_get(params)
        log.debug(api_response)
    except dlrnapi_client.rest.ApiException:
        log.error('Exception when calling api_promotions_get: %s',
                  dlrnapi_client.rest.ApiException)
        raise
    error_msg = "No promotions for hash {}".format(candidate_hash)
    assert api_response != [], error_msg
    promotion_hash = DlrnHash(source=api_response[0])
    error_message = ("Expected full hash: {}"
                     " has not been promoted to {}."
                     "".format(promotion_hash.full_hash, promotion_target))
    conditions = [(promotion.promote_name == promotion_target)
                  for promotion in api_response]
    assert any(conditions), error_message
def query_container_registry_promotion(stage_info=None, **kwargs):
    """
    Check that the hash containers have been pushed to the
    promotion registry with the promotion_target tag
    :param stage_info: a dictionary containing parameter of the staging env
    :param kwargs: additional parameter for non-staged executions
    :return: None
    """
    # Bug fix: initialize unconditionally. Previously this was bound only
    # in the staging branch, so the final assert raised NameError when the
    # production branch ran.
    missing_images = []
    if stage_info is not None:
        registry_target = stage_info['registries']['targets'][0]['host']
        promotion_target = stage_info['dlrn']['promotion_target']
        candidate_dict = stage_info['dlrn']['promotions']['promotion_candidate']
        candidate_hash = DlrnHash(source=candidate_dict)
        no_ppc = stage_info.get('ppc_manifests', True)
        for line in stage_info['containers']['images']:
            name, tag = line.split(":")
            reg_url = "http://{}/v2/{}/manifests/{}".format(
                registry_target, name, tag
            )
            log.info("Checking for promoted container hash: %s", reg_url)
            try:
                url_lib.urlopen(reg_url)
                log.debug("%s:%s found", name, tag)
            except url_lib.HTTPError as ex:
                log.exception(ex)
                if no_ppc and '_ppc64le' in tag:
                    log.info("(expected - ppc manifests disabled)"
                             "Image not found - %s", line)
                else:
                    log.error("Image not found - %s", line)
                    missing_images.append(line)
            # For the full_hash lines only, check that there is
            # an equivalent promotion_target entry
            if tag == candidate_hash.full_hash:
                reg_url = "http://{}/v2/{}/manifests/{}".format(
                    registry_target, name, promotion_target
                )
                log.info("Checking for promoted container tag: %s", reg_url)
                try:
                    url_lib.urlopen(reg_url)
                    log.debug("%s:%s found", name, promotion_target)
                except url_lib.HTTPError as ex:
                    log.exception(ex)
                    log.error("Image with named tag not found - %s", line)
                    promo_tgt_line = line.replace(candidate_hash.full_hash,
                                                  promotion_target)
                    missing_images.append(promo_tgt_line)
    else:
        # We are checking production
        # TODO: how to verify promoter containers
        log.info("Compare images tagged with hash and promotion target:")
        log.error("Not implemented")
    assert missing_images == [], "Images are missing {}".format(missing_images)
def compare_tagged_image_hash(stage_info=None, **kwargs):
    """
    Ensure that the promotion target images directory
    is a soft link to the promoted full hash images directory.
    :param stage_info: a dictionary containing parameter of the staging env
    :param kwargs: additional parameter for non-staged executions
    :return: None
    """
    if stage_info is not None:
        # We are cheking a stage
        distro_name = stage_info['main']['distro_name']
        distro_version = stage_info['main']['distro_version']
        distro = "{}{}".format(distro_name, distro_version)
        release = stage_info['main']['release']
        target_label = stage_info['dlrn']['promotion_target']
        images_top_root = stage_info['overcloud_images']['root']
        images_top_root = images_top_root.rstrip("/")
        images_root = os.path.join(images_top_root, distro, release,
                                   "rdo_trunk")
        promotion_link = os.path.join(images_root, target_label)
        candidate_dict = stage_info['dlrn']['promotions']['promotion_candidate']
        candidate_hash = DlrnHash(source=candidate_dict)
        promotion_dir = os.path.join(images_root, candidate_hash.full_hash)
        current_dict = stage_info['dlrn']['promotions']['currently_promoted']
        current_hash = DlrnHash(source=current_dict)
        previous_dict = stage_info['dlrn']['promotions']['previously_promoted']
        previous_label = previous_dict['name']
        previous_link = os.path.join(images_root, previous_label)
        previous_dir = os.path.join(images_root, current_hash.full_hash)
        # Local staging filesystem: plain `os` provides lstat/readlink
        # for check_links() below.
        rl_module = os
    else:
        # We are checking production
        # FIXME(gerami) this branch needs revisiting
        # NOTE(review): in this branch promotion_link, target_label,
        # promotion_dir, previous_link and previous_dir are never bound,
        # so the check_links() call below raises NameError — part of the
        # FIXME above.
        images_base_dir = kwargs['image_base']
        user = kwargs['user']
        key_path = kwargs['key_path']
        # promotion_target = args[3]
        # full_hash = args[4]
        # release = kwargs['release']
        log.debug("Install required for nonstaging env")
        import pysftp
        sftp = pysftp.Connection(
            host=images_base_dir,
            username=user, private_key=key_path)
        # images_dir = os.path.join(
        #     '/var/www/html/images',
        #     release, 'rdo_trunk')
        rl_module = sftp
    check_links(rl_module, promotion_link, target_label, promotion_dir,
                previous_link=previous_link, previous_dir=previous_dir)
def check_links(rl_module, promotion_link, target_label, promotion_dir,
                previous_link=None, previous_dir=None):
    """Verify that promotion symlinks point at the expected image dirs.

    :param rl_module: module-like object providing lstat()/readlink()
        (``os`` for local checks, a pysftp connection for remote ones)
    :param promotion_link: path of the promotion-label symlink
    :param target_label: label name, used in failure messages
    :param promotion_dir: directory the link must point to
    :param previous_link: optional path of the previous-label symlink
    :param previous_dir: directory the previous link must point to
    :raises AssertionError: on any missing/wrong link
    """
    # Bug fix: the old code used `try: ...; assert True / except: assert
    # False` — the `assert True` was a no-op and readlink() ran before we
    # knew the path was a symlink. Raise explicitly instead.
    try:
        file_mode = rl_module.lstat(promotion_link).st_mode
    except OSError:
        raise AssertionError("No link was created")
    assert stat.S_ISLNK(file_mode), "promoter dir is not a symlink"
    linked_dir = rl_module.readlink(promotion_link)
    error_msg = "{} points to wrong dir {} instead of {}".format(target_label,
                                                                 linked_dir,
                                                                 promotion_dir)
    assert linked_dir == promotion_dir, error_msg
    if previous_dir is not None and previous_link is not None:
        try:
            file_mode = rl_module.lstat(previous_link).st_mode
        except OSError:
            raise AssertionError("No link was created")
        assert stat.S_ISLNK(file_mode), "Promoted dir is not a symlink"
        p_link = rl_module.readlink(previous_link)
        msg = "{} != {}".format(p_link, previous_dir)
        assert p_link == previous_dir, msg
def parse_promotion_logs(stage_info=None, **kwargs):
    """
    Check that the promotion logs have the right
    strings printed for the promotion status
    :param stage_info: a dictionary containing parameter of the staging env
    :param kwargs: additional parameter for non-staged executions
    :return: None
    """
    if stage_info is not None:
        # We are checking a stage
        # There's a difference between function and integration tests here.
        # Functional tests drive promoter configuration and forces a
        # logfile location in the stage dir. In functional tests we need to
        # check that log file.
        # In Integration tests the promoter is run independently and the log
        # file used does not depend on stage env at all
        # We need to check first if we are logging in the primary location,
        # and if the file does not exist, we can use the location proposed by
        # the stage
        try:
            logfile = stage_info['main']['log_file']
        except KeyError:
            logfile = ""
        log.info("Verifying presence of log file in %s", logfile)
        try:
            os.stat(os.path.expanduser(logfile))
        except OSError:
            log.warning("%s not found", logfile)
            # NOTE(review): this fallback re-reads the same
            # stage_info['main']['log_file'] key as above — it looks like
            # one of the two reads was meant to be a different location;
            # confirm against the stage-setup code.
            logfile = stage_info['main']['log_file']
            log.info("Verifying presence of log file in %s", logfile)
            try:
                os.stat(os.path.expanduser(logfile))
            except OSError:
                log.error("No log file found")
                raise
        log.info("Using %s as log file to parse", logfile)
        candidate_dict = stage_info['dlrn']['promotions']['promotion_candidate']
        candidate_hash = DlrnHash(source=candidate_dict)
        with open(os.path.expanduser(logfile), 'r') as lf:
            logfile_contents = lf.read()
    else:
        # We are checking production
        # logfile = kwargs['logfile']
        # from bs4 import BeautifulSoup
        # NOTE(review): this branch leaves logfile_contents and
        # candidate_hash unbound, so the checks below raise NameError —
        # the branch is explicitly not implemented yet.
        log.debug("Reading web hosted log file")
        log.error("Not implemented")
        # url = url_lib.request.urlopen(logfile).read()
        # soup = BeautifulSoup(url, 'html.parser')
        # logfile_contents = soup.get_text()
    # Check that the promoter process finished
    error_message = "Promoter never finished"
    termination_message = "Promoter terminated normally"
    assert termination_message in logfile_contents, error_message
    # We have a list of hashes at our disposal, we know which one
    # will have to fail, and which one will have to pass
    # We can do all in the same pass
    # Patterns for the log in the new code
    # Timestamps and components vary run to run, so widen those fields
    # of the hash repr into wildcards before compiling the patterns.
    candidate_hash_pattern = re.sub("timestamp:.*",
                                    "timestamp:.*",
                                    str(candidate_hash))
    # TODO(gcerami) check if something can be broken is we are not checking
    # the component correctly
    candidate_hash_pattern = re.sub("component:.*",
                                    "component:.*",
                                    candidate_hash_pattern)
    success_pattern_container = re.compile(
        "Containers promote '{}' to tripleo-ci-staging-promoted: Successful "
        "promotion".format(candidate_hash_pattern)
    )
    success_pattern_images = re.compile(
        "Qcow promote '{}' to tripleo-ci-staging-promoted: "
        "Successful promotion".format(candidate_hash_pattern)
    )
    success_pattern_criteria = re.compile(
        "Candidate hash '{}': criteria met, attempting promotion to "
        "tripleo-ci-staging-promoted".format(candidate_hash_pattern)
    )
    success_pattern_summary = re.compile(
        "Summary: Promoted 1 hashes this round"
    )
    success_pattern_target = re.compile(
        "Candidate hash '{}': SUCCESSFUL promotion to "
        "tripleo-ci-staging-promoted".format(candidate_hash_pattern)
    )
    success_patterns = [
        success_pattern_summary,
        success_pattern_criteria,
        success_pattern_images,
        success_pattern_target,
        success_pattern_container,
    ]
    # This commit is supposed succeed
    # Check strings for passing hashes
    log.info("Status Passing: %s", candidate_hash)
    # Build pattern for successful promotion
    for check_pattern in success_patterns:
        success_pattern_search = \
            check_pattern.search(logfile_contents)
        error_message = "Pattern not found: %s" % check_pattern.pattern
        assert success_pattern_search is not None, error_message
def main():
    """Load the stage description file and run every integration check."""
    parser = argparse.ArgumentParser(
        description='Pass a config file.')
    parser.add_argument('--stage-info-file', default="/tmp/stage-info.yaml")
    cli_args = parser.parse_args()
    with open(cli_args.stage_info_file) as stage_file:
        stage_info = yaml.safe_load(stage_file)
    # Run the checks in order, announcing each one first.
    steps = (
        ('Running test: check_dlrn_promoted_hash',
         check_dlrn_promoted_hash),
        ('Running test: query_container_registry_promotion',
         query_container_registry_promotion),
        ('Running test: compare_tagged_image_hash',
         compare_tagged_image_hash),
        ('Running test: parse_promotion_logs',
         parse_promotion_logs),
    )
    for message, check in steps:
        log.info(message)
        check(stage_info=stage_info)
if __name__ == "__main__":
main()
|
{
"content_hash": "c2631ce6cdbd448cd25d5d35146ff498",
"timestamp": "",
"source": "github",
"line_count": 352,
"max_line_length": 80,
"avg_line_length": 40.04261363636363,
"alnum_prop": 0.6164597374955658,
"repo_name": "rdo-infra/ci-config",
"id": "9e4dbd91a5638d5e385f7e245c95d6a4b3f05797",
"size": "14117",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ci-scripts/dlrnapi_promoter/promoter_integration_checks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "261"
},
{
"name": "Dockerfile",
"bytes": "2312"
},
{
"name": "Go",
"bytes": "30459"
},
{
"name": "HTML",
"bytes": "31437"
},
{
"name": "Jinja",
"bytes": "9855"
},
{
"name": "Python",
"bytes": "791140"
},
{
"name": "Shell",
"bytes": "63444"
}
],
"symlink_target": ""
}
|
"""Setup module for python-gflags."""
from distutils.core import setup
# Package metadata for distribution; values are consumed by distutils.
setup(name='python-gflags',
      version='3.1.0',
      description='Google Commandline Flags Module',
      license='BSD',
      author='Google Inc. and others',
      author_email='google-gflags@googlegroups.com',
      url='https://github.com/google/python-gflags',
      # Includes the bundled third-party sub-packages under gflags/.
      packages=['gflags', 'gflags.third_party', 'gflags.third_party.pep257'],
      # Installs the man-page generator as a script into <prefix>/bin.
      data_files=[('bin', ['gflags2man.py'])],
      requires=['six'],
      classifiers=[
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3.4',
          'Intended Audience :: Developers',
          'Topic :: Software Development :: Libraries :: Python Modules',
          'License :: OSI Approved :: BSD License',
          'Operating System :: OS Independent',
      ]
      )
|
{
"content_hash": "9c21b730893d9ad6f57e0b3575c2adfd",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 77,
"avg_line_length": 36.869565217391305,
"alnum_prop": 0.6025943396226415,
"repo_name": "damienmg/bazel",
"id": "663121a7d4053ba4539167bd24aacd4bb5cfadcb",
"size": "2396",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "third_party/py/gflags/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "14332"
},
{
"name": "C++",
"bytes": "1010722"
},
{
"name": "HTML",
"bytes": "20974"
},
{
"name": "Java",
"bytes": "26224225"
},
{
"name": "JavaScript",
"bytes": "9186"
},
{
"name": "Makefile",
"bytes": "248"
},
{
"name": "PowerShell",
"bytes": "5473"
},
{
"name": "Python",
"bytes": "606463"
},
{
"name": "Roff",
"bytes": "511"
},
{
"name": "Shell",
"bytes": "964833"
}
],
"symlink_target": ""
}
|
"""Timesketch datastores."""
|
{
"content_hash": "8408825ff5b13a8f75c4c2c61cf0bb0b",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 28,
"avg_line_length": 29,
"alnum_prop": 0.6896551724137931,
"repo_name": "lockhy/timesketch",
"id": "4f40b96b4ef2624e7e4511a00af71f285a4812a6",
"size": "625",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "timesketch/lib/datastores/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "6572"
},
{
"name": "HTML",
"bytes": "44266"
},
{
"name": "JavaScript",
"bytes": "33096"
},
{
"name": "Python",
"bytes": "140454"
},
{
"name": "Shell",
"bytes": "1836"
}
],
"symlink_target": ""
}
|
import mock
import uuid
from rally.cmd.commands import task
from rally import exceptions
from rally.openstack.common import test
class TaskCommandsTestCase(test.BaseTestCase):
    """Unit tests for rally.cmd.commands.task.TaskCommands."""
    def setUp(self):
        super(TaskCommandsTestCase, self).setUp()
        self.task = task.TaskCommands()
    # NOTE: decorators apply bottom-up, so the parameters map as:
    # mock_api <- start_task, mock_create_task <- create_task,
    # mock_task_detailed <- TaskCommands.detailed. The `open` patch passes
    # an explicit replacement object, so it injects no parameter.
    @mock.patch('rally.cmd.commands.task.TaskCommands.detailed')
    @mock.patch('rally.orchestrator.api.create_task',
                return_value=dict(uuid='fc1a9bbe-1ead-4740-92b5-0feecf421634',
                                  created_at='2014-01-14 09:14:45.395822',
                                  status='init',
                                  failed=False,
                                  tag=None))
    @mock.patch('rally.cmd.commands.task.api.start_task')
    @mock.patch('rally.cmd.commands.task.open',
                mock.mock_open(read_data='{"some": "json"}'),
                create=True)
    def test_start(self, mock_api, mock_create_task,
                   mock_task_detailed):
        """start() should parse the config file and start the task."""
        deploy_id = str(uuid.uuid4())
        self.task.start('path_to_config.json', deploy_id)
        mock_api.assert_called_once_with(deploy_id, {u'some': u'json'},
                                         task=mock_create_task.return_value)
    @mock.patch('rally.cmd.commands.task.envutils.get_global')
    def test_start_no_deploy_id(self, mock_default):
        """start() without a deployment id should raise."""
        mock_default.side_effect = exceptions.InvalidArgumentsException
        self.assertRaises(exceptions.InvalidArgumentsException,
                          self.task.start, 'path_to_config.json', None)
    @mock.patch("rally.cmd.commands.task.api")
    def test_abort(self, mock_api):
        """abort() should delegate to api.abort_task."""
        test_uuid = str(uuid.uuid4())
        mock_api.abort_task = mock.MagicMock()
        self.task.abort(test_uuid)
        task.api.abort_task.assert_called_once_with(test_uuid)
    @mock.patch('rally.cmd.commands.task.envutils.get_global')
    def test_abort_no_task_id(self, mock_default):
        """abort() without a task id should raise."""
        mock_default.side_effect = exceptions.InvalidArgumentsException
        self.assertRaises(exceptions.InvalidArgumentsException,
                          self.task.abort, None)
    def test_status(self):
        """status() should fetch the task record from the db."""
        test_uuid = str(uuid.uuid4())
        value = {'task_id': "task", "status": "status"}
        with mock.patch("rally.cmd.commands.task.db") as mock_db:
            mock_db.task_get = mock.MagicMock(return_value=value)
            self.task.status(test_uuid)
            mock_db.task_get.assert_called_once_with(test_uuid)
    @mock.patch('rally.cmd.commands.task.envutils.get_global')
    def test_status_no_task_id(self, mock_default):
        """status() without a task id should raise."""
        mock_default.side_effect = exceptions.InvalidArgumentsException
        self.assertRaises(exceptions.InvalidArgumentsException,
                          self.task.status, None)
    @mock.patch('rally.cmd.commands.task.db')
    def test_detailed(self, mock_db):
        """detailed() should fetch the detailed task record."""
        test_uuid = str(uuid.uuid4())
        value = {
            "id": "task",
            "uuid": test_uuid,
            "status": "status",
            "results": [],
            "failed": False
        }
        mock_db.task_get_detailed = mock.MagicMock(return_value=value)
        self.task.detailed(test_uuid)
        mock_db.task_get_detailed.assert_called_once_with(test_uuid)
    @mock.patch('rally.cmd.commands.task.envutils.get_global')
    def test_detailed_no_task_id(self, mock_default):
        """detailed() without a task id should raise."""
        mock_default.side_effect = exceptions.InvalidArgumentsException
        self.assertRaises(exceptions.InvalidArgumentsException,
                          self.task.detailed, None)
    @mock.patch('rally.cmd.commands.task.db')
    def test_detailed_wrong_id(self, mock_db):
        """detailed() with an unknown id should still query the db."""
        test_uuid = str(uuid.uuid4())
        mock_db.task_get_detailed = mock.MagicMock(return_value=None)
        self.task.detailed(test_uuid)
        mock_db.task_get_detailed.assert_called_once_with(test_uuid)
    @mock.patch('rally.cmd.commands.task.db')
    def test_results(self, mock_db):
        """results() should fetch all results for the task uuid."""
        test_uuid = str(uuid.uuid4())
        value = [
            {'key': 'key', 'data': {'raw': 'raw'}}
        ]
        mock_db.task_result_get_all_by_uuid.return_value = value
        self.task.results(test_uuid)
        mock_db.task_result_get_all_by_uuid.assert_called_once_with(test_uuid)
    @mock.patch('rally.cmd.commands.task.envutils.get_global')
    @mock.patch("rally.cmd.commands.task.db")
    def test_list(self, mock_db, mock_default):
        """results() without a uuid raises; list() queries the db."""
        mock_default.side_effect = exceptions.InvalidArgumentsException
        self.assertRaises(exceptions.InvalidArgumentsException,
                          self.task.results, None)
        db_response = [
            {'uuid': 'a',
             'created_at': 'b',
             'status': 'c',
             'failed': True,
             'tag': 'd'}
        ]
        mock_db.task_list = mock.MagicMock(return_value=db_response)
        self.task.list()
        mock_db.task_list.assert_called_once_with()
    def test_delete(self):
        """delete() should delegate to api.delete_task with force flag."""
        task_uuid = str(uuid.uuid4())
        force = False
        with mock.patch("rally.cmd.commands.task.api") as mock_api:
            mock_api.delete_task = mock.Mock()
            self.task.delete(force, task_uuid)
            mock_api.delete_task.assert_called_once_with(task_uuid,
                                                         force=force)
    def test_percentile(self):
        """percentile() should interpolate: 10th percentile of 1..100."""
        l = range(1, 101)
        result = task.percentile(l, 0.1)
        self.assertTrue(result == 10.9)
|
{
"content_hash": "c82f5cb3e8c4b24a3bf597f4a3e384c3",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 78,
"avg_line_length": 41.18045112781955,
"alnum_prop": 0.5963118495526748,
"repo_name": "ytsarev/rally",
"id": "1bb039273a7660347a761fe3fcb4edc0d99a75ca",
"size": "6107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/cmd/commands/test_task.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "984256"
},
{
"name": "Shell",
"bytes": "14201"
}
],
"symlink_target": ""
}
|
from lib.common import helpers
class Module:
    """Empire PowerShell module: logs the current user (or all users) off."""

    def __init__(self, mainMenu, params=None):
        # Bug fix: `params=[]` was a mutable default argument shared across
        # all instantiations; use None and normalize below instead.

        # Metadata consumed by the Empire framework menus.
        self.info = {
            'Name': 'Logoff User',
            'Author': ['@harmj0y'],
            'Description': ("Logs the current user (or all users) off the machine."),
            'Background' : False,
            'OutputExtension' : None,
            'NeedsAdmin' : False,
            'OpsecSafe' : False,
            'Language' : 'powershell',
            'MinLanguageVersion' : '2',
            'Comments': []
        }
        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description'   :   'Agent to run module on.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'AllUsers' : {
                'Description'   :   'Switch. Log off all current users.',
                'Required'      :   False,
                'Value'         :   ''
            },
        }
        # save off a copy of the mainMenu object to access external functionality
        # like listeners/agent handlers/etc.
        self.mainMenu = mainMenu
        # parameter format is [Name, Value]
        for option, value in (params or []):
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self):
        """Return the PowerShell one-liner for this module's options."""
        allUsers = self.options['AllUsers']['Value']
        if allUsers.lower() == "true":
            script = "'Logging off all users.'; Start-Sleep -s 3; $null = (gwmi win32_operatingsystem).Win32Shutdown(4)"
        else:
            script = "'Logging off current user.'; Start-Sleep -s 3; shutdown /l /f"
        return script
|
{
"content_hash": "26470841397afc21e9754ddf95e4b583",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 120,
"avg_line_length": 29.323076923076922,
"alnum_prop": 0.4727177334732424,
"repo_name": "Hackplayers/Empire-mod-Hpys-tests",
"id": "def27dc075745ba59309b0e834041b02208c2665",
"size": "1906",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/modules/powershell/management/logoff.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1966"
},
{
"name": "Java",
"bytes": "496"
},
{
"name": "Objective-C",
"bytes": "2664"
},
{
"name": "PHP",
"bytes": "2041"
},
{
"name": "PowerShell",
"bytes": "16200977"
},
{
"name": "Python",
"bytes": "2675256"
},
{
"name": "Shell",
"bytes": "3603"
}
],
"symlink_target": ""
}
|
"""Support for presenting detailed information in failing assertions."""
import sys
from typing import Any
from typing import Generator
from typing import List
from typing import Optional
from typing import TYPE_CHECKING
from _pytest.assertion import rewrite
from _pytest.assertion import truncate
from _pytest.assertion import util
from _pytest.assertion.rewrite import assertstate_key
from _pytest.config import Config
from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser
from _pytest.nodes import Item
if TYPE_CHECKING:
from _pytest.main import Session
def pytest_addoption(parser: Parser) -> None:
    """Register the --assert command-line option and the
    enable_assertion_pass_hook ini option."""
    group = parser.getgroup("debugconfig")
    group.addoption(
        "--assert",
        action="store",
        dest="assertmode",
        choices=("rewrite", "plain"),
        default="rewrite",
        metavar="MODE",
        help=(
            "Control assertion debugging tools.\n"
            "'plain' performs no assertion debugging.\n"
            "'rewrite' (the default) rewrites assert statements in test modules"
            " on import to provide assert expression information."
        ),
    )
    parser.addini(
        "enable_assertion_pass_hook",
        type="bool",
        default=False,
        help="Enables the pytest_assertion_pass hook."
        "Make sure to delete any previously generated pyc cache files.",
    )
def register_assert_rewrite(*names: str) -> None:
    """Register one or more module names to be rewritten on import.

    This function will make sure that this module or all modules inside
    the package will get their assert statements rewritten.
    Thus you should make sure to call this before the module is
    actually imported, usually in your __init__.py if you are a plugin
    using a package.

    :raises TypeError: If the given module names are not strings.
    """
    for name in names:
        if not isinstance(name, str):
            raise TypeError(
                "expected module names as *args, got {0} instead".format(
                    repr(names)
                )
            )
    importhook = next(
        (h for h in sys.meta_path
         if isinstance(h, rewrite.AssertionRewritingHook)),
        None,
    )
    if importhook is None:
        # TODO(typing): Add a protocol for mark_rewrite() and use it
        # for importhook and for PytestPluginManager.rewrite_hook.
        importhook = DummyRewriteHook()  # type: ignore
    importhook.mark_rewrite(*names)
class DummyRewriteHook:
    """Stand-in import hook used when assertion rewriting is disabled."""

    def mark_rewrite(self, *names: str) -> None:
        """Accept and ignore the given module names."""
class AssertionState:
    """State for the assertion plugin: the active mode, a trace
    function, and (once installed) the rewrite import hook."""

    def __init__(self, config: Config, mode) -> None:
        # The hook is attached later by install_importhook().
        self.hook: Optional[rewrite.AssertionRewritingHook] = None
        self.trace = config.trace.root.get("assertion")
        self.mode = mode
def install_importhook(config: Config) -> rewrite.AssertionRewritingHook:
    """Try to install the rewrite hook, raise SystemError if it fails."""
    state = AssertionState(config, "rewrite")
    config.stash[assertstate_key] = state
    hook = rewrite.AssertionRewritingHook(config)
    state.hook = hook
    sys.meta_path.insert(0, hook)
    state.trace("installed rewrite import hook")

    def undo() -> None:
        installed = config.stash[assertstate_key].hook
        if installed is not None and installed in sys.meta_path:
            sys.meta_path.remove(installed)

    config.add_cleanup(undo)
    return hook
def pytest_collection(session: "Session") -> None:
    # This hook is only called when test modules are collected, so for
    # example not in the managing process of pytest-xdist (which does
    # not collect test modules).
    assertstate = session.config.stash.get(assertstate_key, None)
    if assertstate is None:
        return
    if assertstate.hook is not None:
        assertstate.hook.set_session(session)
@hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_protocol(item: Item) -> Generator[None, None, None]:
    """Setup the pytest_assertrepr_compare and pytest_assertion_pass hooks.
    The rewrite module will use util._reprcompare if it exists to use custom
    reporting via the pytest_assertrepr_compare hook. This sets up this custom
    comparison for the test.
    """
    ihook = item.ihook
    def callbinrepr(op, left: object, right: object) -> Optional[str]:
        """Call the pytest_assertrepr_compare hook and prepare the result.
        This uses the first result from the hook and then ensures the
        following:
        * Overly verbose explanations are truncated unless configured otherwise
        (eg. if running in verbose mode).
        * Embedded newlines are escaped to help util.format_explanation()
        later.
        * If the rewrite mode is used embedded %-characters are replaced
        to protect later % formatting.
        The result can be formatted by util.format_explanation() for
        pretty printing.
        """
        hook_result = ihook.pytest_assertrepr_compare(
            config=item.config, op=op, left=left, right=right
        )
        # Use the first non-empty explanation any plugin produced.
        for new_expl in hook_result:
            if new_expl:
                new_expl = truncate.truncate_if_required(new_expl, item)
                new_expl = [line.replace("\n", "\\n") for line in new_expl]
                res = "\n~".join(new_expl)
                if item.config.getvalue("assertmode") == "rewrite":
                    # Rewritten assertions format the message with %, so
                    # literal percent signs must be doubled here.
                    res = res.replace("%", "%%")
                return res
        return None
    # Save the module-global hooks so they can be restored after the test.
    saved_assert_hooks = util._reprcompare, util._assertion_pass
    util._reprcompare = callbinrepr
    util._config = item.config
    # Only install the assertion-pass callback when some plugin actually
    # implements the hook; it is comparatively expensive.
    if ihook.pytest_assertion_pass.get_hookimpls():
        def call_assertion_pass_hook(lineno: int, orig: str, expl: str) -> None:
            ihook.pytest_assertion_pass(item=item, lineno=lineno, orig=orig, expl=expl)
        util._assertion_pass = call_assertion_pass_hook
    # Run the actual test protocol, then restore the previous global state.
    yield
    util._reprcompare, util._assertion_pass = saved_assert_hooks
    util._config = None
def pytest_sessionfinish(session: "Session") -> None:
    """Detach the finished session from the rewrite hook, if installed."""
    assertstate = session.config.stash.get(assertstate_key, None)
    if assertstate is None:
        return
    hook = assertstate.hook
    if hook is not None:
        hook.set_session(None)
def pytest_assertrepr_compare(
    config: Config, op: str, left: Any, right: Any
) -> Optional[List[str]]:
    """Delegate comparison explanations to util.assertrepr_compare."""
    explanation = util.assertrepr_compare(config=config, op=op, left=left, right=right)
    return explanation
|
{
"content_hash": "bc8c63ad6b6ed2938040027a85ae6c9a",
"timestamp": "",
"source": "github",
"line_count": 181,
"max_line_length": 96,
"avg_line_length": 35.773480662983424,
"alnum_prop": 0.6661003861003861,
"repo_name": "Akasurde/pytest",
"id": "480a26ad867181b4d6213e98403230515ba987a9",
"size": "6475",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "src/_pytest/assertion/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "192"
},
{
"name": "Python",
"bytes": "2594260"
}
],
"symlink_target": ""
}
|
import sqlalchemy as sa
from neutron.db import model_base
# Model classes for test resources
class MehModel(model_base.BASEV2, model_base.HasTenant):
    """Minimal tenant-owned model used as a test resource."""
    # Single string primary key; no other columns are needed for the tests.
    meh = sa.Column(sa.String(8), primary_key=True)
class OtherMehModel(model_base.BASEV2, model_base.HasTenant):
    """Second minimal tenant-owned model used as a test resource."""
    # Single string primary key, mirroring MehModel.
    othermeh = sa.Column(sa.String(8), primary_key=True)
|
{
"content_hash": "41dbd792bb5a182dfa76c8530871a298",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 61,
"avg_line_length": 25.153846153846153,
"alnum_prop": 0.7522935779816514,
"repo_name": "wolverineav/neutron",
"id": "c8265f94fd682d47228413548ec3a03372f43152",
"size": "967",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "neutron/tests/unit/quota/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "7688704"
},
{
"name": "Shell",
"bytes": "14690"
}
],
"symlink_target": ""
}
|
import falcon
from falcon import API
from falcon_web_demo.person_resources import PersonListResource, PersonResource
from people.people_application import PeopleApplication
from .persistence import SessionScope
from .postgres_people_repository import PostgresPeopleRepository
def get_app() -> API:
    """Build the falcon application: wire the Postgres-backed people
    repository into the application layer and mount both routes."""
    scope = SessionScope()
    repository = PostgresPeopleRepository(session_scope=scope)
    application = PeopleApplication(people_repository=repository)
    list_resource = PersonListResource(
        people_application=application)
    item_resource = PersonResource(people_application=application)
    api = falcon.API()
    api.add_route('/people', list_resource)
    api.add_route('/people/{identifier}', item_resource)
    return api
|
{
"content_hash": "028172de72c1d717b726fbcd9647c993",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 79,
"avg_line_length": 37.18181818181818,
"alnum_prop": 0.7787286063569682,
"repo_name": "wileykestner/falcon-sqlalchemy-demo",
"id": "91f007dbd963859bfdb8f91c3786557d35c4069f",
"size": "818",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "falcon_web_demo/application_routes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "493"
},
{
"name": "Python",
"bytes": "15877"
},
{
"name": "Shell",
"bytes": "1224"
}
],
"symlink_target": ""
}
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Ipsec(A10BaseClass):
    """ :param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
    :param packet_round_robin: {"default": 0, "optional": true, "type": "number", "description": "Enable packet round robin for IPsec packets", "format": "flag"}
    :param crypto_core: {"description": "Crypto cores assigned for IPsec processing", "format": "number", "default": 0, "optional": true, "maximum": 56, "minimum": 0, "type": "number"}
    :param crypto_mem: {"description": "Crypto memory percentage assigned for IPsec processing", "format": "number", "default": 0, "optional": true, "maximum": 100, "minimum": 0, "type": "number"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
    Class Description::
    Configure Crypto Cores for IPsec processing.
    Class ipsec supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.`
    URL for this object::
    `https://<Hostname|Ip address>//axapi/v3/system/ipsec`.
    """
    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        # No attributes are mandatory for this endpoint.
        self.required=[]
        self.b_key = "ipsec"
        self.a10_url="/axapi/v3/system/ipsec"
        self.DeviceProxy = ""
        # Placeholder defaults; empty strings mean "not set" until the
        # caller supplies values via kwargs below.
        self.uuid = ""
        self.packet_round_robin = ""
        self.crypto_core = ""
        self.crypto_mem = ""
        # Any keyword argument overrides the matching attribute above
        # (e.g. Ipsec(crypto_core=4)).
        for keys, value in kwargs.items():
            setattr(self,keys, value)
|
{
"content_hash": "7f1ba1c4c9c9464be27550f3992134c2",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 196,
"avg_line_length": 40.02439024390244,
"alnum_prop": 0.6337599024984766,
"repo_name": "amwelch/a10sdk-python",
"id": "60031005fe9c8398032d39040ce1ff1dd734e5c8",
"size": "1641",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/system/system_ipsec.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956398"
}
],
"symlink_target": ""
}
|
import platform
import sys
try:
from setuptools import setup, Extension
except:
from distutils.core import setup, Extension, Command
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError
# Fix to build sdist under vagrant
import os
if 'vagrant' in str(os.environ):
del os.link
include_dirs = []
if sys.platform == 'win32':
include_dirs.append('compat/win32')
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError, IOError)
else:
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
class BuildFailed(Exception):
    """Raised when compiling the optional C extension fails."""
class ve_build_ext(build_ext):
    """build_ext variant that converts compiler failures into BuildFailed.

    This lets the caller retry setup() without the C extension instead of
    aborting the whole installation.  (The caught exception objects were
    previously bound to an unused variable; the bindings are dropped.)
    """

    def run(self):
        # A DistutilsPlatformError here means no usable compiler at all.
        try:
            build_ext.run(self)
        except DistutilsPlatformError:
            raise BuildFailed()

    def build_extension(self, ext):
        # ext_errors is the platform-specific tuple of compile failures.
        try:
            build_ext.build_extension(self, ext)
        except ext_errors:
            raise BuildFailed()
def run_setup(with_binary):
    # Invoke setup() for the thrift package.  When ``with_binary`` is
    # true, also build the C accelerator (thrift.protocol.fastbinary)
    # using ve_build_ext so compile failures raise BuildFailed instead
    # of aborting.
    if with_binary:
        extensions = dict(
            ext_modules = [
                Extension('thrift.protocol.fastbinary',
                          sources = ['src/protocol/fastbinary.c'],
                          include_dirs = include_dirs,
                          )
            ],
            cmdclass=dict(build_ext=ve_build_ext)
        )
    else:
        # Pure-Python build: no extension modules, default build_ext.
        extensions = dict()
    setup(name = 'thrift',
          version = '1.0.0-dev',
          description = 'Python bindings for the Apache Thrift RPC system',
          author = 'Thrift Developers',
          author_email = 'dev@thrift.apache.org',
          url = 'http://thrift.apache.org',
          license = 'Apache License 2.0',
          install_requires=['six>=1.7.2'],
          packages = [
            'thrift',
            'thrift.protocol',
            'thrift.transport',
            'thrift.server',
          ],
          package_dir = {'thrift' : 'src'},
          classifiers = [
            'Development Status :: 5 - Production/Stable',
            'Environment :: Console',
            'Intended Audience :: Developers',
            'Programming Language :: Python',
            'Programming Language :: Python :: 2',
            'Programming Language :: Python :: 3',
            'Topic :: Software Development :: Libraries',
            'Topic :: System :: Networking'
          ],
          **extensions
    )
# Entry point: try a build with the C extension first; if compilation
# fails, retry as a pure-Python install.
try:
    with_binary = False
    # Don't even try to build the C module unless we're on CPython 2.x.
    # TODO: fix it for CPython 3.x
    if platform.python_implementation() == 'CPython' and sys.version_info < (3,):
        with_binary = True
    run_setup(with_binary)
except BuildFailed:
    print()
    print('*' * 80)
    print("An error occurred while trying to compile with the C extension enabled")
    print("Attempting to build without the extension now")
    print('*' * 80)
    print()
    run_setup(False)
|
{
"content_hash": "29fb0f2c08cb94d5b9304b756d0add61",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 87,
"avg_line_length": 30.427083333333332,
"alnum_prop": 0.593974666210202,
"repo_name": "siemens/thrift",
"id": "090544ce98b5ab15e16efc8e12fbc6b9b8676e24",
"size": "3728",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "lib/py/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "75532"
},
{
"name": "Batchfile",
"bytes": "5757"
},
{
"name": "C",
"bytes": "679644"
},
{
"name": "C#",
"bytes": "386053"
},
{
"name": "C++",
"bytes": "3908033"
},
{
"name": "CMake",
"bytes": "98825"
},
{
"name": "CSS",
"bytes": "1070"
},
{
"name": "D",
"bytes": "645069"
},
{
"name": "Dart",
"bytes": "146402"
},
{
"name": "Emacs Lisp",
"bytes": "5361"
},
{
"name": "Erlang",
"bytes": "310672"
},
{
"name": "Go",
"bytes": "451008"
},
{
"name": "HTML",
"bytes": "23089"
},
{
"name": "Haskell",
"bytes": "122881"
},
{
"name": "Haxe",
"bytes": "304443"
},
{
"name": "Java",
"bytes": "964369"
},
{
"name": "JavaScript",
"bytes": "352684"
},
{
"name": "LLVM",
"bytes": "16087"
},
{
"name": "Lua",
"bytes": "48477"
},
{
"name": "Makefile",
"bytes": "15010"
},
{
"name": "OCaml",
"bytes": "39241"
},
{
"name": "Objective-C",
"bytes": "153651"
},
{
"name": "PHP",
"bytes": "314576"
},
{
"name": "Pascal",
"bytes": "387563"
},
{
"name": "Perl",
"bytes": "119756"
},
{
"name": "Python",
"bytes": "349166"
},
{
"name": "Ruby",
"bytes": "391948"
},
{
"name": "Shell",
"bytes": "28875"
},
{
"name": "Smalltalk",
"bytes": "22944"
},
{
"name": "Swift",
"bytes": "28538"
},
{
"name": "Thrift",
"bytes": "311211"
},
{
"name": "VimL",
"bytes": "2846"
},
{
"name": "Yacc",
"bytes": "26807"
}
],
"symlink_target": ""
}
|
"""Tests for BPD's implementation of the MPD protocol.
"""
from __future__ import division, absolute_import, print_function
import unittest
from test.helper import TestHelper
import os
import sys
import multiprocessing as mp
import threading
import socket
import time
import yaml
import tempfile
from contextlib import contextmanager
from beets.util import py3_path, bluelet
from beetsplug import bpd
import confuse
# Mock GstPlayer so that the forked process doesn't attempt to import gi:
import mock
import imp
# Build a fake ``beetsplug.bpd.gstplayer`` module so the forked server
# process never imports gi.  ``types.ModuleType`` replaces the
# deprecated ``imp.new_module`` (the ``imp`` module was removed in
# Python 3.12).
import types

gstplayer = types.ModuleType("beetsplug.bpd.gstplayer")


def _gstplayer_play(*_):  # noqa: 42
    # Flip the mocked player into the "playing" state, then let the
    # MagicMock side_effect machinery return the mock's default value.
    bpd.gstplayer._GstPlayer.playing = True
    return mock.DEFAULT


# Shared MagicMock standing in for the _GstPlayer class/instance; the
# spec_set list pins the attributes the plugin is allowed to touch.
gstplayer._GstPlayer = mock.MagicMock(
    spec_set=[
        "time", "volume", "playing", "run", "play_file", "pause", "stop",
        "seek", "play", "get_decoders",
    ], **{
        'playing': False,
        'volume': 0,
        'time.return_value': (0, 0),
        'play_file.side_effect': _gstplayer_play,
        'play.side_effect': _gstplayer_play,
        'get_decoders.return_value': {'default': ({'audio/mpeg'}, {'mp3'})},
    })
gstplayer.GstPlayer = lambda _: gstplayer._GstPlayer
sys.modules["beetsplug.bpd.gstplayer"] = gstplayer
bpd.gstplayer = gstplayer
class CommandParseTest(unittest.TestCase):
    """Unit tests for bpd.Command's parsing of MPD request lines
    (name plus whitespace-separated, optionally quoted arguments)."""
    def test_no_args(self):
        s = r'command'
        c = bpd.Command(s)
        self.assertEqual(c.name, u'command')
        self.assertEqual(c.args, [])
    def test_one_unquoted_arg(self):
        s = r'command hello'
        c = bpd.Command(s)
        self.assertEqual(c.name, u'command')
        self.assertEqual(c.args, [u'hello'])
    def test_two_unquoted_args(self):
        s = r'command hello there'
        c = bpd.Command(s)
        self.assertEqual(c.name, u'command')
        self.assertEqual(c.args, [u'hello', u'there'])
    def test_one_quoted_arg(self):
        # Double quotes group words into a single argument.
        s = r'command "hello there"'
        c = bpd.Command(s)
        self.assertEqual(c.name, u'command')
        self.assertEqual(c.args, [u'hello there'])
    def test_heterogenous_args(self):
        s = r'command "hello there" sir'
        c = bpd.Command(s)
        self.assertEqual(c.name, u'command')
        self.assertEqual(c.args, [u'hello there', u'sir'])
    def test_quote_in_arg(self):
        # Backslash escapes a literal quote inside a quoted argument.
        s = r'command "hello \" there"'
        c = bpd.Command(s)
        self.assertEqual(c.args, [u'hello " there'])
    def test_backslash_in_arg(self):
        # A doubled backslash yields one literal backslash.
        s = r'command "hello \\ there"'
        c = bpd.Command(s)
        self.assertEqual(c.args, [u'hello \\ there'])
class MPCResponse(object):
    """A parsed MPD protocol response: body key/value data plus the
    final status line ("OK"/"list_OK" or an "ACK" error)."""

    def __init__(self, raw_response):
        lines = raw_response.split(b'\n')
        # Everything before the status line (and trailing newline) is body.
        body = b'\n'.join(lines[:-2]).decode('utf-8')
        self.data = self._parse_body(body)
        status_line = lines[-2].decode('utf-8')
        self.ok, self.err_data = self._parse_status(status_line)

    def _parse_status(self, status):
        """Parse the final line, returning (ok, err_data).

        For an ACK line of the form "ACK [code@pos] {cmd} message",
        err_data is the tuple (code, pos, cmd, message).
        """
        if status.startswith(('OK', 'list_OK')):
            return True, None
        if status.startswith('ACK'):
            code, remainder = status[5:].split('@', 1)
            pos, remainder = remainder.split(']', 1)
            cmd, remainder = remainder[2:].split('}')
            return False, (int(code), int(pos), cmd, remainder[1:])
        raise RuntimeError('Unexpected status: {!r}'.format(status))

    def _parse_body(self, body):
        """Turn "header: content" lines into a dict.

        A header seen once maps to a string; a header seen more than
        once maps to a list of strings in order of appearance.
        """
        data = {}
        multi_headers = set()
        for line in body.split('\n'):
            if not line:
                continue
            if ':' not in line:
                raise RuntimeError('Unexpected line: {!r}'.format(line))
            header, _, content = line.partition(':')
            content = content.lstrip()
            if header in multi_headers:
                data[header].append(content)
            elif header in data:
                # Second occurrence: promote the value to a list.
                data[header] = [data[header], content]
                multi_headers.add(header)
            else:
                data[header] = content
        return data
class MPCClient(object):
    """Small MPD protocol client used by the tests to talk to BPD over
    a plain socket, with support for command-list batching."""
    def __init__(self, sock, do_hello=True):
        self.sock = sock
        # Receive buffer for readline(); bytes read but not yet consumed.
        self.buf = b''
        if do_hello:
            # Consume the server's "OK MPD <version>" greeting.
            hello = self.get_response()
            if not hello.ok:
                raise RuntimeError('Bad hello')
    def get_response(self, force_multi=None):
        """ Wait for a full server response and wrap it in a helper class.
        If the request was a batch request then this will return a list of
        `MPCResponse`s, one for each processed subcommand.
        """
        response = b''
        responses = []
        while True:
            line = self.readline()
            response += line
            if line.startswith(b'OK') or line.startswith(b'ACK'):
                # Final status line.  If we saw any list_OK separators (or
                # the caller told us to expect a batch), return a list.
                if force_multi or any(responses):
                    if line.startswith(b'ACK'):
                        responses.append(MPCResponse(response))
                        # A failed batch stops early; pad with None so the
                        # caller still gets force_multi entries.
                        # NOTE(review): assumes force_multi is set whenever
                        # an ACK ends a batch — verify for unbatched ACKs.
                        n_remaining = force_multi - len(responses)
                        responses.extend([None] * n_remaining)
                    return responses
                else:
                    return MPCResponse(response)
            if line.startswith(b'list_OK'):
                # Separator between subcommand responses in a batch.
                responses.append(MPCResponse(response))
                response = b''
            elif not line:
                raise RuntimeError('Unexpected response: {!r}'.format(line))
    def serialise_command(self, command, *args):
        # Encode a command line, quoting any argument containing a space.
        cmd = [command.encode('utf-8')]
        for arg in [a.encode('utf-8') for a in args]:
            if b' ' in arg:
                cmd.append(b'"' + arg + b'"')
            else:
                cmd.append(arg)
        return b' '.join(cmd) + b'\n'
    def send_command(self, command, *args):
        # Send a single command and wait for its response.
        request = self.serialise_command(command, *args)
        self.sock.sendall(request)
        return self.get_response()
    def send_commands(self, *commands):
        """ Use MPD command batching to send multiple commands at once.
        Each item of commands is a tuple containing a command followed by
        any arguments.
        """
        requests = []
        for command_and_args in commands:
            command = command_and_args[0]
            args = command_and_args[1:]
            requests.append(self.serialise_command(command, *args))
        # Wrap the batch in MPD's command-list markers.
        requests.insert(0, b'command_list_ok_begin\n')
        requests.append(b'command_list_end\n')
        request = b''.join(requests)
        self.sock.sendall(request)
        return self.get_response(force_multi=len(commands))
    def readline(self, terminator=b'\n', bufsize=1024):
        """ Reads a line of data from the socket.
        """
        while True:
            if terminator in self.buf:
                line, self.buf = self.buf.split(terminator, 1)
                line += terminator
                return line
            # Block (max 1s) for more data from the server.
            self.sock.settimeout(1)
            data = self.sock.recv(bufsize)
            if data:
                self.buf += data
            else:
                # Peer closed the connection: return whatever is buffered
                # (possibly b''), which callers treat as end-of-stream.
                line = self.buf
                self.buf = b''
                return line
def implements(commands, expectedFailure=False):  # noqa: N803
    """Build a test method asserting that the server's 'commands'
    response includes every name in ``commands``."""
    def _test(self):
        with self.run_bpd() as client:
            response = client.send_command('commands')
            self._assert_ok(response)
            implemented = response.data['command']
            self.assertEqual(commands.intersection(implemented), commands)
    if expectedFailure:
        return unittest.expectedFailure(_test)
    return _test
# Keep a reference to the real Listener: the mock.patch decorator below
# replaces it at its original location before start_server runs.
bluelet_listener = bluelet.Listener
@mock.patch("beets.util.bluelet.Listener")
def start_server(args, assigned_port, listener_patch):
    """Start the bpd server, writing the port to `assigned_port`.
    """
    def listener_wrap(host, port):
        """Wrap `bluelet.Listener`, writing the port to `assigend_port`.
        """
        # `bluelet.Listener` has previously been saved to
        # `bluelet_listener` as this function will replace it at its
        # original location.
        listener = bluelet_listener(host, port)
        # read port assigned by OS
        assigned_port.put_nowait(listener.sock.getsockname()[1])
        return listener
    listener_patch.side_effect = listener_wrap
    # Import here so the patched Listener is in place before beets runs.
    import beets.ui
    beets.ui.main(args)
class BPDTestHelper(unittest.TestCase, TestHelper):
    """Base class for BPD tests: sets up a two-track library and
    provides helpers to run BPD in a subprocess and talk to it."""
    def setUp(self):
        self.setup_beets(disk=True)
        self.load_plugins('bpd')
        self.item1 = self.add_item(
            title='Track One Title', track=1,
            album='Album Title', artist='Artist Name')
        self.item2 = self.add_item(
            title='Track Two Title', track=2,
            album='Album Title', artist='Artist Name')
        self.lib.add_album([self.item1, self.item2])
    def tearDown(self):
        self.teardown_beets()
        self.unload_plugins()
    @contextmanager
    def run_bpd(self, host='localhost', password=None, do_hello=True,
                second_client=False):
        """ Runs BPD in another process, configured with the same library
        database as we created in the setUp method. Exposes a client that is
        connected to the server, and kills the server at the end.
        """
        # Create a config file:
        config = {
            'pluginpath': [py3_path(self.temp_dir)],
            'plugins': 'bpd',
            # use port 0 to let the OS choose a free port
            'bpd': {'host': host, 'port': 0, 'control_port': 0},
        }
        if password:
            config['bpd']['password'] = password
        config_file = tempfile.NamedTemporaryFile(
            mode='wb', dir=py3_path(self.temp_dir), suffix='.yaml',
            delete=False)
        config_file.write(
            yaml.dump(config, Dumper=confuse.Dumper, encoding='utf-8'))
        config_file.close()
        # Fork and launch BPD in the new process:
        assigned_port = mp.Queue(2)  # 2 slots, `control_port` and `port`
        server = mp.Process(target=start_server, args=([
            '--library', self.config['library'].as_filename(),
            '--directory', py3_path(self.libdir),
            '--config', py3_path(config_file.name),
            'bpd'
        ], assigned_port))
        server.start()
        try:
            assigned_port.get(timeout=1)  # skip control_port
            port = assigned_port.get(timeout=0.5)  # read port
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                sock.connect((host, port))
                if second_client:
                    # Some tests need two concurrent connections (e.g. to
                    # exercise 'idle'); yield a pair of clients.
                    sock2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    try:
                        sock2.connect((host, port))
                        yield (
                            MPCClient(sock, do_hello),
                            MPCClient(sock2, do_hello),
                        )
                    finally:
                        sock2.close()
                else:
                    yield MPCClient(sock, do_hello)
            finally:
                sock.close()
        finally:
            # Always tear the server process down, even on test failure.
            server.terminate()
            server.join(timeout=0.2)
    def _assert_ok(self, *responses):
        # Every response must exist and carry an OK status.
        for response in responses:
            self.assertTrue(response is not None)
            self.assertTrue(response.ok, 'Response failed: {}'.format(
                response.err_data))
    def _assert_failed(self, response, code, pos=None):
        """ Check that a command failed with a specific error code. If this
        is a list of responses, first check all preceding commands were OK.
        """
        if pos is not None:
            previous_commands = response[0:pos]
            self._assert_ok(*previous_commands)
            response = response[pos]
        self.assertFalse(response.ok)
        if pos is not None:
            self.assertEqual(pos, response.err_data[1])
        if code is not None:
            self.assertEqual(code, response.err_data[0])
    def _bpd_add(self, client, *items, **kwargs):
        """ Add the given item to the BPD playlist or queue.
        """
        # Paths are relative to the library directory: artist/album/file.
        paths = ['/'.join([
            item.artist, item.album,
            py3_path(os.path.basename(item.path))]) for item in items]
        playlist = kwargs.get('playlist')
        if playlist:
            commands = [('playlistadd', playlist, path) for path in paths]
        else:
            commands = [('add', path) for path in paths]
        responses = client.send_commands(*commands)
        self._assert_ok(*responses)
class BPDTest(BPDTestHelper):
    """Basic protocol behaviour: greeting and error responses."""
    def test_server_hello(self):
        with self.run_bpd(do_hello=False) as client:
            self.assertEqual(client.readline(), b'OK MPD 0.16.0\n')
    def test_unknown_cmd(self):
        with self.run_bpd() as client:
            response = client.send_command('notacommand')
            self._assert_failed(response, bpd.ERROR_UNKNOWN)
    def test_unexpected_argument(self):
        with self.run_bpd() as client:
            response = client.send_command('ping', 'extra argument')
            self._assert_failed(response, bpd.ERROR_ARG)
    def test_missing_argument(self):
        with self.run_bpd() as client:
            response = client.send_command('add')
            self._assert_failed(response, bpd.ERROR_ARG)
    def test_system_error(self):
        # 'crash_TypeError' is a debug command that raises server-side.
        with self.run_bpd() as client:
            response = client.send_command('crash_TypeError')
            self._assert_failed(response, bpd.ERROR_SYSTEM)
    def test_empty_request(self):
        with self.run_bpd() as client:
            response = client.send_command('')
            self._assert_failed(response, bpd.ERROR_UNKNOWN)
class BPDQueryTest(BPDTestHelper):
    """Status/query commands: currentsong, status, stats, idle."""
    test_implements_query = implements({
        'clearerror',
    })
    def test_cmd_currentsong(self):
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1)
            responses = client.send_commands(
                ('play',),
                ('currentsong',),
                ('stop',),
                ('currentsong',))
            self._assert_ok(*responses)
            self.assertEqual('1', responses[1].data['Id'])
            # After 'stop' there is no current song.
            self.assertNotIn('Id', responses[3].data)
    def test_cmd_currentsong_tagtypes(self):
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1)
            responses = client.send_commands(
                ('play',),
                ('currentsong',))
            self._assert_ok(*responses)
            self.assertEqual(
                    BPDConnectionTest.TAGTYPES.union(BPDQueueTest.METADATA),
                    set(responses[1].data.keys()))
    def test_cmd_status(self):
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1, self.item2)
            responses = client.send_commands(
                ('status',),
                ('play',),
                ('status',))
            self._assert_ok(*responses)
            fields_not_playing = {
                    'repeat', 'random', 'single', 'consume', 'playlist',
                    'playlistlength', 'mixrampdb', 'state',
                    'volume'
            }
            self.assertEqual(fields_not_playing, set(responses[0].data.keys()))
            # While playing, status grows extra song/position fields.
            fields_playing = fields_not_playing | {
                    'song', 'songid', 'time', 'elapsed', 'bitrate', 'duration',
                    'audio', 'nextsong', 'nextsongid'
            }
            self.assertEqual(fields_playing, set(responses[2].data.keys()))
    def test_cmd_stats(self):
        with self.run_bpd() as client:
            response = client.send_command('stats')
        self._assert_ok(response)
        details = {'artists', 'albums', 'songs', 'uptime', 'db_playtime',
                   'db_update', 'playtime'}
        self.assertEqual(details, set(response.data.keys()))
    def test_cmd_idle(self):
        def _toggle(c):
            for _ in range(3):
                rs = c.send_commands(('play',), ('pause',))
                # time.sleep(0.05)  # uncomment if test is flaky
                if any(not r.ok for r in rs):
                    raise RuntimeError('Toggler failed')
        with self.run_bpd(second_client=True) as (client, client2):
            self._bpd_add(client, self.item1, self.item2)
            toggler = threading.Thread(target=_toggle, args=(client2,))
            toggler.start()
            # Idling will hang until the toggler thread changes the play state.
            # Since the client sockets have a 1s timeout set at worst this will
            # raise a socket.timeout and fail the test if the toggler thread
            # manages to finish before the idle command is sent here.
            response = client.send_command('idle', 'player')
            toggler.join()
            self._assert_ok(response)
    def test_cmd_idle_with_pending(self):
        # A change made before 'idle' is issued must be delivered at once.
        with self.run_bpd(second_client=True) as (client, client2):
            response1 = client.send_command('random', '1')
            response2 = client2.send_command('idle')
            self._assert_ok(response1, response2)
            self.assertEqual('options', response2.data['changed'])
    def test_cmd_noidle(self):
        with self.run_bpd() as client:
            # Manually send a command without reading a response.
            request = client.serialise_command('idle')
            client.sock.sendall(request)
            time.sleep(0.01)
            response = client.send_command('noidle')
            self._assert_ok(response)
    def test_cmd_noidle_when_not_idle(self):
        with self.run_bpd() as client:
            # Manually send a command without reading a response.
            request = client.serialise_command('noidle')
            client.sock.sendall(request)
            response = client.send_command('notacommand')
            self._assert_failed(response, bpd.ERROR_UNKNOWN)
class BPDPlaybackTest(BPDTestHelper):
    """Playback option commands: consume, single, repeat, crossfade,
    mixramp, volume, and replay gain."""
    test_implements_playback = implements({
        'random',
    })
    def test_cmd_consume(self):
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1, self.item2)
            responses = client.send_commands(
                ('consume', '0'),
                ('playlistinfo',),
                ('next',),
                ('playlistinfo',),
                ('consume', '1'),
                ('playlistinfo',),
                ('play', '0'),
                ('next',),
                ('playlistinfo',),
                ('status',))
            self._assert_ok(*responses)
            # With consume off, 'next' leaves the playlist untouched.
            self.assertEqual(responses[1].data['Id'], responses[3].data['Id'])
            self.assertEqual(['1', '2'], responses[5].data['Id'])
            # With consume on, the played track is removed from the queue.
            self.assertEqual('2', responses[8].data['Id'])
            self.assertEqual('1', responses[9].data['consume'])
            self.assertEqual('play', responses[9].data['state'])
    def test_cmd_consume_in_reverse(self):
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1, self.item2)
            responses = client.send_commands(
                ('consume', '1'),
                ('play', '1'),
                ('playlistinfo',),
                ('previous',),
                ('playlistinfo',),
                ('status',))
            self._assert_ok(*responses)
            self.assertEqual(['1', '2'], responses[2].data['Id'])
            self.assertEqual('1', responses[4].data['Id'])
            self.assertEqual('play', responses[5].data['state'])
    def test_cmd_single(self):
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1, self.item2)
            responses = client.send_commands(
                ('status',),
                ('single', '1'),
                ('play',),
                ('status',),
                ('next',),
                ('status',))
            self._assert_ok(*responses)
            self.assertEqual('0', responses[0].data['single'])
            self.assertEqual('1', responses[3].data['single'])
            self.assertEqual('play', responses[3].data['state'])
            # In single mode, 'next' stops instead of advancing.
            self.assertEqual('stop', responses[5].data['state'])
    def test_cmd_repeat(self):
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1, self.item2)
            responses = client.send_commands(
                ('repeat', '1'),
                ('play',),
                ('currentsong',),
                ('next',),
                ('currentsong',),
                ('next',),
                ('currentsong',))
            self._assert_ok(*responses)
            # Repeat wraps from the last track back to the first.
            self.assertEqual('1', responses[2].data['Id'])
            self.assertEqual('2', responses[4].data['Id'])
            self.assertEqual('1', responses[6].data['Id'])
    def test_cmd_repeat_with_single(self):
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1, self.item2)
            responses = client.send_commands(
                ('repeat', '1'),
                ('single', '1'),
                ('play',),
                ('currentsong',),
                ('next',),
                ('status',),
                ('currentsong',))
            self._assert_ok(*responses)
            # repeat+single keeps replaying the same track.
            self.assertEqual('1', responses[3].data['Id'])
            self.assertEqual('play', responses[5].data['state'])
            self.assertEqual('1', responses[6].data['Id'])
    def test_cmd_repeat_in_reverse(self):
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1, self.item2)
            responses = client.send_commands(
                ('repeat', '1'),
                ('play',),
                ('currentsong',),
                ('previous',),
                ('currentsong',))
            self._assert_ok(*responses)
            # 'previous' from the first track wraps to the last.
            self.assertEqual('1', responses[2].data['Id'])
            self.assertEqual('2', responses[4].data['Id'])
    def test_cmd_repeat_with_single_in_reverse(self):
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1, self.item2)
            responses = client.send_commands(
                ('repeat', '1'),
                ('single', '1'),
                ('play',),
                ('currentsong',),
                ('previous',),
                ('status',),
                ('currentsong',))
            self._assert_ok(*responses)
            self.assertEqual('1', responses[3].data['Id'])
            self.assertEqual('play', responses[5].data['state'])
            self.assertEqual('1', responses[6].data['Id'])
    def test_cmd_crossfade(self):
        with self.run_bpd() as client:
            responses = client.send_commands(
                ('status',),
                ('crossfade', '123'),
                ('status',),
                ('crossfade', '-2'))
            # Non-integer and negative values are both rejected.
            response = client.send_command('crossfade', '0.5')
            self._assert_failed(responses, bpd.ERROR_ARG, pos=3)
            self._assert_failed(response, bpd.ERROR_ARG)
            self.assertNotIn('xfade', responses[0].data)
            self.assertAlmostEqual(123, int(responses[2].data['xfade']))
    def test_cmd_mixrampdb(self):
        with self.run_bpd() as client:
            responses = client.send_commands(
                ('mixrampdb', '-17'),
                ('status',))
            self._assert_ok(*responses)
            self.assertAlmostEqual(-17, float(responses[1].data['mixrampdb']))
    def test_cmd_mixrampdelay(self):
        with self.run_bpd() as client:
            responses = client.send_commands(
                ('mixrampdelay', '2'),
                ('status',),
                ('mixrampdelay', 'nan'),
                ('status',),
                ('mixrampdelay', '-2'))
            self._assert_failed(responses, bpd.ERROR_ARG, pos=4)
            self.assertAlmostEqual(2, float(responses[1].data['mixrampdelay']))
            # 'nan' disables the delay; it then disappears from status.
            self.assertNotIn('mixrampdelay', responses[3].data)
    def test_cmd_setvol(self):
        with self.run_bpd() as client:
            responses = client.send_commands(
                ('setvol', '67'),
                ('status',),
                ('setvol', '32'),
                ('status',),
                ('setvol', '101'))
            # Volume is clamped to 0-100; 101 is an argument error.
            self._assert_failed(responses, bpd.ERROR_ARG, pos=4)
            self.assertEqual('67', responses[1].data['volume'])
            self.assertEqual('32', responses[3].data['volume'])
    def test_cmd_volume(self):
        with self.run_bpd() as client:
            responses = client.send_commands(
                ('setvol', '10'),
                ('volume', '5'),
                ('volume', '-2'),
                ('status',))
            self._assert_ok(*responses)
            # 'volume' applies relative changes: 10 + 5 - 2 = 13.
            self.assertEqual('13', responses[3].data['volume'])
    def test_cmd_replay_gain(self):
        with self.run_bpd() as client:
            responses = client.send_commands(
                ('replay_gain_mode', 'track'),
                ('replay_gain_status',),
                ('replay_gain_mode', 'notanoption'))
            self._assert_failed(responses, bpd.ERROR_ARG, pos=2)
            self.assertAlmostEqual('track',
                                   responses[1].data['replay_gain_mode'])
class BPDControlTest(BPDTestHelper):
    """Transport control commands: play, playid, pause, stop, next,
    previous (seek commands are known-unimplemented)."""
    test_implements_control = implements({
            'seek', 'seekid', 'seekcur',
        }, expectedFailure=True)
    def test_cmd_play(self):
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1, self.item2)
            responses = client.send_commands(
                ('status',),
                ('play',),
                ('status',),
                ('play', '1'),
                ('currentsong',))
            self._assert_ok(*responses)
            self.assertEqual('stop', responses[0].data['state'])
            self.assertEqual('play', responses[2].data['state'])
            # 'play 1' jumps to the queue position, i.e. the second track.
            self.assertEqual('2', responses[4].data['Id'])
    def test_cmd_playid(self):
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1, self.item2)
            responses = client.send_commands(
                ('playid', '2'),
                ('currentsong',),
                ('clear',))
            # Re-add in reverse order: playid must select by id, not
            # by queue position.
            self._bpd_add(client, self.item2, self.item1)
            responses.extend(client.send_commands(
                ('playid', '2'),
                ('currentsong',)))
            self._assert_ok(*responses)
            self.assertEqual('2', responses[1].data['Id'])
            self.assertEqual('2', responses[4].data['Id'])
    def test_cmd_pause(self):
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1)
            responses = client.send_commands(
                ('play',),
                ('pause',),
                ('status',),
                ('currentsong',))
            self._assert_ok(*responses)
            # Pausing keeps the current song, unlike stop.
            self.assertEqual('pause', responses[2].data['state'])
            self.assertEqual('1', responses[3].data['Id'])
    def test_cmd_stop(self):
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1)
            responses = client.send_commands(
                ('play',),
                ('stop',),
                ('status',),
                ('currentsong',))
            self._assert_ok(*responses)
            self.assertEqual('stop', responses[2].data['state'])
            self.assertNotIn('Id', responses[3].data)
    def test_cmd_next(self):
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1, self.item2)
            responses = client.send_commands(
                ('play',),
                ('currentsong',),
                ('next',),
                ('currentsong',),
                ('next',),
                ('status',))
            self._assert_ok(*responses)
            self.assertEqual('1', responses[1].data['Id'])
            self.assertEqual('2', responses[3].data['Id'])
            # Advancing past the last track stops playback (repeat off).
            self.assertEqual('stop', responses[5].data['state'])
    def test_cmd_previous(self):
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1, self.item2)
            responses = client.send_commands(
                ('play', '1'),
                ('currentsong',),
                ('previous',),
                ('currentsong',),
                ('previous',),
                ('status',),
                ('currentsong',))
            self._assert_ok(*responses)
            self.assertEqual('2', responses[1].data['Id'])
            self.assertEqual('1', responses[3].data['Id'])
            # 'previous' from the first track restarts it (repeat off).
            self.assertEqual('play', responses[5].data['state'])
            self.assertEqual('1', responses[6].data['Id'])
class BPDQueueTest(BPDTestHelper):
    """Queue manipulation and inspection commands."""

    test_implements_queue = implements({
        'addid', 'clear', 'delete', 'deleteid', 'move',
        'moveid', 'playlist', 'playlistfind',
        'playlistsearch', 'plchanges',
        'plchangesposid', 'prio', 'prioid', 'rangeid', 'shuffle',
        'swap', 'swapid', 'addtagid', 'cleartagid',
    }, expectedFailure=True)

    # Queue-entry keys returned in addition to the song's tag types.
    METADATA = {'Pos', 'Time', 'Id', 'file', 'duration'}

    def test_cmd_add(self):
        """Adding a single item to the queue succeeds."""
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1)

    def test_cmd_playlistinfo(self):
        """`playlistinfo` supports no-arg, index and range forms."""
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1, self.item2)
            responses = client.send_commands(
                ('playlistinfo',),
                ('playlistinfo', '0'),
                ('playlistinfo', '0:2'),
                ('playlistinfo', '200'))
        # An out-of-range index must be rejected.
        self._assert_failed(responses, bpd.ERROR_ARG, pos=3)
        self.assertEqual('1', responses[1].data['Id'])
        self.assertEqual(['1', '2'], responses[2].data['Id'])

    def test_cmd_playlistinfo_tagtypes(self):
        """A queue entry exposes exactly the tag types plus queue metadata."""
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1)
            response = client.send_command('playlistinfo', '0')
        self._assert_ok(response)
        self.assertEqual(
            BPDConnectionTest.TAGTYPES.union(BPDQueueTest.METADATA),
            set(response.data.keys()))

    def test_cmd_playlistid(self):
        """`playlistid` looks up one entry by id, or lists all entries."""
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1, self.item2)
            responses = client.send_commands(
                ('playlistid', '2'),
                ('playlistid',))
        self._assert_ok(*responses)
        self.assertEqual('Track Two Title', responses[0].data['Title'])
        self.assertEqual(['1', '2'], responses[1].data['Track'])
class BPDPlaylistsTest(BPDTestHelper):
    """Stored-playlist commands: most are unsupported and must error."""

    test_implements_playlists = implements({'playlistadd'})

    def test_cmd_listplaylist(self):
        """Listing a nonexistent playlist reports NO_EXIST."""
        with self.run_bpd() as client:
            response = client.send_command('listplaylist', 'anything')
        self._assert_failed(response, bpd.ERROR_NO_EXIST)

    def test_cmd_listplaylistinfo(self):
        """`listplaylistinfo` on a nonexistent playlist reports NO_EXIST."""
        with self.run_bpd() as client:
            response = client.send_command('listplaylistinfo', 'anything')
        self._assert_failed(response, bpd.ERROR_NO_EXIST)

    def test_cmd_listplaylists(self):
        """`listplaylists` is not implemented."""
        with self.run_bpd() as client:
            response = client.send_command('listplaylists')
        self._assert_failed(response, bpd.ERROR_UNKNOWN)

    def test_cmd_load(self):
        """`load` on a nonexistent playlist reports NO_EXIST."""
        with self.run_bpd() as client:
            response = client.send_command('load', 'anything')
        self._assert_failed(response, bpd.ERROR_NO_EXIST)

    # Bug fix: `@unittest.skip` was applied bare (without a reason string).
    # Before Python 3.11 that replaces the test with skip's inner decorator
    # function, so the test neither runs nor is reported as skipped.
    @unittest.skip('playlistadd is not fully supported yet')
    def test_cmd_playlistadd(self):
        """Adding an item to a named playlist (currently skipped)."""
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1, playlist='anything')

    def test_cmd_playlistclear(self):
        """`playlistclear` is not implemented."""
        with self.run_bpd() as client:
            response = client.send_command('playlistclear', 'anything')
        self._assert_failed(response, bpd.ERROR_UNKNOWN)

    def test_cmd_playlistdelete(self):
        """`playlistdelete` is not implemented."""
        with self.run_bpd() as client:
            response = client.send_command('playlistdelete', 'anything', '0')
        self._assert_failed(response, bpd.ERROR_UNKNOWN)

    def test_cmd_playlistmove(self):
        """`playlistmove` is not implemented."""
        with self.run_bpd() as client:
            response = client.send_command(
                'playlistmove', 'anything', '0', '1')
        self._assert_failed(response, bpd.ERROR_UNKNOWN)

    def test_cmd_rename(self):
        """`rename` is not implemented."""
        with self.run_bpd() as client:
            response = client.send_command('rename', 'anything', 'newname')
        self._assert_failed(response, bpd.ERROR_UNKNOWN)

    def test_cmd_rm(self):
        """`rm` is not implemented."""
        with self.run_bpd() as client:
            response = client.send_command('rm', 'anything')
        self._assert_failed(response, bpd.ERROR_UNKNOWN)

    def test_cmd_save(self):
        """`save` is not implemented."""
        with self.run_bpd() as client:
            self._bpd_add(client, self.item1)
            response = client.send_command('save', 'newplaylist')
        self._assert_failed(response, bpd.ERROR_UNKNOWN)
class BPDDatabaseTest(BPDTestHelper):
    """Library-database query commands."""

    test_implements_database = implements({
        'albumart', 'find', 'findadd', 'listall',
        'listallinfo', 'listfiles', 'readcomments',
        'searchadd', 'searchaddpl', 'update', 'rescan',
    }, expectedFailure=True)

    def test_cmd_search(self):
        """`search` matches a track and returns its title."""
        with self.run_bpd() as client:
            response = client.send_command('search', 'track', '1')
        self._assert_ok(response)
        self.assertEqual(self.item1.title, response.data['Title'])

    def test_cmd_list(self):
        """`list` returns distinct tag values; unpaired filters fail."""
        with self.run_bpd() as client:
            responses = client.send_commands(
                ('list', 'album'),
                ('list', 'track'),
                ('list', 'album', 'artist', 'Artist Name', 'track'))
        # Filter arguments must come in (tag, value) pairs.
        self._assert_failed(responses, bpd.ERROR_ARG, pos=2)
        self.assertEqual('Album Title', responses[0].data['Album'])
        self.assertEqual(['1', '2'], responses[1].data['Track'])

    def test_cmd_list_three_arg_form(self):
        """The legacy 3-arg form is only valid as `list album <artist>`."""
        with self.run_bpd() as client:
            responses = client.send_commands(
                ('list', 'album', 'artist', 'Artist Name'),
                ('list', 'album', 'Artist Name'),
                ('list', 'track', 'Artist Name'))
        self._assert_failed(responses, bpd.ERROR_ARG, pos=2)
        self.assertEqual(responses[0].data, responses[1].data)

    def test_cmd_lsinfo(self):
        """`lsinfo` can be walked down the directory tree to the items."""
        with self.run_bpd() as client:
            response1 = client.send_command('lsinfo')
            self._assert_ok(response1)
            response2 = client.send_command(
                'lsinfo', response1.data['directory'])
            self._assert_ok(response2)
            response3 = client.send_command(
                'lsinfo', response2.data['directory'])
            self._assert_ok(response3)
        self.assertIn(self.item1.title, response3.data['Title'])

    def test_cmd_count(self):
        """`count` reports song count and total playtime for a filter."""
        with self.run_bpd() as client:
            response = client.send_command('count', 'track', '1')
        self._assert_ok(response)
        self.assertEqual('1', response.data['songs'])
        self.assertEqual('0', response.data['playtime'])
class BPDMountsTest(BPDTestHelper):
    """Mount/neighbor commands: declared but not implemented yet."""

    test_implements_mounts = implements({
        'mount', 'unmount', 'listmounts', 'listneighbors',
    }, expectedFailure=True)
class BPDStickerTest(BPDTestHelper):
    """Sticker commands: declared but not implemented yet."""

    test_implements_stickers = implements({
        'sticker',
    }, expectedFailure=True)
class BPDConnectionTest(BPDTestHelper):
    """Connection-level commands: authentication, ping and tag types."""

    test_implements_connection = implements({
        'close', 'kill',
    })

    # Every tag type a real MPD server advertises.
    ALL_MPD_TAGTYPES = {
        'Artist', 'ArtistSort', 'Album', 'AlbumSort', 'AlbumArtist',
        'AlbumArtistSort', 'Title', 'Track', 'Name', 'Genre', 'Date',
        'Composer', 'Performer', 'Comment', 'Disc', 'Label',
        'OriginalDate', 'MUSICBRAINZ_ARTISTID', 'MUSICBRAINZ_ALBUMID',
        'MUSICBRAINZ_ALBUMARTISTID', 'MUSICBRAINZ_TRACKID',
        'MUSICBRAINZ_RELEASETRACKID', 'MUSICBRAINZ_WORKID',
    }

    UNSUPPORTED_TAGTYPES = {
        'MUSICBRAINZ_WORKID',  # not tracked by beets
        'Performer',  # not tracked by beets
        'AlbumSort',  # not tracked by beets
        'Name',  # junk field for internet radio
    }

    # Tag types bpd is expected to advertise.
    TAGTYPES = ALL_MPD_TAGTYPES.difference(UNSUPPORTED_TAGTYPES)

    def test_cmd_password(self):
        """Wrong passwords are rejected; the right one unlocks the server."""
        with self.run_bpd(password='abc123') as client:
            response = client.send_command('status')
            self._assert_failed(response, bpd.ERROR_PERMISSION)
            response = client.send_command('password', 'wrong')
            self._assert_failed(response, bpd.ERROR_PASSWORD)
            responses = client.send_commands(
                ('password', 'abc123'),
                ('status',))
        self._assert_ok(*responses)

    def test_cmd_ping(self):
        """`ping` always succeeds."""
        with self.run_bpd() as client:
            response = client.send_command('ping')
        self._assert_ok(response)

    def test_cmd_tagtypes(self):
        """`tagtypes` lists exactly the supported tag types."""
        with self.run_bpd() as client:
            response = client.send_command('tagtypes')
        self._assert_ok(response)
        self.assertEqual(
            self.TAGTYPES,
            set(response.data['tagtype']))

    # Bug fix: `@unittest.skip` was applied bare (without a reason string).
    # Before Python 3.11 that replaces the test with skip's inner decorator
    # function, so the test neither runs nor is reported as skipped.
    @unittest.skip('tagtypes masking is not supported yet')
    def test_tagtypes_mask(self):
        """Clearing the tag-type mask (currently skipped)."""
        with self.run_bpd() as client:
            response = client.send_command('tagtypes', 'clear')
        self._assert_ok(response)
class BPDPartitionTest(BPDTestHelper):
    """Partition commands: declared but not implemented yet."""

    test_implements_partitions = implements({
        'partition', 'listpartitions', 'newpartition',
    }, expectedFailure=True)
class BPDDeviceTest(BPDTestHelper):
    """Audio-output device commands: declared but not implemented yet."""

    test_implements_devices = implements({
        'disableoutput', 'enableoutput', 'toggleoutput', 'outputs',
    }, expectedFailure=True)
class BPDReflectionTest(BPDTestHelper):
    """Server-reflection commands."""

    test_implements_reflection = implements({
        'config', 'commands', 'notcommands', 'urlhandlers',
    }, expectedFailure=True)

    def test_cmd_decoders(self):
        """`decoders` advertises a single default decoder handling mp3."""
        with self.run_bpd() as client:
            response = client.send_command('decoders')
        self._assert_ok(response)
        self.assertEqual('default', response.data['plugin'])
        self.assertEqual('mp3', response.data['suffix'])
        self.assertEqual('audio/mpeg', response.data['mime_type'])
class BPDPeersTest(BPDTestHelper):
    """Client-to-client messaging commands: not implemented yet."""

    test_implements_peers = implements({
        'subscribe', 'unsubscribe', 'channels', 'readmessages',
        'sendmessage',
    }, expectedFailure=True)
def suite():
    """Collect every test in this module for the ``unittest`` runner."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main(defaultTest='suite')
|
{
"content_hash": "32f3d87dc002c12a3f8b77740b0abcca",
"timestamp": "",
"source": "github",
"line_count": 1030,
"max_line_length": 79,
"avg_line_length": 37.73495145631068,
"alnum_prop": 0.547276609977616,
"repo_name": "sampsyo/beets",
"id": "dd47ac62b6cdd68ff9cbbae60e0bfdae499fa9ac",
"size": "39538",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/test_player.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2951"
},
{
"name": "HTML",
"bytes": "3307"
},
{
"name": "JavaScript",
"bytes": "85950"
},
{
"name": "Python",
"bytes": "1707638"
},
{
"name": "Shell",
"bytes": "7413"
}
],
"symlink_target": ""
}
|
"""Settings Module"""
import logging
import os
MSW_API = os.environ.get('SF_MSW_API', None)
LOG_LEVEL = getattr(logging, os.environ.get('SF_LOG_LEVEL', 'DEBUG'))
|
{
"content_hash": "173b693914a994780ad7f974488c56f1",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 69,
"avg_line_length": 23.428571428571427,
"alnum_prop": 0.6951219512195121,
"repo_name": "Smotko/surfbot",
"id": "081eb6be2fbfd5ed43eb1b22b66b4cc64ac40bc4",
"size": "164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27788"
},
{
"name": "Shell",
"bytes": "692"
}
],
"symlink_target": ""
}
|
from scrapy.contrib.spiders.init import InitSpider
import datetime
class SpiderBase(InitSpider):
    """Shared conveniences for the project's spiders."""

    def extractor(self, xpathselector, selector):
        """
        Helper function that extract info from xpathselector object
        using the selector constrains.
        """
        matches = xpathselector.xpath(selector).extract()
        if matches:
            return matches[0]
        return ''

    def response_to_file(self, name, response):
        """Dump the raw body of *response* into the file *name*."""
        with open(name, 'wb') as fh:
            fh.write(response.body)
class CaptchaMixin(object):
    """Pauses the crawl engine while a captcha is being solved."""

    def process_captcha(self, *args, **kwargs):
        """Hook that concrete spiders must provide."""
        raise NotImplementedError(
            'SpiderBaseUtils requires a process_captcha method.'
        )

    def get_captcha(self, *args, **kwargs):
        """Pause the engine, run the captcha hook, resume, return result."""
        engine = self.crawler.engine
        engine.pause()
        result = self.process_captcha(*args, **kwargs)
        engine.unpause()
        return result
class ValidationMixin(object):
    """Small validation and string-cleanup helpers shared by spiders."""

    def clean_size(self, input_str, size):
        """Return True when *input_str* is exactly *size* characters long."""
        return len(input_str) == size

    def only_chars(self, value):
        """Placeholder -- not implemented (always returns None)."""
        pass

    def only_digits(self, value):
        """Return True when *value* consists solely of digits."""
        return value.isdigit()

    def clean_all_white_spaces(self, item):
        """Remove every whitespace character from *item*."""
        pieces = item.split()
        return ''.join(pieces)

    def clean_line_white_spaces(self, item):
        """Strip newline, tab and carriage-return characters from *item*."""
        cleaned = item
        for ch in ('\n', '\t', '\r'):
            cleaned = cleaned.replace(ch, '')
        return cleaned

    def str2datetime(self, date_time):
        """Parse 'dd/mm/YYYY HH:MM:SS'; return None on malformed input."""
        try:
            parsed = datetime.datetime.strptime(date_time,
                                                '%d/%m/%Y %H:%M:%S')
        except ValueError:
            return None
        return parsed

    def str2date(self, date):
        """Parse 'dd/mm/YYYY'; return None on malformed input."""
        try:
            parsed = datetime.datetime.strptime(date, '%d/%m/%Y')
        except ValueError:
            return None
        return parsed
class ItemMixin(ValidationMixin):
    """Extract/clean/build pipeline for scrapy items.

    Subclasses set ``item_class`` and ``xpath``.  Per-item hooks named
    ``extract_<name>``, ``clean_<name>`` and ``build_<name>`` override the
    generic pipeline steps in ``_process_item``.
    """

    item_class = None
    xpath = None

    def get_item_fields(self):
        """Return the field mapping declared on ``item_class``."""
        return self.item_class.fields

    def extract_item(self, selector):
        """Generic extraction step: apply ``self.xpath`` to *selector*."""
        return selector.xpath(self.xpath).extract()

    def clean_item(self, extraction):
        """Generic cleaning step: fall back to an empty dict when falsy."""
        return extraction or {}

    def build_item(self, cleaned_data):
        """Generic build step: instantiate ``item_class``."""
        return self.item_class(cleaned_data)

    def update_item(self, old_item, cleaned_data):
        """Build a fresh item; *old_item* is currently ignored."""
        return self.item_class(cleaned_data)

    def process_item(self, selector):
        """Run the generic extract -> clean -> build pipeline."""
        return self.build_item(self.clean_item(self.extract_item(selector)))

    def _process_item(self, selector, item_name=None):
        """Run the pipeline, preferring ``*_<item_name>`` hooks if present."""
        if item_name is None:
            return self.process_item(selector)
        extract = getattr(self, 'extract_%s' % item_name, self.extract_item)
        clean = getattr(self, 'clean_%s' % item_name, self.clean_item)
        build = getattr(self, 'build_%s' % item_name, self.build_item)
        return build(clean(extract(selector)))
class FieldExampleMixin(object):
    """Exposes ``field`` with a fallback to ``default`` when unset."""

    field = None
    default = None

    def get_field(self):
        """Return ``self.field`` unless it is None, else ``self.default``."""
        return self.default if self.field is None else self.field
|
{
"content_hash": "e5daba7c89249783db50ba31cc29f704",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 77,
"avg_line_length": 27.974789915966387,
"alnum_prop": 0.6037849203965154,
"repo_name": "bopo/scrapy-project-template",
"id": "6abde84d17875dc470b4a79264fcd9de225aed27",
"size": "3354",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "{{ cookiecutter.repo_name }}/{{ cookiecutter.repo_name }}/spiders/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "6503"
},
{
"name": "Makefile",
"bytes": "9140"
},
{
"name": "Python",
"bytes": "29091"
},
{
"name": "Ruby",
"bytes": "503"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.mixins import LoginRequiredMixin
from rest_framework.response import Response
from rest_framework.schemas import SchemaGenerator
from rest_framework.views import APIView
from rest_framework_swagger import renderers
from .urls.v1 import urlpatterns
class SwaggerSchemaView(LoginRequiredMixin, APIView):
    """Renders the Swagger UI for the v1 API; requires a logged-in user."""

    renderer_classes = [
        renderers.SwaggerUIRenderer,
    ]

    def get(self, request):
        """Generate the schema for the v1 urlpatterns and return it."""
        schema_generator = SchemaGenerator(
            title="RDMO API",
            patterns=urlpatterns,
            url=request.path,
        )
        return Response(schema_generator.get_schema(request=request))
|
{
"content_hash": "ce1f3b47242e4b5de5fe3a75ea3aa7ac",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 57,
"avg_line_length": 28.47826086956522,
"alnum_prop": 0.7145038167938931,
"repo_name": "DMPwerkzeug/DMPwerkzeug",
"id": "a9b43c71fc1977e9050dc8daf132741b4e8eb0dd",
"size": "655",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rdmo/core/swagger.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9735"
},
{
"name": "HTML",
"bytes": "126570"
},
{
"name": "JavaScript",
"bytes": "46177"
},
{
"name": "Python",
"bytes": "120676"
}
],
"symlink_target": ""
}
|
import functools
from tempest_lib.cli import base
from tempest_lib.cli import output_parser
import testtools
from tempest.common import credentials
from tempest import config
from tempest import exceptions
from tempest.openstack.common import versionutils
from tempest import test
CONF = config.CONF
def check_client_version(client, version):
    """Checks if the client's version is compatible with the given version

    @param client: The client to check.
    @param version: The version to compare against.
    @return: True if the client version is compatible with the given version
             parameter, False otherwise.
    """
    reported = base.execute(client, '', params='--version',
                            merge_stderr=True, cli_dir=CONF.cli.cli_dir)
    if not reported.strip():
        raise exceptions.TempestException('"%s --version" output was empty' %
                                          client)
    return versionutils.is_compatible(version, reported, same_major=False)
def min_client_version(*args, **kwargs):
    """A decorator to skip tests if the client used isn't of the right version.

    @param client: The client command to run. For python-novaclient, this is
                   'nova', for python-cinderclient this is 'cinder', etc.
    @param version: The minimum version required to run the CLI test.
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*func_args, **func_kwargs):
            client = kwargs['client']
            version = kwargs['version']
            if not check_client_version(client, version):
                msg = "requires %s client version >= %s" % (client, version)
                raise testtools.TestCase.skipException(msg)
            return func(*func_args, **func_kwargs)
        return wrapper
    return decorator
class ClientTestBase(test.BaseTestCase):
    """Base class for tests that exercise the OpenStack CLI clients."""

    @classmethod
    def skip_checks(cls):
        # The CLI clients authenticate through the identity v2 API, so the
        # whole class is skipped when that API is not available.
        super(ClientTestBase, cls).skip_checks()
        if not CONF.identity_feature_enabled.api_v2:
            raise cls.skipException("CLI clients rely on identity v2 API, "
                                    "which is configured as not available")

    @classmethod
    def resource_setup(cls):
        # Bail out early (before allocating credentials) when CLI testing
        # is disabled in the configuration.
        if not CONF.cli.enabled:
            msg = "cli testing disabled"
            raise cls.skipException(msg)
        super(ClientTestBase, cls).resource_setup()
        cls.isolated_creds = credentials.get_isolated_credentials(cls.__name__)
        cls.creds = cls.isolated_creds.get_admin_creds()

    def _get_clients(self):
        # Build a CLI client wrapper bound to the admin credentials.
        clients = base.CLIClient(self.creds.username,
                                 self.creds.password,
                                 self.creds.tenant_name,
                                 CONF.identity.uri, CONF.cli.cli_dir)
        return clients

    # TODO(mtreinish): The following code is basically copied from tempest-lib.
    # The base cli test class in tempest-lib 0.0.1 doesn't work as a mixin like
    # is needed here. The code below should be removed when tempest-lib
    # provides a way to provide this functionality
    def setUp(self):
        super(ClientTestBase, self).setUp()
        self.clients = self._get_clients()
        self.parser = output_parser

    def assertTableStruct(self, items, field_names):
        """Verify that all items has keys listed in field_names.

        :param items: items to assert are field names in the output table
        :type items: list
        :param field_names: field names from the output table of the cmd
        :type field_names: list
        """
        for item in items:
            for field in field_names:
                self.assertIn(field, item)

    def assertFirstLineStartsWith(self, lines, beginning):
        """Verify that the first line starts with a string

        :param lines: strings for each line of output
        :type lines: list
        :param beginning: verify this is at the beginning of the first line
        :type beginning: string
        """
        self.assertTrue(lines[0].startswith(beginning),
                        msg=('Beginning of first line has invalid content: %s'
                             % lines[:3]))
|
{
"content_hash": "f1ad10beef65b66a0e9d3267bd64976e",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 79,
"avg_line_length": 38.4954954954955,
"alnum_prop": 0.6154926281301194,
"repo_name": "hpcloud-mon/tempest",
"id": "6733204736a4248469be1120c8be5a92ae0aed41",
"size": "4909",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tempest/cli/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2804899"
},
{
"name": "Shell",
"bytes": "8560"
}
],
"symlink_target": ""
}
|
from pysmt.shortcuts import And, Not, Symbol, Bool, Exists, Solver
from pysmt.shortcuts import get_env, qelim, Or
from pysmt.test import TestCase, skipIfSolverNotAvailable, main
from pysmt.test.examples import get_example_formulae
from pysmt.logics import BOOL
from pysmt.exceptions import PysmtValueError
class TestBdd(TestCase):
    """Tests for the BDD (CUDD) backend: conversion, solving, quantifier
    elimination and variable-reordering options."""

    @skipIfSolverNotAvailable("bdd")
    def setUp(self):
        self.x, self.y = Symbol("x"), Symbol("y")
        self.bdd_solver = Solver(logic=BOOL,
                                 name='bdd')
        self.bdd_converter = self.bdd_solver.converter

        # Build a moderately large formula by repeatedly duplicating (x & y).
        trail = [And, Or, And, Or]
        f = And(self.x, self.y)
        for op in trail:
            f = op(f, f)
        self.big_tree = f

    @skipIfSolverNotAvailable("bdd")
    def test_basic_bdd_variables(self):
        """Converting the same symbol twice yields the same BDD node."""
        convert = self.bdd_converter.convert
        bdd_x = convert(self.x)
        bdd_x_2 = convert(self.x)
        bdd_y = convert(self.y)
        self.assertIsNotNone(bdd_x)
        self.assertEqual(bdd_x, bdd_x_2)
        self.assertNotEqual(bdd_x, bdd_y)

    @skipIfSolverNotAvailable("bdd")
    def test_basic_expr(self):
        """Conversion commutes with conjunction on the BDD manager."""
        convert = self.bdd_converter.convert
        bdd_x = convert(self.x)
        bdd_y = convert(self.y)
        bdd_x_and_y = self.bdd_converter.ddmanager.And(bdd_x, bdd_y)
        x_and_y = And(self.x, self.y)
        converted_expr = convert(x_and_y)
        self.assertEqual(bdd_x_and_y, converted_expr)

    @skipIfSolverNotAvailable("bdd")
    def test_examples_conversion(self):
        """Every boolean example formula converts without error."""
        convert = self.bdd_converter.convert
        for example in get_example_formulae():
            if example.logic != BOOL:
                continue
            expr = convert(example.expr)
            self.assertIsNotNone(expr)

    @skipIfSolverNotAvailable("bdd")
    def test_examples_solving(self):
        """Satisfiability of the boolean examples matches their annotation."""
        for example in get_example_formulae():
            if example.logic != BOOL:
                continue
            solver = Solver(logic=BOOL,
                            name='bdd')
            solver.add_assertion(example.expr)
            if example.is_sat:
                self.assertTrue(solver.solve())
            else:
                self.assertFalse(solver.solve())

    @skipIfSolverNotAvailable("bdd")
    def test_basic_solving(self):
        """Model extraction and push/pop behave as expected."""
        solver = self.bdd_solver
        f = And(self.x, Not(self.y))
        solver.add_assertion(f)
        self.assertTrue(solver.solve())
        model = solver.get_model()
        self.assertEqual(model[self.x], Bool(True))
        self.assertEqual(model[self.y], Bool(False))
        self.assertFalse(solver.get_py_value(self.y))
        solver.push()
        solver.add_assertion(Not(self.x))
        self.assertFalse(solver.solve())
        solver.pop()
        self.assertTrue(solver.solve())

    @skipIfSolverNotAvailable("bdd")
    def test_quantifier_elimination(self):
        """Converting Exists x. (x & y) eliminates x, leaving y."""
        convert = self.bdd_converter.convert
        f = Exists([self.x], And(self.x, self.y))
        bdd_g = convert(f)
        g = self.bdd_converter.back(bdd_g)
        self.assertEqual(g, self.y)

    @skipIfSolverNotAvailable("bdd")
    def test_quantifier_eliminator(self):
        """The qelim shortcut produces the same elimination result."""
        f = Exists([self.x], And(self.x, self.y))
        g = qelim(f, solver_name="bdd")
        self.assertEqual(g, self.y)

    @skipIfSolverNotAvailable("bdd")
    def test_reordering(self):
        """Solving works with dynamic variable reordering enabled."""
        with Solver(name="bdd", logic=BOOL,
                    solver_options={'dynamic_reordering': True}) as s:
            s.add_assertion(self.big_tree)
            self.assertTrue(s.solve())

    @skipIfSolverNotAvailable("bdd")
    def test_reordering_algorithms(self):
        """Each CUDD reordering algorithm is accepted and reported back."""
        from pysmt.solvers.bdd import BddOptions
        for algo in BddOptions.CUDD_ALL_REORDERING_ALGORITHMS:
            with Solver(name="bdd", logic=BOOL,
                        solver_options={'dynamic_reordering': True,
                                        'reordering_algorithm': algo}) as s:
                s.add_assertion(self.big_tree)
                self.assertTrue(s.solve())
                self.assertEqual(algo, s.ddmanager.ReorderingStatus()[1])

    @skipIfSolverNotAvailable("bdd")
    def test_fixed_ordering(self):
        """A static variable ordering is honored by the CUDD manager."""
        f_order = [self.x, self.y]
        r_order = list(reversed(f_order))
        for order in [f_order, r_order]:
            with Solver(name="bdd", logic=BOOL,
                        solver_options={'static_ordering': order}) as s:
                s.add_assertion(self.big_tree)
                self.assertTrue(s.solve())
                # Check that the ordering is understood by CUDD
                for pos, var in enumerate(order):
                    var_idx = s.converter.var2node[var].NodeReadIndex()
                    perm = s.ddmanager.ReadPerm(var_idx)
                    self.assertEqual(pos, perm)

    @skipIfSolverNotAvailable("bdd")
    def test_invalid_ordering(self):
        """Non-symbol entries in a static ordering raise PysmtValueError."""
        with self.assertRaises(PysmtValueError):
            Solver(name="bdd", logic=BOOL,
                   solver_options={'static_ordering':
                                   [And(self.x, self.y), self.y]})

    @skipIfSolverNotAvailable("bdd")
    def test_initial_ordering(self):
        """A static initial ordering can combine with dynamic reordering."""
        with Solver(name="bdd", logic=BOOL,
                    solver_options={'static_ordering': [self.x, self.y],
                                    'dynamic_reordering': True}) as s:
            s.add_assertion(self.big_tree)
            self.assertTrue(s.solve())
            # Bug fix: assertNotEquals is a deprecated alias (removed in
            # Python 3.12); use assertNotEqual.
            self.assertNotEqual(s.ddmanager.ReorderingStatus()[1], 0)
if __name__ == '__main__':
    # Run the BDD test suite when this module is executed directly.
    main()
|
{
"content_hash": "777bd09e5ccff6e67a7d2e7b56cc18f8",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 76,
"avg_line_length": 35.318471337579616,
"alnum_prop": 0.5796212804328224,
"repo_name": "agriggio/pysmt",
"id": "f1dbcd141113dcbb81c46c6ac81e6b5b8c079b52",
"size": "6195",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pysmt/test/test_bdd.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1842"
},
{
"name": "PowerShell",
"bytes": "5987"
},
{
"name": "Python",
"bytes": "1019481"
},
{
"name": "Shell",
"bytes": "6094"
}
],
"symlink_target": ""
}
|
from .baserequest import BaseRequest
from oandapyV20.types import Units, PriceValue
from oandapyV20.definitions.orders import (
OrderType,
TimeInForce,
OrderPositionFill)
class MarketOrderRequest(BaseRequest):
    """create a MarketOrderRequest.

    MarketOrderRequest is used to build the body for a MarketOrder.
    The body can be used to pass to the OrderCreate endpoint.
    """

    def __init__(self,
                 instrument,
                 units,
                 priceBound=None,
                 positionFill=OrderPositionFill.DEFAULT,
                 clientExtensions=None,
                 takeProfitOnFill=None,
                 timeInForce=TimeInForce.FOK,
                 stopLossOnFill=None,
                 trailingStopLossOnFill=None,
                 tradeClientExtensions=None):
        """
        Instantiate a MarketOrderRequest.

        Parameters
        ----------
        instrument : string (required)
            the instrument to create the order for

        units: integer (required)
            the number of units. If positive the order results in a LONG
            order. If negative the order results in a SHORT order

        Example
        -------
        >>> import json
        >>> from oandapyV20 import API
        >>> import oandapyV20.endpoints.orders as orders
        >>> from oandapyV20.contrib.requests import MarketOrderRequest
        >>>
        >>> accountID = "..."
        >>> client = API(access_token=...)
        >>> mo = MarketOrderRequest(instrument="EUR_USD", units=10000)
        >>> print(json.dumps(mo.data, indent=4))
        {
            "order": {
                "type": "MARKET",
                "positionFill": "DEFAULT",
                "instrument": "EUR_USD",
                "timeInForce": "FOK",
                "units": "10000"
            }
        }
        >>> # now we have the order specification, create the order request
        >>> r = orders.OrderCreate(accountID, data=mo.data)
        >>> # perform the request
        >>> rv = client.request(r)
        >>> print(rv)
        >>> print(json.dumps(rv, indent=4))
        {
            "orderFillTransaction": {
                "reason": "MARKET_ORDER",
                "pl": "0.0000",
                "accountBalance": "97864.8813",
                "units": "10000",
                "instrument": "EUR_USD",
                "accountID": "101-004-1435156-001",
                "time": "2016-11-11T19:59:43.253587917Z",
                "type": "ORDER_FILL",
                "id": "2504",
                "financing": "0.0000",
                "tradeOpened": {
                    "tradeID": "2504",
                    "units": "10000"
                },
                "orderID": "2503",
                "userID": 1435156,
                "batchID": "2503",
                "price": "1.08463"
            },
            "lastTransactionID": "2504",
            "relatedTransactionIDs": [
                "2503",
                "2504"
            ],
            "orderCreateTransaction": {
                "type": "MARKET_ORDER",
                "reason": "CLIENT_ORDER",
                "id": "2503",
                "timeInForce": "FOK",
                "units": "10000",
                "time": "2016-11-11T19:59:43.253587917Z",
                "positionFill": "DEFAULT",
                "accountID": "101-004-1435156-001",
                "instrument": "EUR_USD",
                "batchID": "2503",
                "userID": 1435156
            }
        }
        >>>
        """
        super(MarketOrderRequest, self).__init__()

        # allowed: FOK/IOC
        if timeInForce not in [TimeInForce.FOK,
                               TimeInForce.IOC]:
            raise ValueError("timeInForce: {}".format(timeInForce))

        # by default for a MARKET order
        self._data.update({"type": OrderType.MARKET})
        self._data.update({"timeInForce": timeInForce})

        # required
        self._data.update({"instrument": instrument})
        # Units/PriceValue wrap validation and string conversion.
        self._data.update({"units": Units(units).value})

        # optional
        if priceBound:
            self._data.update({"priceBound": PriceValue(priceBound).value})

        # positionFill must be one of the OrderPositionFill constants.
        if not hasattr(OrderPositionFill, positionFill):
            raise ValueError("positionFill {}".format(positionFill))
        self._data.update({"positionFill": positionFill})

        # NOTE(review): the remaining optional parameters are stored even
        # when None; presumably BaseRequest.data filters out None values
        # (the documented example output contains no null fields) -- confirm.
        self._data.update({"clientExtensions": clientExtensions})
        self._data.update({"takeProfitOnFill": takeProfitOnFill})
        self._data.update({"stopLossOnFill": stopLossOnFill})
        self._data.update({"trailingStopLossOnFill": trailingStopLossOnFill})
        self._data.update({"tradeClientExtensions": tradeClientExtensions})

    @property
    def data(self):
        """data property.

        return the JSON body.
        """
        return dict({"order": super(MarketOrderRequest, self).data})
|
{
"content_hash": "f958512671cc0fde872035fadf34f89d",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 77,
"avg_line_length": 33.58904109589041,
"alnum_prop": 0.5142740619902121,
"repo_name": "hootnot/oanda-api-v20",
"id": "1599ebde965e1cd77308b7bbc0ac2163e0131fbb",
"size": "4929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oandapyV20/contrib/requests/marketorder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "89407"
},
{
"name": "Python",
"bytes": "328653"
}
],
"symlink_target": ""
}
|
"""Wikipedia Cirrus Extractor:
Extracts and cleans text from a Wikipedia Cirrus dump and stores output in a
number of files of similar size in a given directory.
Each file will contain several documents in the format:
<doc id="" url="" title="">
...
</doc>
"""
import sys, os.path, time
import re
import json
import argparse
import bz2
import gzip
import logging
# Program version
version = '1.00'

# Base prefix used when constructing per-page URLs (Italian Wikipedia).
urlbase = 'http://it.wikipedia.org/'
# ----------------------------------------------------------------------
class NextFile(object):
    """
    Synchronous generation of next available file name.

    Output files are grouped ``filesPerDir`` per directory; directories are
    named AA, AB, ... and created on demand.
    """

    filesPerDir = 100

    def __init__(self, path_name):
        """:param path_name: root directory for the generated files."""
        self.path_name = path_name
        self.dir_index = -1
        self.file_index = -1

    def next(self):
        """Return the next output file path, creating its directory."""
        self.file_index = (self.file_index + 1) % NextFile.filesPerDir
        if self.file_index == 0:
            self.dir_index += 1
        dirname = self._dirname()
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        return self._filepath()

    def _dirname(self):
        # Bug fix: use floor division so the result stays an int under
        # Python 3; plain '/' yields a float there and breaks the ord()
        # arithmetic below. Behavior is unchanged under Python 2.
        char1 = self.dir_index % 26
        char2 = self.dir_index // 26 % 26
        return os.path.join(self.path_name,
                            '%c%c' % (ord('A') + char2, ord('A') + char1))

    def _filepath(self):
        return '%s/wiki_%02d' % (self._dirname(), self.file_index)
class OutputSplitter(object):
    """
    File-like object, that splits output to multiple files of a given max size.
    """

    def __init__(self, nextFile, max_file_size=0, compress=True):
        """
        :param nextFile: a NextFile object from which to obtain filenames
            to use.
        :param max_file_size: the maximum size of each file.
        :param compress: whether to write data with bzip compression.
        """
        self.nextFile = nextFile
        self.compress = compress
        self.max_file_size = max_file_size
        self.file = self.open(self.nextFile.next())

    def reserve(self, size):
        # Roll over to a fresh file when the pending write would exceed
        # the configured size limit.
        if self.file.tell() + size > self.max_file_size:
            self.close()
            self.file = self.open(self.nextFile.next())

    def write(self, data):
        self.reserve(len(data))
        self.file.write(data)

    def close(self):
        self.file.close()

    def open(self, filename):
        # NOTE(review): written for Python 2 -- BZ2File opened with 'w'
        # expects byte strings there; callers pass utf-8 encoded data.
        if self.compress:
            return bz2.BZ2File(filename + '.bz2', 'w')
        else:
            return open(filename, 'w')
# ----------------------------------------------------------------------
class Extractor(object):
    """Writes one extracted page (title plus cleaned text) to a file."""

    def extract(self, out):
        """
        :param out: output file.
        """
        logging.debug("%s\t%s", self.id, self.title)
        text = ''.join(self.page)
        # NOTE(review): get_url, clean and compact are not defined in this
        # file -- presumably imported from the main WikiExtractor module;
        # confirm before reusing this class standalone.
        url = get_url(self.id)
        #header = '<doc id="%s" url="%s" title="%s">\n' % (self.id, url, self.title)
        # Separate header from text with a newline.
        header = self.title + '\n\n'
        header = header.encode('utf-8')
        #footer = "\n</doc>\n"
        out.write(header)
        text = clean(self, text)
        for line in compact(text):
            out.write(line.encode('utf-8'))
            out.write('\n')
        #out.write(footer)
def process_dump(input_file, out_file, file_size, file_compress):
    """
    Read a Cirrus JSON dump and write extracted pages as plain text.

    :param input_file: name of the wikipedia dump file; '-' to read from stdin
    :param out_file: directory where to store extracted data, or '-' for stdout
    :param file_size: max size of each extracted file, or None for no max (one file)
    :param file_compress: whether to compress files with bzip.
    """
    if input_file == '-':
        source = sys.stdin
    else:
        source = gzip.open(input_file)

    if out_file == '-':
        output = sys.stdout
        if file_compress:
            # Bug fix: logging.warn is a deprecated alias of warning
            # (removed in Python 3.13).
            logging.warning("writing to stdout, so no output compression "
                            "(use external tool)")
    else:
        nextFile = NextFile(out_file)
        output = OutputSplitter(nextFile, file_size, file_compress)

    # The Cirrus dump alternates two JSON lines per page:
    # {"index":{"_type":"page","_id":"3825914"}}
    # {"namespace":0,"title":TITLE,"timestamp":"2014-06-29T15:51:09Z","text":TEXT,...}
    while True:
        line = source.readline()
        if not line:
            break
        index = json.loads(line)
        content = json.loads(source.readline())
        # Renamed locals: 'type' and 'id' shadowed builtins.
        page_type = index['index']['_type']
        page_id = index['index']['_id']
        if page_type == 'page' and content['namespace'] == 0:
            title = content['title']
            text = content['text']
            # drop references:
            # ^ The Penguin Dictionary
            text = re.sub(r' \^ .*', '', text)
            url = urlbase + 'wiki?curid=' + page_id
            #header = '<doc id="%s" url="%s" title="%s">\n' % (page_id, url, title)
            page = title + '\n\n' + text + '\n'
            output.write(page.encode('utf-8'))
# ----------------------------------------------------------------------
# Minimum size of output files: 200 KiB.  main() rejects -b values below
# this threshold.
minFileSize = 200 * 1024
def main():
    """Parse command-line options and run the Cirrus dump extraction."""
    parser = argparse.ArgumentParser(prog=os.path.basename(sys.argv[0]),
                                     formatter_class=argparse.RawDescriptionHelpFormatter,
                                     description=__doc__)
    parser.add_argument("input",
                        help="Cirrus Json wiki dump file")
    groupO = parser.add_argument_group('Output')
    groupO.add_argument("-o", "--output", default="text",
                        # BUG fix: help text said "stdin"; '-' dumps to stdout.
                        help="directory for extracted files (or '-' for dumping to stdout)")
    groupO.add_argument("-b", "--bytes", default="1M",
                        help="maximum bytes per output file (default %(default)s)",
                        metavar="n[KMG]")
    groupO.add_argument("-c", "--compress", action="store_true",
                        help="compress output files using bzip")
    groupP = parser.add_argument_group('Processing')
    groupP.add_argument("-ns", "--namespaces", default="", metavar="ns1,ns2",
                        help="accepted namespaces")
    groupS = parser.add_argument_group('Special')
    groupS.add_argument("-q", "--quiet", action="store_true",
                        help="suppress reporting progress info")
    groupS.add_argument("-v", "--version", action="version",
                        version='%(prog)s ' + version,
                        help="print program version")
    args = parser.parse_args()
    # Parse the -b size: optional K/M/G suffix, otherwise plain bytes.
    try:
        suffix = args.bytes[-1].lower()
        if suffix in 'kmg':
            power = 'kmg'.index(suffix) + 1
            digits = args.bytes[:-1]
        else:
            # BUG fix: a bare number such as "-b 500000" used to lose its
            # last digit because the suffix was stripped unconditionally.
            power = 0
            digits = args.bytes
        file_size = int(digits) * 1024 ** power
        if file_size < minFileSize:
            raise ValueError()
    except ValueError:
        logging.error('Insufficient or invalid size: %s', args.bytes)
        return
    FORMAT = '%(levelname)s: %(message)s'
    logging.basicConfig(format=FORMAT)
    logger = logging.getLogger()
    if not args.quiet:
        logger.setLevel(logging.INFO)
    input_file = args.input
    output_path = args.output
    if output_path != '-' and not os.path.isdir(output_path):
        try:
            os.makedirs(output_path)
        except OSError:
            # BUG fix: was a bare "except:" that also swallowed
            # KeyboardInterrupt and SystemExit.
            logging.error('Could not create: %s', output_path)
            return
    process_dump(input_file, output_path, file_size, args.compress)
# Run only when executed as a script (not on import).
if __name__ == '__main__':
    main()
|
{
"content_hash": "67e379fee2a768a8ebdbed16b2a50f07",
"timestamp": "",
"source": "github",
"line_count": 222,
"max_line_length": 91,
"avg_line_length": 32.38288288288288,
"alnum_prop": 0.5504242592850188,
"repo_name": "emrekgn/tr-rnn",
"id": "ff564b26b03993373949630dfeefd7c9f6f17f15",
"size": "8327",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/cirrus-extract.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "145576"
},
{
"name": "Shell",
"bytes": "4993"
}
],
"symlink_target": ""
}
|
"""Sphinx configuration for the sphinxcontrib-gnuplot documentation.

Standard sphinx-quickstart layout: every assignment below is a stock Sphinx
option; commented-out entries document defaults left unchanged.
"""
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinxcontrib.gnuplot',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sphinxcontrib-gnuplot'
copyright = u'2009, Vadim Gubergrits'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'sphinxcontrib-gnuplotdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
  ('index', 'sphinxcontrib-gnuplot.tex', u'sphinxcontrib-gnuplot Documentation',
   u'Vadim Gubergrits', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
{
"content_hash": "c69de09bd366f0c22f89fe3c47463477",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 80,
"avg_line_length": 32.536842105263155,
"alnum_prop": 0.7115820122937561,
"repo_name": "Lemma1/MAC-POSTS",
"id": "092ff21583d38363377a0847d01de1db801de1e6",
"size": "6614",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "doc_builder/sphinx-contrib/gnuplot/doc/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "3394"
},
{
"name": "Batchfile",
"bytes": "103388"
},
{
"name": "C",
"bytes": "5399"
},
{
"name": "C++",
"bytes": "3595985"
},
{
"name": "CMake",
"bytes": "53433"
},
{
"name": "CSS",
"bytes": "3618"
},
{
"name": "HTML",
"bytes": "18640"
},
{
"name": "JavaScript",
"bytes": "44610"
},
{
"name": "Jupyter Notebook",
"bytes": "7469541"
},
{
"name": "MATLAB",
"bytes": "5439"
},
{
"name": "Makefile",
"bytes": "148059"
},
{
"name": "Python",
"bytes": "1950140"
},
{
"name": "Shell",
"bytes": "2554"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema: Bet, Client, Record, Riding, Bookmaker and FK wiring."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Bet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.FloatField(verbose_name='BetValue')),
                ('potentialWin', models.FloatField(verbose_name='BetWin')),
                ('isBetSuccessful', models.BooleanField(verbose_name='BetSuccess')),
                ('isActive', models.NullBooleanField(default=True, verbose_name='BetActive')),
            ],
        ),
        migrations.CreateModel(
            name='Client',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(default='null', max_length=64, verbose_name='ClientName')),
                ('email', models.EmailField(max_length=254, verbose_name='ClientMail')),
                ('password', models.CharField(max_length=1024, verbose_name='ClientPassword')),
                ('balance', models.FloatField(verbose_name='ClientBalance')),
                ('logged', models.NullBooleanField(verbose_name='ClientLogged')),
            ],
        ),
        migrations.CreateModel(
            name='Record',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('time', models.DateTimeField()),
                ('username', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Riding',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # BUG fix: unique=1 -> unique=True; the option is a boolean
                # flag (1 == True, so no schema change results).
                ('name', models.CharField(max_length=100, unique=True, verbose_name='RideName')),
                ('firstHorseChance', models.FloatField(null=True, verbose_name='RideFirstHorseChance')),
                ('secondHorseChance', models.FloatField(null=True, verbose_name='RideSecondHorseChance')),
                ('winner', models.IntegerField(null=True, verbose_name='RideWinner')),
                ('visible', models.NullBooleanField(default=False, verbose_name='RideVisible')),
                ('ready', models.BooleanField(default=False, verbose_name='RidePrognosed')),
            ],
        ),
        migrations.CreateModel(
            name='Bookmaker',
            fields=[
                ('client_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='polls.Client')),
            ],
            bases=('polls.client',),
        ),
        migrations.AddField(
            model_name='client',
            name='name',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AddField(
            model_name='bet',
            name='ownerLink',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Client'),
        ),
        migrations.AddField(
            model_name='bet',
            name='rides',
            field=models.ManyToManyField(to='polls.Riding'),
        ),
    ]
|
{
"content_hash": "1b006005db5206e8dde032d4a850c7b6",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 189,
"avg_line_length": 45.6125,
"alnum_prop": 0.586736092080022,
"repo_name": "Fly-Style/metaprog_univ",
"id": "0edd52121113ec4c2b5a8abc1ec5ca1027ca6c06",
"size": "3722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lab1/polls/migrations/0001_initia_l.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "210064"
},
{
"name": "CoffeeScript",
"bytes": "2743"
},
{
"name": "HTML",
"bytes": "83550"
},
{
"name": "JavaScript",
"bytes": "467304"
},
{
"name": "Python",
"bytes": "34730"
},
{
"name": "Ruby",
"bytes": "138370"
}
],
"symlink_target": ""
}
|
import os
from subprocess import Popen, PIPE
def solve_it(input_data):
    """Solve one knapsack instance by delegating to ./knapsack.bin.

    Writes *input_data* to a scratch file, runs the solver binary on it,
    and returns the solver's stdout stripped of surrounding whitespace.
    """
    # Writes the inputData to a temporary file.
    tmp_file_name = 'tmp.data'
    # BUG fix: use a context manager so the file is flushed and closed even
    # if write() raises.
    with open(tmp_file_name, 'w') as tmp_file:
        tmp_file.write(input_data)
    try:
        # Runs the command: ./knapsack.bin tmp_file_name
        process = Popen(['./knapsack.bin', tmp_file_name], stdout=PIPE)
        (stdout, stderr) = process.communicate()
    finally:
        # BUG fix: remove the scratch file even when the subprocess fails.
        os.remove(tmp_file_name)
    return stdout.strip()
import sys
# Command-line entry point.  NOTE: Python 2 syntax (``print`` statements);
# kept byte-identical for the interpreter this script targets.
if __name__ == '__main__':
    if len(sys.argv) > 1:
        # First argument is the path to a knapsack instance file.
        file_location = sys.argv[1].strip()
        input_data_file = open(file_location, 'r')
        input_data = ''.join(input_data_file.readlines())
        input_data_file.close()
        print solve_it(input_data)
    else:
        print 'This test requires an input file. Please select one from the data directory. (i.e. python solver.py ./data/ks_4_0)'
|
{
"content_hash": "49ef98bfb8e439562b37fc247c162fbc",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 131,
"avg_line_length": 27.323529411764707,
"alnum_prop": 0.6318622174381054,
"repo_name": "matematik7/Coursera-DiscreteOptimization",
"id": "56be7d1630a161734eb961ac6f83150eb3b1bd8f",
"size": "972",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "knapsack/solver.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "48269"
},
{
"name": "Erlang",
"bytes": "808"
},
{
"name": "Java",
"bytes": "2525"
},
{
"name": "JavaScript",
"bytes": "780"
},
{
"name": "Python",
"bytes": "36547"
}
],
"symlink_target": ""
}
|
from cinderclient.exceptions import NotFound as CinderNotFound
from cinderclient.exceptions import Forbidden as CinderForbidden
from glanceclient.exc import HTTPNotFound as GlanceNotFound
from glanceclient.exc import Unauthorized as GlanceUnauthorized
from glanceclient.exc import HTTPForbidden as GlanceForbidden
from keystoneauth1.exceptions.http import Unauthorized as KeystoneUnauthorized
from keystoneauth1.exceptions.http import Forbidden as KeystoneForbidden
from keystoneauth1.exceptions.http import NotFound as KeystoneNotFound
from neutronclient.common.exceptions import NotFound as NeutronNotFound
from novaclient.exceptions import NotFound as NovaNotFound
from novaclient.exceptions import Forbidden as NovaForbidden
from swiftclient.client import ClientException as SwiftClientException
from neutronclient.common.exceptions import Forbidden as NeutronForbidden
from swift_exceptions import SwiftNotAuthorized as SwiftNotAuthorized
from swift_exceptions import SwiftNotFoundException as SwiftNotFoundException
from swift_exceptions import SwiftForbidden as SwiftForbidden
# Names re-exported by ``from roletester.exc import *``.
__all__ = [
    'CinderNotFound',
    'CinderForbidden',
    'GlanceNotFound',
    'GlanceUnauthorized',
    'GlanceForbidden',
    'NeutronForbidden',
    'NeutronNotFound',
    'KeystoneNotFound',
    'KeystoneUnauthorized',
    # BUG fix: a missing comma here used to fuse the next two entries into
    # the single (nonexistent) name 'KeystoneForbiddenNovaNotFound' via
    # implicit string concatenation.
    'KeystoneForbidden',
    'NovaNotFound',
    # BUG fix: NovaForbidden is imported above but was absent from __all__.
    'NovaForbidden',
    'SwiftClientException',
    'SwiftNotAuthorized',
    'SwiftNotFoundException',
    'SwiftForbidden',
]
|
{
"content_hash": "100578cfba66f3e0cd2f329107b59e1a",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 78,
"avg_line_length": 43.029411764705884,
"alnum_prop": 0.8311688311688312,
"repo_name": "chalupaul/roletester",
"id": "69b7b1c213ed60022e80f75cdcccc874740b052c",
"size": "1463",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "roletester/exc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "321193"
}
],
"symlink_target": ""
}
|
import copy
import logging
from nose.tools import assert_true, assert_false, assert_equal, assert_raises
from django.contrib.auth.models import User
from desktop.lib.django_test_util import make_logged_in_client
from desktop.lib.test_utils import grant_access
from liboozie.oozie_api_test import OozieServerProvider
from jobsub import conf
from jobsub.management.commands import jobsub_setup
from jobsub.models import OozieDesign, OozieMapreduceAction, OozieStreamingAction
from jobsub.parameterization import recursive_walk, find_variables, substitute_variables
# Module-level logger, named after this test module.
LOG = logging.getLogger(__name__)
def test_recursive_walk():
  """recursive_walk must visit every leaf and support value replacement."""
  def visit(_):
    visit.leafs += 1
  # Leaf count over a flat list.
  visit.leafs = 0
  recursive_walk(visit, [0, 1, 2])
  assert_equal(3, visit.leafs)
  # A bare scalar counts as a single leaf.
  visit.leafs = 0
  recursive_walk(visit, 1)
  assert_equal(1, visit.leafs)
  # Nested dicts: all four leaves visited, input left untouched.
  visit.leafs = 0
  nested = dict(a=2, b=3, c=dict(d=4, e=5))
  snapshot = copy.deepcopy(nested)
  recursive_walk(visit, nested)
  assert_equal(4, visit.leafs)
  assert_equal(snapshot, nested, 'Object unexpectedly modified.')
  # The walk also returns a transformed copy of the structure.
  assert_equal(dict(a=4, b=9, c=dict(d=16, e=25)),
               recursive_walk(lambda x: x * x, nested))
def test_find_variables():
  """find_variables must collect $var / ${var} names across nested data."""
  sample = dict(one='$a',
                two=dict(c='foo $b $$'),
                three=['${foo}', 'xxx ${foo}'])
  # '$$' is an escape, and duplicate '${foo}' appears only once in the set.
  assert_equal(set(['a', 'b', 'foo']), find_variables(sample))
def test_substitute_variables():
  """substitute_variables must expand both $var and ${var} forms."""
  mapping = dict(greeting='hi', where='there')
  payload = ['$greeting', dict(a='${where} $where')]
  assert_equal(['hi', dict(a='there there')],
               substitute_variables(payload, mapping))
  # None leaves and an empty mapping pass through untouched.
  untouched = [None, 'foo', dict(a=None)]
  assert_equal(untouched, substitute_variables(untouched, dict()),
               'Nothing to substitute')
def test_job_design_cycle():
  """
  Exercises the "job design" CMS: create, list, filter, edit and delete.
  Submission requires a cluster, so that's covered separately.
  """
  client = make_logged_in_client()
  # The "new design" pages must render their submission forms.
  response = client.get('/jobsub/new_design/java')
  assert_equal(1, response.content.count('action="/jobsub/new_design/java" method="POST"'))
  response = client.get('/jobsub/new_design/streaming')
  assert_equal(1, response.content.count('action="/jobsub/new_design/streaming" method="POST"'))
  # Posting the java form creates exactly one new design.
  design_count = OozieDesign.objects.count()
  response = client.post('/jobsub/new_design/java', {
      u'wf-name': [u'name-1'],
      u'wf-description': [u'description name-1'],
      u'action-args': [u'x y z'],
      u'action-main_class': [u'MyClass'],
      u'action-jar_path': [u'myfile.jar'],
      u'action-java_opts': [u''],
      u'action-archives': [u'[]'],
      u'action-job_properties': [u'[]'],
      u'action-files': [u'[]']})
  assert_equal(design_count + 1, OozieDesign.objects.count())
  job_id = OozieDesign.objects.get(name='name-1').id
  # A second, mapreduce design for the filtering checks below.
  response = client.post('/jobsub/new_design/mapreduce', {
      u'wf-name': [u'name-2'],
      u'wf-description': [u'description name-2'],
      u'action-args': [u'x y z'],
      u'action-jar_path': [u'myfile.jar'],
      u'action-archives': [u'[]'],
      u'action-job_properties': [u'[]'],
      u'action-files': [u'[]']})
  # The edit page shows the stored arguments.
  edit_url = '/jobsub/edit_design/%d' % job_id
  response = client.get(edit_url)
  assert_true('x y z' in response.content, response.content)
  # Saving an edit changes the stored arguments.
  response = client.post(edit_url, {
      u'wf-name': [u'name-1'],
      u'wf-description': [u'description name-1'],
      u'action-args': [u'a b c'],
      u'action-main_class': [u'MyClass'],
      u'action-jar_path': [u'myfile.jar'],
      u'action-java_opts': [u''],
      u'action-archives': [u'[]'],
      u'action-job_properties': [u'[]'],
      u'action-files': [u'[]']})
  assert_true('a b c' in client.get(edit_url).content)
  # A post with legacy field names must not trip form validation.
  response = client.post('/jobsub/new_design/java',
      dict(name='test2', jarfile='myfile.jar', arguments='x y z', submit='Save'))
  assert_false('This field is required' in response)
  # The listing page shows every design...
  response = client.get('/jobsub/')
  for design in OozieDesign.objects.all():
    assert_true(design.name in response.content, response.content)
  # ...and honours name/owner filters, alone and combined.
  response = client.get('/jobsub/', dict(name='name-1'))
  assert_true('name-1' in response.content, response.content)
  assert_false('name-2' in response.content, response.content)
  response = client.get('/jobsub/', dict(owner='doesnotexist'))
  assert_false('doesnotexist' in response.content)
  response = client.get('/jobsub/', dict(owner='test', name='name-1'))
  assert_true('name-1' in response.content, response.content)
  assert_false('name-2' in response.content, response.content)
  response = client.get('/jobsub/', dict(name="name"))
  assert_true('name-1' in response.content, response.content)
  assert_true('name-2' in response.content, response.content)
  assert_false('doesnotexist' in response.content, response.content)
  response = client.get('/jobsub/', dict(owner="test", name="name-2"))
  assert_false('name-1' in response.content, response.content)
  assert_true('name-2' in response.content, response.content)
  assert_false('doesnotexist' in response.content, response.content)
  # Deleting a design removes it from the database.
  job_id = OozieDesign.objects.get(name='name-1').id
  response = client.post('/jobsub/delete_design/%d' % job_id)
  assert_raises(OozieDesign.DoesNotExist, OozieDesign.objects.get, id=job_id)
  # Other users must not be able to delete designs they do not own.
  client.logout()
  client = make_logged_in_client('test2', is_superuser=False)
  grant_access('test2', 'test-grp', 'jobsub')
  not_mine = OozieDesign.objects.get(name='name-2')
  response = client.post('/jobsub/delete_design/%d' % not_mine.id)
  assert_true('Permission denied.' in response.content, response.content)
class TestJobsubWithHadoop(OozieServerProvider):
  """Integration tests that need a live Oozie/Hadoop mini-cluster.

  NOTE(review): Python 2 octal literals (0777) below are kept as-is; this
  module targets a Python 2 interpreter.
  """
  def setUp(self):
    # Bring up the shared cluster and give 'jobsub_test' a usable home dir.
    OozieServerProvider.setup_class()
    self.cluster.fs.do_as_user('test', self.cluster.fs.create_home_dir, '/user/jobsub_test')
    self.cluster.fs.do_as_superuser(self.cluster.fs.chmod, '/user/jobsub_test', 0777, True)
    self.client = make_logged_in_client(username='jobsub_test')
    # Ensure access to MR folder
    self.cluster.fs.do_as_superuser(self.cluster.fs.chmod, '/tmp', 0777, recursive=True)
  def test_jobsub_setup(self):
    # User 'test' triggers the setup of the examples.
    # 'hue' home will be deleted, the examples installed in the new one
    # and 'test' will try to access them.
    self.cluster.fs.setuser('jobsub_test')
    username = 'hue'
    home_dir = '/user/%s/' % username
    # Point the data dir at a scratch location; finish() restores it.
    finish = conf.REMOTE_DATA_DIR.set_for_testing('%s/jobsub' % home_dir)
    try:
      data_dir = conf.REMOTE_DATA_DIR.get()
      if not jobsub_setup.Command().has_been_setup():
        self.cluster.fs.setuser(self.cluster.fs.superuser)
        if self.cluster.fs.exists(home_dir):
          self.cluster.fs.rmtree(home_dir)
        jobsub_setup.Command().handle()
        self.cluster.fs.setuser('jobsub_test')
      # Setup must leave 'hue'-owned dirs with the expected modes.
      stats = self.cluster.fs.stats(home_dir)
      assert_equal(stats['user'], username)
      assert_equal(oct(stats['mode']), '040755') #04 because is a dir
      stats = self.cluster.fs.stats(data_dir)
      assert_equal(stats['user'], username)
      assert_equal(oct(stats['mode']), '041777')
      # Only examples should have been created by 'hue'
      stats = self.cluster.fs.listdir_stats(data_dir)
      sample_stats = filter(lambda stat: stat.user == username, stats)
      assert_equal(len(sample_stats), 2)
    finally:
      finish()
  def test_jobsub_setup_and_run_samples(self):
    """
    Merely exercises jobsub_setup, and then runs the sleep example.
    """
    if not jobsub_setup.Command().has_been_setup():
      jobsub_setup.Command().handle()
    self.cluster.fs.setuser('jobsub_test')
    # Setup should have installed exactly 3 sample designs (2 MR, 1 streaming).
    assert_equal(3, OozieDesign.objects.filter(owner__username='sample').count())
    assert_equal(2, OozieMapreduceAction.objects.filter(ooziedesign__owner__username='sample').count())
    assert_equal(1, OozieStreamingAction.objects.filter(ooziedesign__owner__username='sample').count())
    # Make sure sample user got created.
    assert_equal(1, User.objects.filter(username='sample').count())
    # Clone design
    assert_equal(0, OozieDesign.objects.filter(owner__username='jobsub_test').count())
    jobid = OozieDesign.objects.get(name='sleep_job', owner__username='sample').id
    self.client.post('/jobsub/clone_design/%d' % jobid)
    assert_equal(1, OozieDesign.objects.filter(owner__username='jobsub_test').count())
    jobid = OozieDesign.objects.get(owner__username='jobsub_test').id
    # And now submit and run the sleep sample
    response = self.client.post('/jobsub/submit_design/%d' % jobid, {
      'num_reduces': 1,
      'num_maps': 1,
      'map_sleep_time': 1,
      'reduce_sleep_time': 1}, follow=True)
    assert_true('PREP' in response.content or 'OK' in response.content, response.content)
    assert_true(str(jobid) in response.content)
    oozie_job_id = response.context['jobid']
    # Block until Oozie reports a terminal state, then check for success.
    job = OozieServerProvider.wait_until_completion(oozie_job_id, timeout=120, step=1)
    logs = OozieServerProvider.oozie.get_job_log(oozie_job_id)
    assert_equal('SUCCEEDED', job.status, logs)
    # Grep
    n = OozieDesign.objects.filter(owner__username='jobsub_test').count()
    jobid = OozieDesign.objects.get(name='grep_example').id
    self.client.post('/jobsub/clone_design/%d' % jobid)
    assert_equal(n + 1, OozieDesign.objects.filter(owner__username='jobsub_test').count())
    jobid = OozieDesign.objects.get(owner__username='jobsub_test', name__contains='sleep_job').id
    # And now submit and run the sleep sample
    response = self.client.post('/jobsub/submit_design/%d' % jobid, {
      'num_reduces': 1,
      'num_maps': 1,
      'map_sleep_time': 1,
      'reduce_sleep_time': 1}, follow=True)
    assert_true('PREP' in response.content or 'DONE' in response.content, response.content)
    assert_true(str(jobid) in response.content)
    oozie_job_id = response.context['jobid']
    job = OozieServerProvider.wait_until_completion(oozie_job_id, timeout=60, step=1)
    logs = OozieServerProvider.oozie.get_job_log(oozie_job_id)
    assert_equal('SUCCEEDED', job.status, logs)
|
{
"content_hash": "aafc7d0e7b72eb02667b04d4b436cc52",
"timestamp": "",
"source": "github",
"line_count": 275,
"max_line_length": 103,
"avg_line_length": 36.75272727272727,
"alnum_prop": 0.6712179677451271,
"repo_name": "hortonworks/hortonworks-sandbox",
"id": "f2fdcbc405e0b52eb1deddfe8089adf9e3ab6012",
"size": "10899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/jobsub/src/jobsub/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "27264"
},
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10279874"
},
{
"name": "C++",
"bytes": "208068"
},
{
"name": "CSS",
"bytes": "356769"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3064179"
},
{
"name": "JavaScript",
"bytes": "1532806"
},
{
"name": "PHP",
"bytes": "4160"
},
{
"name": "Perl",
"bytes": "139518"
},
{
"name": "Python",
"bytes": "27735073"
},
{
"name": "R",
"bytes": "12290"
},
{
"name": "Ruby",
"bytes": "5050"
},
{
"name": "Shell",
"bytes": "42062"
},
{
"name": "XSLT",
"bytes": "585"
}
],
"symlink_target": ""
}
|
from aiohttp.web import Response
def get_all_todos(request):
    """Stub collection-GET handler; replies with its own name."""
    body = 'get_all_todos'
    return Response(text=body)
def create_todos(request):
    """Stub collection-POST handler; replies with its own name."""
    body = 'create_todos'
    return Response(text=body)
def update_todos(request):
    """Stub collection-PUT handler; replies with its own name."""
    body = 'update_todos'
    return Response(text=body)
def remove_todos(request):
    """Stub collection-DELETE handler; replies with its own name."""
    body = 'remove_todos'
    return Response(text=body)
def get_todo(request):
    """Stub detail-GET handler; echoes its name plus the ``id`` route param."""
    todo_id = request.match_info['id']
    return Response(text='get_todo {}'.format(todo_id))
def update_todo(request):
    """Stub detail-PUT handler; echoes its name plus the ``id`` route param."""
    todo_id = request.match_info['id']
    return Response(text='update_todo {}'.format(todo_id))
def remove_todo(request):
    """Stub detail-DELETE handler; echoes its name plus the ``id`` route param."""
    todo_id = request.match_info['id']
    return Response(text='remove_todo {}'.format(todo_id))
|
{
"content_hash": "b7149ca438c43446390877f005ee2316",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 53,
"avg_line_length": 20.53125,
"alnum_prop": 0.684931506849315,
"repo_name": "alexeyraspopov/aiohttp-mongodb-example",
"id": "bb4e388569a71f1342c7fc07646396dcc6a0e3ba",
"size": "657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Todos/handlers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "132"
},
{
"name": "Python",
"bytes": "2014"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds ``Content.source_url`` to the canvas app.

    The ``models`` dict below is South's auto-generated frozen snapshot of the
    ORM at the time this migration was written; it must not be edited by hand.
    """
    def forwards(self, orm):
        """Apply: add the ``source_url`` column to ``canvas_content``."""
        # Adding field 'Content.source_url'
        db.add_column('canvas_content', 'source_url', self.gf('django.db.models.fields.CharField')(default='', max_length=4000, blank=True), keep_default=False)
    def backwards(self, orm):
        """Revert: drop the ``source_url`` column from ``canvas_content``."""
        # Deleting field 'Content.source_url'
        db.delete_column('canvas_content', 'source_url')
    # Frozen ORM state (auto-generated by South) -- do not edit manually.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'canvas.comment': {
            'Meta': {'object_name': 'Comment'},
            'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'author': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
            'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'replied_comment': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['canvas.Comment']", 'null': 'True', 'blank': 'True'}),
            'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
            'timestamp': ('django.db.models.fields.FloatField', [], {}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'canvas.content': {
            'Meta': {'object_name': 'Content'},
            'alpha': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'animated': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
            'remix_of': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'remixes'", 'null': 'True', 'to': "orm['canvas.Content']"}),
            'score': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'source_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '4000', 'blank': 'True'}),
            'stamps_used': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'used_as_stamp'", 'blank': 'True', 'to': "orm['canvas.Content']"}),
            'timestamp': ('django.db.models.fields.FloatField', [], {}),
            'url_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.ContentUrlMapping']", 'null': 'True', 'blank': 'True'}),
            'visibility': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'canvas.contentsticker': {
            'Meta': {'object_name': 'ContentSticker'},
            'content': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': "orm['canvas.Content']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'timestamp': ('django.db.models.fields.FloatField', [], {}),
            'type_id': ('django.db.models.fields.IntegerField', [], {}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'canvas.contenturlmapping': {
            'Meta': {'object_name': 'ContentUrlMapping'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
        },
        'canvas.facebookuser': {
            'Meta': {'object_name': 'FacebookUser'},
            'email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'fb_uid': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'null': 'True', 'blank': 'True'})
        },
        'canvas.invitecode': {
            'Meta': {'object_name': 'InviteCode'},
            'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invitee': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'invited_from'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"}),
            'inviter': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'sent_invites'", 'null': 'True', 'blank': 'True', 'to': "orm['auth.User']"})
        },
        'canvas.stashcontent': {
            'Meta': {'object_name': 'StashContent'},
            'content': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Content']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'canvas.userinfo': {
            'Meta': {'object_name': 'UserInfo'},
            'free_invites': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_activity_check': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'post_anonymously': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'power_level': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }
    complete_apps = ['canvas']
|
{
"content_hash": "3a301628289252d7f4c0debb8261d726",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 192,
"avg_line_length": 74.8,
"alnum_prop": 0.547614150555327,
"repo_name": "drawquest/drawquest-web",
"id": "099af6d67202427729a3cf636a2464185abdb8f9",
"size": "9742",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "website/canvas/migrations/0058_add_content_source_url.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "57"
},
{
"name": "C",
"bytes": "547"
},
{
"name": "CSS",
"bytes": "634659"
},
{
"name": "CoffeeScript",
"bytes": "8968"
},
{
"name": "HTML",
"bytes": "898627"
},
{
"name": "JavaScript",
"bytes": "1507053"
},
{
"name": "Makefile",
"bytes": "258"
},
{
"name": "PHP",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "7220727"
},
{
"name": "Ruby",
"bytes": "876"
},
{
"name": "Shell",
"bytes": "3700"
}
],
"symlink_target": ""
}
|
from typing import Any, Callable, Dict, List, Optional, Text
from zerver.models import (
get_stream_recipient,
get_stream,
MutedTopic,
UserProfile
)
from sqlalchemy.sql import (
and_,
column,
func,
not_,
or_,
Selectable
)
def get_topic_mutes(user_profile: UserProfile) -> List[List[Text]]:
    """Return the user's muted topics as ``[stream_name, topic_name]`` pairs."""
    muted_rows = MutedTopic.objects.filter(
        user_profile=user_profile,
    ).values(
        'stream__name',
        'topic_name'
    )
    return [[muted['stream__name'], muted['topic_name']] for muted in muted_rows]
def set_topic_mutes(user_profile: UserProfile, muted_topics: List[List[Text]]) -> None:
    """Replace the user's entire set of topic mutes (used only in tests)."""
    # Drop the existing mutes, then recreate from the provided pairs.
    MutedTopic.objects.filter(user_profile=user_profile).delete()
    for stream_name, topic_name in muted_topics:
        stream = get_stream(stream_name, user_profile.realm)
        recipient = get_stream_recipient(stream.id)
        add_topic_mute(
            user_profile=user_profile,
            stream_id=stream.id,
            recipient_id=recipient.id,
            topic_name=topic_name,
        )
def add_topic_mute(user_profile: UserProfile, stream_id: int, recipient_id: int, topic_name: str) -> None:
    """Record a topic mute for the user (creates one MutedTopic row)."""
    MutedTopic.objects.create(
        user_profile=user_profile,
        stream_id=stream_id,
        recipient_id=recipient_id,
        topic_name=topic_name,
    )
def remove_topic_mute(user_profile: UserProfile, stream_id: int, topic_name: str) -> None:
    """Delete the user's mute row for (stream, topic).

    Topic match is case-insensitive; raises MutedTopic.DoesNotExist if no
    such mute exists.
    """
    MutedTopic.objects.get(
        user_profile=user_profile,
        stream_id=stream_id,
        topic_name__iexact=topic_name
    ).delete()
def topic_is_muted(user_profile: UserProfile, stream_id: int, topic_name: Text) -> bool:
    """Return True if the user has muted this topic (case-insensitive match)."""
    return MutedTopic.objects.filter(
        user_profile=user_profile,
        stream_id=stream_id,
        topic_name__iexact=topic_name,
    ).exists()
def exclude_topic_mutes(conditions: List[Selectable],
                        user_profile: UserProfile,
                        stream_id: Optional[int]) -> List[Selectable]:
    """Append a SQLAlchemy condition excluding the user's muted topics.

    Returns ``conditions`` unchanged when the user has no relevant mutes.
    """
    query = MutedTopic.objects.filter(
        user_profile=user_profile,
    )
    if stream_id is not None:
        # If we are narrowed to a stream, we can optimize the query
        # by not considering topic mutes outside the stream.
        query = query.filter(stream_id=stream_id)
    query = query.values(
        'recipient_id',
        'topic_name'
    )
    rows = list(query)
    if not rows:
        return conditions
    def mute_cond(row: Dict[str, Any]) -> Selectable:
        # One (recipient, topic) pair matches one mute; topic comparison is
        # case-insensitive via upper() on both sides.
        recipient_id = row['recipient_id']
        topic_name = row['topic_name']
        stream_cond = column("recipient_id") == recipient_id
        topic_cond = func.upper(column("subject")) == func.upper(topic_name)
        return and_(stream_cond, topic_cond)
    # NOT (mute1 OR mute2 OR ...): keep only messages matching no mute.
    condition = not_(or_(*list(map(mute_cond, rows))))
    return conditions + [condition]
def build_topic_mute_checker(user_profile: UserProfile) -> Callable[[int, Text], bool]:
    """Build a fast in-memory predicate for "is this (recipient, topic) muted?".

    All of the user's mutes are fetched once and held in a set of
    (recipient_id, lowercased topic) tuples; the returned closure does a
    constant-time lookup.
    """
    muted_pairs = {
        (entry['recipient_id'], entry['topic_name'].lower())
        for entry in MutedTopic.objects.filter(
            user_profile=user_profile,
        ).values(
            'recipient_id',
            'topic_name'
        )
    }
    def is_muted(recipient_id: int, topic: Text) -> bool:
        return (recipient_id, topic.lower()) in muted_pairs
    return is_muted
|
{
"content_hash": "88206c17053c1eb3adbd0cb20980fa32",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 106,
"avg_line_length": 28.241935483870968,
"alnum_prop": 0.6096516276413478,
"repo_name": "mahim97/zulip",
"id": "c8878c5d212a61829c023d5ab4e5fe83898f0fa5",
"size": "3502",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "zerver/lib/topic_mutes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "299188"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "GCC Machine Description",
"bytes": "142"
},
{
"name": "Groovy",
"bytes": "5509"
},
{
"name": "HTML",
"bytes": "542463"
},
{
"name": "JavaScript",
"bytes": "1605569"
},
{
"name": "Nginx",
"bytes": "1280"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "401825"
},
{
"name": "Puppet",
"bytes": "86990"
},
{
"name": "Python",
"bytes": "3510480"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "37821"
}
],
"symlink_target": ""
}
|
import re
import string
import unicodedata
from unstdlib.six import text_type, PY3, string_types, binary_type, u
from unstdlib.six.moves import xrange
# Name of the magic method that yields text: __str__ on py3, __unicode__ on py2.
if PY3:
    text_type_magicmethod = "__str__"
else:
    text_type_magicmethod = "__unicode__"
from .random_ import random
# Public API of this module (explicit __all__ per PEP 8).
__all__ = [
    'random_string',
    'number_to_string', 'string_to_number', 'number_to_bytes', 'bytes_to_number',
    'dollars_to_cents',
    'to_str', 'to_unicode', 'to_int', 'to_float',
    'format_int',
    'slugify',
]
class r(object):
    """
    A normalized repr for bytes/unicode between Python2 and Python3.
    """
    def __init__(self, val):
        # Wrapped value whose repr should be normalized.
        self.val = val
    def __repr__(self):
        # Prefix 'u' on py3 text and 'b' on py2 byte strings so that doctest
        # output for the wrapped value is identical on both Python versions.
        if PY3:
            if isinstance(self.val, text_type):
                return 'u' + repr(self.val)
        else:
            if isinstance(self.val, str):
                return 'b' + repr(self.val)
        return repr(self.val)
# Sentinel distinguishing "argument not provided" from an explicit None.
_Default = object()
def random_string(length=6, alphabet=string.ascii_letters+string.digits):
    """
    Return a random string of given length and alphabet.
    Default alphabet is url-friendly (base62).
    """
    chars = (random.choice(alphabet) for _ in xrange(length))
    return ''.join(chars)
def number_to_string(n, alphabet):
    """
    Given an non-negative integer ``n``, convert it to a string composed of
    the given ``alphabet`` mapping, where the position of each element in
    ``alphabet`` is its radix value.
    Examples::
        >>> number_to_string(12345678, '01')
        '101111000110000101001110'
        >>> number_to_string(12345678, 'ab')
        'babbbbaaabbaaaababaabbba'
        >>> number_to_string(12345678, string.ascii_letters + string.digits)
        'ZXP0'
        >>> number_to_string(12345, ['zero ', 'one ', 'two ', 'three ', 'four ', 'five ', 'six ', 'seven ', 'eight ', 'nine '])
        'one two three four five '
    """
    result = ''
    base = len(alphabet)
    current = int(n)
    if current < 0:
        # Bug fix: the original passed the format string and ``n`` as two
        # separate ValueError arguments, so "%s" was never interpolated and
        # the message rendered as a tuple.
        raise ValueError("invalid n (must be non-negative): %s" % (n,))
    while current:
        result = alphabet[current % base] + result
        current = current // base
    return result
def string_to_number(s, alphabet):
    """
    Given a string ``s``, convert it to an integer composed of the given
    ``alphabet`` mapping, where the position of each element in ``alphabet`` is
    its radix value.
    Examples::
        >>> string_to_number('101111000110000101001110', '01')
        12345678
        >>> string_to_number('babbbbaaabbaaaababaabbba', 'ab')
        12345678
        >>> string_to_number('ZXP0', string.ascii_letters + string.digits)
        12345678
    """
    base = len(alphabet)
    value_of = dict((ch, idx) for idx, ch in enumerate(alphabet))
    # Horner's rule: left-to-right accumulation, same result as the
    # positional power sum.
    n = 0
    for ch in s:
        n = n * base + value_of[ch]
    return n
def bytes_to_number(b, endian='big'):
    """
    Convert a string to an integer.
    :param b:
        String or bytearray to convert.
    :param endian:
        Byte order to convert into ('big' or 'little' endian-ness, default
        'big')
    Assumes bytes are 8 bits.
    This is a special-case version of string_to_number with a full base-256
    ASCII alphabet. It is the reverse of ``number_to_bytes(n)``.
    Examples::
        >>> bytes_to_number(b'*')
        42
        >>> bytes_to_number(b'\\xff')
        255
        >>> bytes_to_number(b'\\x01\\x00')
        256
        >>> bytes_to_number(b'\\x00\\x01', endian='little')
        256
    """
    # Normalize to little-endian order, then accumulate with growing shifts.
    if endian == 'big':
        b = reversed(b)
    total = 0
    shift = 0
    for octet in bytearray(b):
        total += octet << shift
        shift += 8
    return total
def number_to_bytes(n, endian='big'):
    """
    Convert an integer to a corresponding string of bytes..
    :param n:
        Integer to convert.
    :param endian:
        Byte order to convert into ('big' or 'little' endian-ness, default
        'big')
    Assumes bytes are 8 bits.
    This is a special-case version of number_to_string with a full base-256
    ASCII alphabet. It is the reverse of ``bytes_to_number(b)``.
    Examples::
        >>> r(number_to_bytes(42))
        b'*'
        >>> r(number_to_bytes(255))
        b'\\xff'
        >>> r(number_to_bytes(256))
        b'\\x01\\x00'
        >>> r(number_to_bytes(256, endian='little'))
        b'\\x00\\x01'
    """
    # Peel off base-256 digits least-significant first, then order them.
    digits = []
    remaining = n
    while remaining:
        remaining, octet = divmod(remaining, 256)
        digits.append(octet if PY3 else chr(octet))
    if endian == 'big':
        digits.reverse()
    # py3 bytes() accepts a list of ints; py2 needs a joined str.
    return bytes(digits) if PY3 else ''.join(digits)
def to_str(obj, encoding='utf-8', **encode_args):
    r"""
    Returns a ``str`` of ``obj``, encoding using ``encoding`` if necessary. For
    example::
        >>> some_str = b"\xff"
        >>> some_unicode = u"\u1234"
        >>> some_exception = Exception(u'Error: ' + some_unicode)
        >>> r(to_str(some_str))
        b'\xff'
        >>> r(to_str(some_unicode))
        b'\xe1\x88\xb4'
        >>> r(to_str(some_exception))
        b'Error: \xe1\x88\xb4'
        >>> r(to_str([42]))
        b'[42]'
    See source code for detailed semantics.
    """
    # Note: On py3, ``b'x'.__str__()`` returns ``"b'x'"``, so we need to do the
    # explicit check first.
    if isinstance(obj, binary_type):
        return obj
    # We coerce to unicode if '__unicode__' is available because there is no
    # way to specify encoding when calling ``str(obj)``, so, eg,
    # ``str(Exception(u'\u1234'))`` will explode.
    if isinstance(obj, text_type) or hasattr(obj, text_type_magicmethod):
        # Note: unicode(u'foo') is O(1) (by experimentation)
        return text_type(obj).encode(encoding, **encode_args)
    # Fallback: on py3 every object has __str__ and is handled above, so this
    # line is reached only on py2 for objects without __unicode__.
    return binary_type(obj)
def to_unicode(obj, encoding='utf-8', fallback='latin1', **decode_args):
    r"""
    Returns a ``unicode`` of ``obj``, decoding using ``encoding`` if necessary.
    If decoding fails, the ``fallback`` encoding (default ``latin1``) is used.
    Examples::
        >>> r(to_unicode(b'\xe1\x88\xb4'))
        u'\u1234'
        >>> r(to_unicode(b'\xff'))
        u'\xff'
        >>> r(to_unicode(u'\u1234'))
        u'\u1234'
        >>> r(to_unicode(Exception(u'\u1234')))
        u'\u1234'
        >>> r(to_unicode([42]))
        u'[42]'
    See source code for detailed semantics.
    """
    # Note: on py3, the `bytes` type defines an unhelpful "__str__" function,
    # so we need to do this check (see comments in ``to_str``).
    if not isinstance(obj, binary_type):
        if isinstance(obj, text_type) or hasattr(obj, text_type_magicmethod):
            return text_type(obj)
        # Stringify non-bytes objects first, then decode below.
        obj_str = binary_type(obj)
    else:
        obj_str = obj
    try:
        return text_type(obj_str, encoding, **decode_args)
    except UnicodeDecodeError:
        # latin1 maps every possible byte value, so the fallback decode
        # cannot itself fail (with the default fallback).
        return text_type(obj_str, fallback, **decode_args)
def to_int(s, default=0):
    """
    Return input converted into an integer. If failed, then return ``default``.
    Examples::
        >>> to_int('1')
        1
        >>> to_int(1)
        1
        >>> to_int('')
        0
        >>> to_int(None)
        0
        >>> to_int(0, default='Empty')
        0
        >>> to_int(None, default='Empty')
        'Empty'
    """
    try:
        value = int(s)
    except (TypeError, ValueError):
        return default
    return value
# The two infinities, for the allow_nan=False rejection check below.
_infs = set([float("inf"), float("-inf")])

def to_float(s, default=0.0, allow_nan=False):
    """
    Return input converted into a float. If failed, then return ``default``.
    Note that, by default, ``allow_nan=False``, so ``to_float`` will not return
    ``nan``, ``inf``, or ``-inf``.
    Examples::
        >>> to_float('1.5')
        1.5
        >>> to_float(1)
        1.0
        >>> to_float('')
        0.0
        >>> to_float('nan')
        0.0
        >>> to_float('inf')
        0.0
        >>> to_float('-inf', allow_nan=True)
        -inf
        >>> to_float(None)
        0.0
        >>> to_float(0, default='Empty')
        0.0
        >>> to_float(None, default='Empty')
        'Empty'
    """
    try:
        value = float(s)
    except (TypeError, ValueError):
        return default
    if allow_nan:
        return value
    # value != value is the classic NaN self-inequality test.
    if value != value or value in _infs:
        return default
    return value
def format_int(n, singular=_Default, plural=_Default):
    """
    Return `singular.format(n)` if n is 1, or `plural.format(n)` otherwise. If
    plural is not specified, then it is assumed to be same as singular but
    suffixed with an 's'.
    :param n:
        Integer which determines pluralness.
    :param singular:
        String with a format() placeholder for n. (Default: `u"{:,}"`)
    :param plural:
        String with a format() placeholder for n. (Default: If singular is not
        default, then it's `singular + u"s"`. Otherwise it's same as singular.)
    Example: ::
        >>> r(format_int(1000))
        u'1,000'
        >>> r(format_int(1, u"{} day"))
        u'1 day'
        >>> r(format_int(2, u"{} day"))
        u'2 days'
        >>> r(format_int(2, u"{} box", u"{} boxen"))
        u'2 boxen'
        >>> r(format_int(20000, u"{:,} box", u"{:,} boxen"))
        u'20,000 boxen'
    """
    n = int(n)
    if singular is None or singular is _Default:
        # No singular given: use the plain thousands-separated format, and
        # disable pluralization unless an explicit plural was supplied.
        if plural is _Default:
            plural = None
        singular = u'{:,}'
    elif plural is _Default:
        plural = singular + u's'
    if n == 1 or not plural:
        return singular.format(n)
    return plural.format(n)
# Characters that can appear in a (possibly scientific-notation) number.
RE_NUMBER = re.compile(r'[\d\.\-eE]+')

def dollars_to_cents(s, allow_negative=False):
    """
    Given a string or integer representing dollars, return an integer of
    equivalent cents, in an input-resilient way.
    This works by stripping any non-numeric characters before attempting to
    cast the value.
    Examples::
        >>> dollars_to_cents('$1')
        100
        >>> dollars_to_cents('1')
        100
        >>> dollars_to_cents(1)
        100
        >>> dollars_to_cents('1e2')
        10000
        >>> dollars_to_cents('-1$', allow_negative=True)
        -100
        >>> dollars_to_cents('1 dollar')
        100
    """
    # TODO: Implement cents_to_dollars
    if not s:
        return
    if isinstance(s, string_types):
        # Keep only numeric characters (digits, '.', '-', exponent markers).
        s = ''.join(RE_NUMBER.findall(s))
    cents = int(round(float(s) * 100))
    if cents < 0 and not allow_negative:
        raise ValueError('Negative values not permitted.')
    return cents
# Runs of non-word characters, replaced by the slug delimiter.
RE_SLUG = re.compile(r'\W+')

def slugify(s, delimiter='-'):
    """
    Normalize `s` into ASCII and replace non-word characters with `delimiter`.
    """
    ascii_text = (unicodedata.normalize('NFKD', to_unicode(s))
                  .encode('ascii', 'ignore')
                  .decode('ascii'))
    return RE_SLUG.sub(delimiter, ascii_text).strip(delimiter).lower()
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod(optionflags=doctest.ELLIPSIS)
|
{
"content_hash": "f62d672e8b5ff05ba0f346155b0acd20",
"timestamp": "",
"source": "github",
"line_count": 430,
"max_line_length": 127,
"avg_line_length": 25.3,
"alnum_prop": 0.5530839231547017,
"repo_name": "shazow/unstdlib.py",
"id": "9ca4604f8c822b2240f4bc5b4b749361fbca7b98",
"size": "10879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unstdlib/standard/string_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "68"
},
{
"name": "Python",
"bytes": "83599"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
import vet_cond
# Long description for PyPI comes straight from the README.
with open('README.rst') as fh:
    long_description = fh.read()
# Standard setuptools metadata; the GUI entry point is vet_cond.main:run_app.
setup(
    name='VetCond',
    version=vet_cond.__version__,
    author='Matthew Einhorn',
    author_email='moiein2000@gmail.com',
    url='http://matham.github.io/vet_cond/',
    license='MIT',
    description='Vet school conditioning experiment.',
    long_description=long_description,
    classifiers=['License :: OSI Approved :: MIT License',
                 'Topic :: Scientific/Engineering',
                 'Topic :: System :: Hardware',
                 'Programming Language :: Python :: 2.7',
                 'Programming Language :: Python :: 3.3',
                 'Programming Language :: Python :: 3.4',
                 'Programming Language :: Python :: 3.5',
                 'Operating System :: Microsoft :: Windows',
                 'Intended Audience :: Developers'],
    packages=find_packages(),
    install_requires=['pymoa', 'pybarst', 'ffpyplayer', 'cplcom'],
    package_data={'vet_cond': ['data/*', '*.kv']},
    entry_points={'console_scripts': ['vet_cond=vet_cond.main:run_app']},
)
|
{
"content_hash": "3b121672a24f389968fd0d216a02ab8a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 73,
"avg_line_length": 40.48275862068966,
"alnum_prop": 0.5732538330494037,
"repo_name": "matham/vet_cond",
"id": "983d1bc5b8933c87317de7275c961472f7377703",
"size": "1174",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20461"
}
],
"symlink_target": ""
}
|
import bisect
import random
# Temperature phrases & upper bounds
# Phrase tables for the three weather attributes. Each attribute has four
# phrases; the parallel *_upper_bounds list gives the bisect cut points that
# map a uniform [0, 1) sample onto a phrase index.
# (Fix: removed the stray trailing semicolons from the original.)

# Temperature phrases & upper bounds
temp_phrases = ["It is going to be " + s + "." for s in
                ["a cold day today", "a cool day today", "a warm day today", "a hot day today"]]
temp_upper_bounds = [0.25, 0.50, 0.75, 1.0]
# Wind phrases & upper bounds
wind_phrases = ["It is going to be " + s + "." for s in
                ["a very still day today", "a calm day today", "a windy day today", "a cyclonic day today"]]
wind_upper_bounds = [0.25, 0.50, 0.75, 1.0]
# Humidity phrases & upper bounds
humid_phrases = ["It is going to be " + s + "." for s in
                 ["a very dry day today", "a dry day today", "a humid day today", "a very humid day today"]]
humid_upper_bounds = [0.25, 0.50, 0.75, 1.0]
if __name__ == "__main__":
    op_file = "data/targets"
    ip_file = "data/xs"
    num_paragraphs = 1000
    # Fix: use context managers so both files are closed even if a write
    # fails (the original relied on explicit close() calls at the end).
    with open(op_file + str(num_paragraphs) + ".txt", "w") as f, \
         open(ip_file + str(num_paragraphs) + ".txt", "w") as f2:
        for k in range(num_paragraphs):
            # Draw three independent uniform samples.
            rand_temp = random.uniform(0, 1)
            rand_wind = random.uniform(0, 1)
            rand_humid = random.uniform(0, 1)
            # Map each sample onto its phrase bucket via binary search.
            # Fix: random.uniform(0, 1) may return exactly 1.0 due to float
            # rounding, in which case bisect returns len(bounds) -- clamp to
            # the last valid phrase index to avoid an IndexError.
            temp_idx = min(bisect.bisect(temp_upper_bounds, rand_temp), len(temp_phrases) - 1)
            wind_idx = min(bisect.bisect(wind_upper_bounds, rand_wind), len(wind_phrases) - 1)
            humid_idx = min(bisect.bisect(humid_upper_bounds, rand_humid), len(humid_phrases) - 1)
            # Obtain phrases
            temp = temp_phrases[temp_idx]
            wind = wind_phrases[wind_idx]
            humid = humid_phrases[humid_idx]
            # One paragraph (target) per line, with its inputs in parallel.
            paragraph = temp + " " + wind + " " + humid
            f.write(paragraph + "\n")
            f2.write("%0.4f %0.4f %0.4f\n" % (rand_temp, rand_wind, rand_humid))
|
{
"content_hash": "20f9b8ce244f041f77d2eaec3c3fcd4f",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 108,
"avg_line_length": 37.208333333333336,
"alnum_prop": 0.5834266517357223,
"repo_name": "bitesandbytes/upgraded-system",
"id": "5611966dbf43b919b19df46e835796011a579fd5",
"size": "1857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_gen/simple_3_gen.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "244373"
}
],
"symlink_target": ""
}
|
from indy import IndyError
from indy import crypto
import pytest
from indy.error import ErrorCode
@pytest.mark.asyncio
async def test_anon_decrypt_works(wallet_handle, identity_trustee1, message):
    """Round-trip: a message anon-encrypted to a key decrypts back unchanged."""
    _, verkey = identity_trustee1
    sealed = await crypto.anon_crypt(verkey, message)
    decrypted = await crypto.anon_decrypt(wallet_handle, verkey, sealed)
    assert decrypted == message
@pytest.mark.asyncio
async def test_anon_decrypt_works_for_invalid_anonymous_msg(wallet_handle, identity_trustee1):
    """Decrypting plain (non-encrypted) bytes fails with CommonInvalidStructure."""
    _, verkey = identity_trustee1
    plain = "unencrypted message".encode('utf-8')
    with pytest.raises(IndyError) as e:
        await crypto.anon_decrypt(wallet_handle, verkey, plain)
    assert e.value.error_code == ErrorCode.CommonInvalidStructure
@pytest.mark.asyncio
async def test_parse_msg_msg_works_for_unknown_recipient_vk(wallet_handle, verkey_my1, message):
    """Decryption fails with WalletItemNotFound when the wallet lacks the key."""
    sealed = await crypto.anon_crypt(verkey_my1, message)
    with pytest.raises(IndyError) as e:
        await crypto.anon_decrypt(wallet_handle, verkey_my1, sealed)
    assert e.value.error_code == ErrorCode.WalletItemNotFound
@pytest.mark.asyncio
async def test_anon_decrypt_works_for_invalid_handle(wallet_handle, identity_trustee1, message):
    """A wallet handle other than the open one is rejected with WalletInvalidHandle."""
    _, verkey = identity_trustee1
    sealed = await crypto.anon_crypt(verkey, message)
    bogus_handle = wallet_handle + 1
    with pytest.raises(IndyError) as e:
        await crypto.anon_decrypt(bogus_handle, verkey, sealed)
    assert e.value.error_code == ErrorCode.WalletInvalidHandle
|
{
"content_hash": "00b3094cb27f0e3f58b332c851caaca6",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 96,
"avg_line_length": 39.09756097560975,
"alnum_prop": 0.7461010605115409,
"repo_name": "srottem/indy-sdk",
"id": "a177a309c19359a8bda63d445b31a64b5de00aae",
"size": "1603",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "wrappers/python/tests/crypto/test_anon_decrypt.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "207870"
},
{
"name": "C#",
"bytes": "842011"
},
{
"name": "C++",
"bytes": "229233"
},
{
"name": "CSS",
"bytes": "137079"
},
{
"name": "Dockerfile",
"bytes": "23945"
},
{
"name": "Groovy",
"bytes": "102978"
},
{
"name": "HTML",
"bytes": "897750"
},
{
"name": "Java",
"bytes": "882162"
},
{
"name": "JavaScript",
"bytes": "185247"
},
{
"name": "Makefile",
"bytes": "328"
},
{
"name": "Objective-C",
"bytes": "584121"
},
{
"name": "Objective-C++",
"bytes": "706749"
},
{
"name": "Perl",
"bytes": "8271"
},
{
"name": "Python",
"bytes": "750776"
},
{
"name": "Ruby",
"bytes": "80525"
},
{
"name": "Rust",
"bytes": "5872898"
},
{
"name": "Shell",
"bytes": "251160"
},
{
"name": "Swift",
"bytes": "1114"
},
{
"name": "TypeScript",
"bytes": "197439"
}
],
"symlink_target": ""
}
|
from Tkinter import *
import random
#What I want to accomplish:
#create a timer to spawn pipes at regular intervals
#use the same timer to move the bird
#user input (Up arrow) moves the bird up by a fixed increment
#if the bird overlaps part of a pipe, the bird dies --> loss
#if the bird goes through the gap, the score goes up
def run():
    """Build the Tk window and canvas, wire up input, and start the game loop."""
    global canvas
    global data
    # All mutable game state lives in this module-level dict.
    data = {}
    root = Tk()
    root.title("Flappy Kesden")
    #starting Flappy Kesden board
    h = 500
    w = 500
    canvas = Canvas(root, width=w, height=h, bg="blue")
    #setting blue background
    canvas.pack()
    data["canvas"] = canvas
    data["height"] = h
    data ["width"] = w
    # Fixed-size window; keyboard events drive the bird (see keyPress).
    root.resizable(width=0, height=0)
    root.bind("<Key>", keyPress)
    init()
    timerFired()
    root.mainloop()
def draw_Pipe():
    # Draw the current pipe pair from the module globals pipe_top/pipe_bot
    # (each a [x1, y1, x2, y2] rectangle set by make_Pipe).
    canvas.create_rectangle(pipe_top, fill = "red", tag = "pipe")
    canvas.create_rectangle(pipe_bot, fill = "red", tag = "pipe")
    # NOTE(review): move_Pipe moves the canvas items, not these coordinate
    # lists, so pipe_top[2] never changes after make_Pipe -- confirm whether
    # this scoring branch can ever fire.
    if pipe_top[2] <=-5:
        data["score"] +=1
        make_Pipe()
def make_Pipe():
    """Create a new pipe pair at the right edge with a randomly placed gap."""
    global pipe_top
    global pipe_bot
    global old_pipe_top
    global old_pipe_bot
    # Bottom edge of the top pipe; kept at least 150px from either border.
    gap = random.randrange(150, data["height"]-150)
    pipe_top = [data ["width"]-50, 0, data ["width"], gap]
    # The 135px offset below the gap is the vertical opening the bird flies through.
    pipe_bot = [data ["width"]-50, pipe_top[3] +135, data ["width"], data["height"]]
    # Copies of the fresh coordinates (shallow list copies).
    old_pipe_top = pipe_top[:]
    old_pipe_bot = pipe_bot[:]
    draw_Pipe()
def init():
    """Reset per-game state: score, game-over flag, and player position."""
    global data
    data["isGameOver"] = False
    data ["score"] = 0
    # NOTE(review): x is derived from height and y from width -- likely
    # swapped, though harmless on this 500x500 board.
    data["player_x"] = data["height"]/2
    data["player_y"] = data["width"]/2
    # data["kesden"] = PhotoImage(file = "kesden_bird.png")
def timerFired():
    """Game tick: redraw, spawn/move pipes, run collision checks, reschedule."""
    global data
    canvas = data["canvas"]
    # Bug fix: the original condition was `if [data["isGameOver"] == False]:`
    # -- a one-element list, which is always truthy, so the game-over state
    # never actually stopped the updates.
    if not data["isGameOver"]:
        redrawAll()
        make_Pipe()
        draw_Pipe()
        move_Pipe()
        check()
    # Always reschedule so an 'r' restart can resume the loop.
    delay = 2000
    canvas.after(delay, timerFired)
def move_Pipe():
    # Shift every canvas item tagged "pipe" 150px to the left each tick.
    canvas.move("pipe", -150, 0)
def draw_Kesden():
    # Draw the bird: a 20x20 black oval centered on the player position.
    canvas.create_oval( data["player_x"]+ 10, data["player_y"] +10, data["player_x"]-10 , data["player_y"] -10, fill = "black", tag = "birdie")
def redrawAll():
    """Redraw the dynamic UI: game-over banner when dead, else refresh the score."""
    if (data["isGameOver"] == True):
        # Bug fix: the original line was syntactically invalid -- an
        # unbalanced `canvas.insert(canvas.create_text(...)` call -- and
        # declared unused globals (pipe_bottom, pipe_pos). create_text with
        # the text keyword is the intended Tkinter call.
        canvas.create_text(data["width"] / 2, data["height"] / 2,
                           text="Game Over! Press 'r' to start a new game")
    else:
        # Remove the stale score item before drawScore redraws it.
        canvas.delete("score")
        drawScore()
def keyPress(event):
    """Keyboard handler: 'r' restarts; Up/Down move the bird by 10px."""
    if (event.keysym == "r"):
        # if the user hits r, the game restarts
        run()
        redrawAll()
    if (event.keysym == "Down"):
        # Move both the canvas item and the tracked position in lockstep.
        canvas.move("birdie",0,10)
        data["player_y"]+=10
        draw_Kesden()
    if (event.keysym == "Up"):
        canvas.move("birdie",0,-10)
        data["player_y"]-=10
        draw_Kesden()
def check():
    # Collision/score check against the current pipe gap edges.
    # NOTE(review): this compares only the bird's vertical position against
    # the gap (pipe_top[3] is the gap top, pipe_bot[1] the gap bottom) and
    # ignores horizontal overlap with the pipes; it also adds to the score on
    # every call in both else branches -- confirm the intended game rules.
    if data["player_y"] +10 < pipe_top[3]:
        data["isGameOver"] = True
    else:data["score"] +=1
    if data["player_y"]-10 > pipe_bot[1]:
        data["isGameOver"] = True
    else:data["score"] +=1
def drawScore():
    """Render the current score at the top center of the canvas."""
    #draws the current score
    #called in redraw so it can update
    canvas.create_text(data["width"]/2, 20, text="Score: " + str(data["score"]), fill="black", font="Purisa 22", tag = "score")
# Start the game immediately (no __main__ guard in the original script).
run()
|
{
"content_hash": "10216262f9765ab715c436eca733bcf5",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 144,
"avg_line_length": 27.273504273504273,
"alnum_prop": 0.5785020369790035,
"repo_name": "ananyacleetus/15-112",
"id": "66c574dc1a61db95d2b29af623da43665eb30fd6",
"size": "3220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27600"
}
],
"symlink_target": ""
}
|
import logging
import os
import sys
from rbtools.api.errors import APIError
from rbtools.clients import SCMClient, RepositoryInfo
from rbtools.utils.checks import check_gnu_diff, check_install
from rbtools.utils.filesystem import make_tempfile
from rbtools.utils.process import die, execute
# This specific import is necessary to handle the paths for
# cygwin enabled machines.
if (sys.platform.startswith('win')
or sys.platform.startswith('cygwin')):
import ntpath as cpath
else:
import posixpath as cpath
class ClearCaseClient(SCMClient):
    """
    A wrapper around the clearcase tool that fetches repository
    information and generates compatible diffs.
    This client assumes that cygwin is installed on windows.
    """
    # View type detected in get_repository_info(): 'dynamic' or 'snapshot'.
    viewtype = None

    def __init__(self, **kwargs):
        super(ClearCaseClient, self).__init__(**kwargs)

    def get_repository_info(self):
        """Returns information on the Clear Case repository.
        This will first check if the cleartool command is
        installed and in the path, and post-review was run
        from inside of the view.
        """
        if not check_install('cleartool help'):
            return None
        viewname = execute(["cleartool", "pwv", "-short"]).strip()
        if viewname.startswith('** NONE'):
            # 'cleartool pwv' reports '** NONE **' when run outside a view.
            return None
        # Now that we know it's ClearCase, make sure we have GNU diff installed,
        # and error out if we don't.
        check_gnu_diff()
        property_lines = execute(["cleartool", "lsview", "-full", "-properties",
                                  "-cview"], split_lines=True)
        for line in property_lines:
            properties = line.split(' ')
            if properties[0] == 'Properties:':
                # Determine the view type and check if it's supported.
                #
                # Specifically check if webview was listed in properties
                # because webview types also list the 'snapshot'
                # entry in properties.
                if 'webview' in properties:
                    die("Webviews are not supported. You can use post-review"
                        " only in dynamic or snapshot view.")
                if 'dynamic' in properties:
                    self.viewtype = 'dynamic'
                else:
                    self.viewtype = 'snapshot'
                break
        # Find current VOB's tag
        vobstag = execute(["cleartool", "describe", "-short", "vob:."],
                          ignore_errors=True).strip()
        if "Error: " in vobstag:
            die("To generate diff run post-review inside vob.")
        root_path = execute(["cleartool", "pwv", "-root"],
                            ignore_errors=True).strip()
        if "Error: " in root_path:
            die("To generate diff run post-review inside view.")
        # From current working directory cut path to VOB.
        # VOB's tag contains a backslash character before the VOB's name;
        # that first character of a tag like '\new_proj' is two separate
        # characters (backslash and letter 'n'), not a newline.
        cwd = os.getcwd()
        base_path = cwd[:len(root_path) + len(vobstag)]
        return ClearCaseRepositoryInfo(path=base_path,
                                       base_path=base_path,
                                       vobstag=vobstag,
                                       supports_parent_diffs=False)

    def check_options(self):
        # Revision ranges / parent-branch tracking need a dynamic view, since
        # only MVFS exposes arbitrary versions as filesystem paths.
        if ((self.options.revision_range or self.options.tracking)
                and self.viewtype != "dynamic"):
            die("To generate diff using parent branch or by passing revision "
                "ranges, you must use a dynamic view.")

    def _determine_version(self, version_path):
        """Determine numeric version of revision.
        CHECKEDOUT is marked as infinity to be treated
        always as highest possible version of file.
        CHECKEDOUT, in ClearCase, is something like HEAD.
        """
        branch, number = cpath.split(version_path)
        if number == 'CHECKEDOUT':
            return float('inf')
        return int(number)

    def _construct_extended_path(self, path, version):
        """Combine extended_path from path and version.
        CHECKEDOUT must be removed because this one version
        doesn't exist in MVFS (ClearCase dynamic view file
        system). Only way to get content of checked out file
        is to use filename only."""
        if not version or version.endswith('CHECKEDOUT'):
            return path
        # ClearCase "extended path" syntax: <path>@@<version>
        return "%s@@%s" % (path, version)

    def _sanitize_branch_changeset(self, changeset):
        """Return changeset containing non-binary, branched file versions.
        Changeset contains only first and last version of each file made
        on the branch.
        """
        changelist = {}
        for path, previous, current in changeset:
            version_number = self._determine_version(current)
            if path not in changelist:
                changelist[path] = {
                    'highest': version_number,
                    'current': current,
                    'previous': previous
                }
            if version_number == 0:
                # Previous version of 0 version on branch is base
                changelist[path]['previous'] = previous
            elif version_number > changelist[path]['highest']:
                changelist[path]['highest'] = version_number
                changelist[path]['current'] = current
        # Convert to list
        changeranges = []
        for path, version in changelist.iteritems():
            changeranges.append(
                (self._construct_extended_path(path, version['previous']),
                 self._construct_extended_path(path, version['current']))
            )
        return changeranges

    def _sanitize_checkedout_changeset(self, changeset):
        """Return changeset containing non-binary, checked-out file versions."""
        changeranges = []
        for path, previous, current in changeset:
            changeranges.append(
                (self._construct_extended_path(path, previous),
                 self._construct_extended_path(path, current))
            )
        return changeranges

    def _directory_content(self, path):
        """Return directory content ready for saving to tempfile."""
        # Sorted, newline-terminated listing so two versions diff cleanly.
        return ''.join([
            '%s\n' % s
            for s in sorted(os.listdir(path))
        ])

    def _construct_changeset(self, output):
        # Each cleartool -fmt output line is
        # "<path>\t<previous version>\t<current version>".
        return [
            info.split('\t')
            for info in output.strip().split('\n')
        ]

    def get_checkedout_changeset(self):
        """Return information about the checked out changeset.
        This function returns: kind of element, path to file,
        previous and current file version.
        """
        changeset = []
        # We ignore return code 1 in order to
        # omit files that Clear Case can't read.
        output = execute([
            "cleartool",
            "lscheckout",
            "-all",
            "-cview",
            "-me",
            "-fmt",
            r"%En\t%PVn\t%Vn\n"],
            extra_ignore_errors=(1,),
            with_errors=False)
        if output:
            changeset = self._construct_changeset(output)
        return self._sanitize_checkedout_changeset(changeset)

    def get_branch_changeset(self, branch):
        """Returns information about the versions changed on a branch.
        This takes into account the changes on the branch owned by the
        current user in all vobs of the current view.
        """
        changeset = []
        # We ignore return code 1 in order to
        # omit files that Clear Case can't read.
        if sys.platform.startswith('win'):
            CLEARCASE_XPN = '%CLEARCASE_XPN%'
        else:
            CLEARCASE_XPN = '$CLEARCASE_XPN'
        output = execute([
            "cleartool",
            "find",
            "-all",
            "-version",
            "brtype(%s)" % branch,
            "-exec",
            'cleartool descr -fmt ' \
            r'"%En\t%PVn\t%Vn\n" ' \
            + CLEARCASE_XPN],
            extra_ignore_errors=(1,),
            with_errors=False)
        if output:
            changeset = self._construct_changeset(output)
        return self._sanitize_branch_changeset(changeset)

    def diff(self, files):
        """Performs a diff of the specified file and its previous version."""
        # Note: `files` is unused here; the changeset is derived from the
        # view state (tracking branch or checkouts) instead.
        if self.options.tracking:
            changeset = self.get_branch_changeset(self.options.tracking)
        else:
            changeset = self.get_checkedout_changeset()
        return self.do_diff(changeset)

    def diff_between_revisions(self, revision_range, args, repository_info):
        """Performs a diff between passed revisions or branch."""
        # Convert revision range to list of:
        # (previous version, current version) tuples
        revision_range = revision_range.split(';')
        changeset = zip(revision_range[0::2], revision_range[1::2])
        return (self.do_diff(changeset)[0], None)

    def diff_files(self, old_file, new_file):
        """Return unified diff for file.
        Most effective and reliable way is use gnu diff.
        """
        # GNU diff exits 1 when files differ and 2 on trouble; both expected.
        diff_cmd = ["diff", "-uN", old_file, new_file]
        dl = execute(diff_cmd, extra_ignore_errors=(1, 2),
                     translate_newlines=False)
        # If the input file has ^M characters at end of line, lets ignore them.
        dl = dl.replace('\r\r\n', '\r\n')
        dl = dl.splitlines(True)
        # Special handling for the output of the diff tool on binary files:
        # diff outputs "Files a and b differ"
        # and the code below expects the output to start with
        # "Binary files "
        if (len(dl) == 1 and
                dl[0].startswith('Files %s and %s differ' % (old_file, new_file))):
            dl = ['Binary files %s and %s differ\n' % (old_file, new_file)]
        # We need oids of files to translate them to paths on reviewboard
        # repository.
        old_oid = execute(["cleartool", "describe", "-fmt", "%On", old_file])
        new_oid = execute(["cleartool", "describe", "-fmt", "%On", new_file])
        if dl == [] or dl[0].startswith("Binary files "):
            if dl == []:
                dl = ["File %s in your changeset is unmodified\n" % new_file]
            dl.insert(0, "==== %s %s ====\n" % (old_oid, new_oid))
            dl.append('\n')
        else:
            # Insert the oid header after the '---'/'+++' header lines.
            dl.insert(2, "==== %s %s ====\n" % (old_oid, new_oid))
        return dl

    def diff_directories(self, old_dir, new_dir):
        """Return unified diff between two directories' content.
        Saves both versions of the directory listing to temp files and
        treats them as an ordinary diff between two files.
        """
        old_content = self._directory_content(old_dir)
        new_content = self._directory_content(new_dir)
        old_tmp = make_tempfile(content=old_content)
        new_tmp = make_tempfile(content=new_content)
        diff_cmd = ["diff", "-uN", old_tmp, new_tmp]
        dl = execute(diff_cmd,
                     extra_ignore_errors=(1, 2),
                     translate_newlines=False,
                     split_lines=True)
        # Replacing temporary filenames to
        # real directory names and add ids
        if dl:
            dl[0] = dl[0].replace(old_tmp, old_dir)
            dl[1] = dl[1].replace(new_tmp, new_dir)
            old_oid = execute(["cleartool", "describe", "-fmt", "%On",
                               old_dir])
            new_oid = execute(["cleartool", "describe", "-fmt", "%On",
                               new_dir])
            dl.insert(2, "==== %s %s ====\n" % (old_oid, new_oid))
        return dl

    def do_diff(self, changeset):
        """Generates a unified diff for all files in the changeset."""
        diff = []
        for old_file, new_file in changeset:
            dl = []
            # cpath is ntpath on win/cygwin, posixpath elsewhere (see imports).
            if cpath.isdir(new_file):
                dl = self.diff_directories(old_file, new_file)
            elif cpath.exists(new_file):
                dl = self.diff_files(old_file, new_file)
            else:
                logging.error("File %s does not exist or access is denied."
                              % new_file)
                continue
            if dl:
                diff.append(''.join(dl))
        return (''.join(diff), None)
class ClearCaseRepositoryInfo(RepositoryInfo):
    """
    A representation of a ClearCase source code repository. This version knows
    how to find a matching repository on the server even if the URLs differ.
    """

    def __init__(self, path, base_path, vobstag, supports_parent_diffs=False):
        RepositoryInfo.__init__(self, path, base_path,
                                supports_parent_diffs=supports_parent_diffs)
        # VOB tag (e.g. '\new_proj'); used to match repositories by
        # VOB family uuid in find_server_repository_info().
        self.vobstag = vobstag

    def find_server_repository_info(self, server):
        """
        The point of this function is to find a repository on the server that
        matches self, even if the paths aren't the same. (For example, if self
        uses an 'http' path, but the server uses a 'file' path for the same
        repository.) It does this by comparing VOB's name. If the
        repositories use the same path, you'll get back self, otherwise you'll
        get a different ClearCaseRepositoryInfo object (with a different path).
        """
        # Find VOB's family uuid based on VOB's tag
        uuid = self._get_vobs_uuid(self.vobstag)
        logging.debug("Repository's %s uuid is %r" % (self.vobstag, uuid))
        repositories = server.get_repositories()
        for repository in repositories:
            if repository['tool'] != 'ClearCase':
                continue
            info = self._get_repository_info(server, repository)
            if not info or uuid != info['uuid']:
                continue
            logging.debug('Matching repository uuid:%s with path:%s' % (uuid,
                          info['repopath']))
            # NOTE(review): `uuid` is passed as the `vobstag` argument of the
            # new info object here -- confirm this is intentional.
            return ClearCaseRepositoryInfo(info['repopath'],
                                           info['repopath'], uuid)
        # We didn't find the uuid, but if the server version is >= 1.5.3
        # we can try to use the VOB's name, hoping it is better
        # than the current VOB's path.
        if server.rb_version >= '1.5.3':
            self.path = cpath.split(self.vobstag)[1]
        # We didn't find a matching repository on the server.
        # We'll just return self and hope for the best.
        return self

    def _get_vobs_uuid(self, vobstag):
        """Return family uuid of VOB."""
        property_lines = execute(["cleartool", "lsvob", "-long", vobstag],
                                 split_lines=True)
        for line in property_lines:
            if line.startswith('Vob family uuid:'):
                return line.split(' ')[-1].rstrip()
        # Implicitly returns None when no 'Vob family uuid:' line is found.

    def _get_repository_info(self, server, repository):
        # NOTE: `except APIError, e` is Python-2-only syntax; this module
        # targets Python 2 (see iteritems() usage above).
        try:
            return server.get_repository_info(repository['id'])
        except APIError, e:
            # If the server couldn't fetch the repository info, it will return
            # code 210. Ignore those.
            # Other more serious errors should still be raised, though.
            if e.error_code == 210:
                return None
            raise e
|
{
"content_hash": "402d2080d14436d7c32ccdcd65736fef",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 80,
"avg_line_length": 36.60620525059666,
"alnum_prop": 0.5652627461207459,
"repo_name": "clach04/rbtools",
"id": "b71bfde5dbd2955c02529827981ab58ec9aef54e",
"size": "15338",
"binary": false,
"copies": "3",
"ref": "refs/heads/p2_0.4.1",
"path": "rbtools/clients/clearcase.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "267005"
}
],
"symlink_target": ""
}
|
import os.path
import re
class MockInputApi(object):
    """Mock class for the InputApi class.

    This class can be used for unittests for presubmit by initializing the
    files attribute as the list of changed files.
    """

    def __init__(self):
        self.change = MockChange([], [])
        self.files = []
        self.presubmit_local_path = os.path.dirname(__file__)

    def AffectedSourceFiles(self, file_filter=None):
        """Alias for AffectedFiles(); the mock treats every file as source."""
        return self.AffectedFiles(file_filter=file_filter)

    def AffectedFiles(self, file_filter=None, include_deletes=False):
        """Return the configured file list; filter arguments are ignored."""
        # pylint: disable=unused-argument
        return self.files

    @classmethod
    def FilterSourceFile(cls, affected_file, white_list=(), black_list=()):
        """Mock filter: accept every file unconditionally."""
        # pylint: disable=unused-argument
        return True

    def PresubmitLocalPath(self):
        return self.presubmit_local_path

    def ReadFile(self, affected_file, mode='rU'):
        """Read an affected file's contents from disk.

        Raises IOError if the file is not one of the mocked changed files.
        NOTE: the 'rU' default mode was removed in Python 3.11; callers on
        modern Python should pass mode='r'.
        """
        filename = affected_file.AbsoluteLocalPath()
        # BUG FIX: the loop variable used to be named `f` and was then
        # shadowed by the `with open(...) as f` target; renamed both.
        for mock_file in self.files:
            if mock_file.LocalPath() == filename:
                with open(filename, mode) as fh:
                    return fh.read()
        # Otherwise, file is not in our mock API.
        # BUG FIX: use the call form of raise -- the original
        # `raise IOError, "..."` is a SyntaxError on Python 3.
        raise IOError("No such file or directory: '%s'" % filename)
class MockOutputApi(object):
    """Mock class for the OutputApi class.

    An instance of this class can be passed to presubmit unittests for
    outputing various types of results.
    """

    class PresubmitResult(object):
        """Base result: a message plus optional item list and long text."""

        def __init__(self, message, items=None, long_text=''):
            self.message = message
            self.items = items
            self.long_text = long_text

        def __repr__(self):
            # The message alone is the most useful debug representation.
            return self.message

    class PresubmitError(PresubmitResult):
        """A result marked as an error (self.type == 'error')."""

        def __init__(self, message, items=None, long_text=''):
            super(MockOutputApi.PresubmitError, self).__init__(
                message, items, long_text)
            self.type = 'error'
class MockChange(object):
    """Mock class for Change class.

    This class can be used in presubmit unittests to mock the query of the
    current change.
    """

    def __init__(self, changed_files, bugs_from_description, tags=None):
        self._changed_files = changed_files
        self._bugs_from_description = bugs_from_description
        self.tags = tags if tags else dict()

    def BugsFromDescription(self):
        return self._bugs_from_description

    def __getattr__(self, attr):
        """Return tags directly as attributes on the object."""
        # Only UPPER_CASE names are treated as tag lookups; anything else is
        # a genuine missing attribute.
        if re.match(r"^[A-Z_]*$", attr) is None:
            raise AttributeError(self, attr)
        return self.tags.get(attr)
class MockFile(object):
    """Mock class for the File class.

    This class can be used to form the mock list of changed files in
    MockInputApi for presubmit unittests.
    """

    def __init__(self, local_path, new_contents=None, old_contents=None,
                 action='A'):
        if new_contents is None:
            new_contents = ["Data"]
        self._local_path = local_path
        self._new_contents = new_contents
        # (line number, line) pairs, 1-based, one per new-content line.
        self._changed_contents = [
            (num, line) for num, line in enumerate(new_contents, 1)]
        self._action = action
        self._old_contents = old_contents

    def Action(self):
        return self._action

    def ChangedContents(self):
        return self._changed_contents

    def NewContents(self):
        return self._new_contents

    def OldContents(self):
        return self._old_contents

    def LocalPath(self):
        return self._local_path

    def AbsoluteLocalPath(self):
        # The mock makes no relative/absolute distinction.
        return self._local_path
|
{
"content_hash": "4e813a95c1eec6bdacc21622508039a6",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 78,
"avg_line_length": 27.647058823529413,
"alnum_prop": 0.6802431610942249,
"repo_name": "endlessm/chromium-browser",
"id": "510a553158b12ac2637b2bfde474aeaa8a50073b",
"size": "3786",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "third_party/webrtc/presubmit_test_mocks.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys
class LightBreaker(Actor):
    """
    React to changes in a sensor.
    Output:
    open : state true=closed, false=open
    """

    @manage(include=[])
    def init(self):
        self.setup()

    def setup(self):
        # Acquire the light-breaker sensor through the calvinsys API.
        self.sensor = calvinsys.open(self, "io.lightbreaker")

    def will_migrate(self):
        # Release the sensor before this actor moves to another runtime.
        calvinsys.close(self.sensor)
        self.sensor = None

    def will_end(self):
        if self.sensor:
            calvinsys.close(self.sensor)

    def did_migrate(self):
        # Re-acquire the sensor on the new runtime.
        self.setup()

    @stateguard(lambda self: calvinsys.can_read(self.sensor))
    @condition([], ["open"])
    def state_change(self):
        # Emit the sensor reading as a plain boolean on the 'open' port.
        reading = calvinsys.read(self.sensor)
        return (bool(reading),)

    action_priority = (state_change, )
    requires = ['io.lightbreaker']

    test_calvinsys = {'io.lightbreaker': {'read': [True, False, True, False]}}
    test_set = [
        {
            'outports': {'open': [True, False, True, False]}
        }
    ]
|
{
"content_hash": "8f073463e7185a84a9bb328cebdb7ff3",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 78,
"avg_line_length": 24.136363636363637,
"alnum_prop": 0.5894538606403014,
"repo_name": "EricssonResearch/calvin-base",
"id": "472e6d17ecd875e287e06ffdc8b8b3f265be2383",
"size": "1667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "calvin/actorstore/systemactors/sensor/LightBreaker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "769"
},
{
"name": "Dockerfile",
"bytes": "612"
},
{
"name": "HTML",
"bytes": "24571"
},
{
"name": "JavaScript",
"bytes": "78325"
},
{
"name": "Makefile",
"bytes": "816"
},
{
"name": "Python",
"bytes": "3291484"
},
{
"name": "Shell",
"bytes": "37140"
}
],
"symlink_target": ""
}
|
from collections import Counter
import sys


def count_words(lines):
    """Return a Counter of whitespace-separated tokens in *lines*."""
    counts = Counter()
    for line in lines:
        # str.split() with no args ignores leading/trailing whitespace,
        # so the original .strip() is unnecessary.
        counts.update(line.split())
    return counts


def main():
    """Print "word<TAB>count" for stdin tokens, most frequent first.

    Counter.most_common() sorts by count descending with a stable sort,
    matching the original sorted(..., key=lambda x: -x[1]) ordering.
    """
    for word, count in count_words(sys.stdin).most_common():
        print(f'{word}\t{count}')


if __name__ == '__main__':
    main()
|
{
"content_hash": "e11b7317ede26438f5d3059a9190b965",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 54,
"avg_line_length": 23.4,
"alnum_prop": 0.6623931623931624,
"repo_name": "neulab/compare-mt",
"id": "c8c2dc1af0e5fe06c3a2d2744919fad60d45bda6",
"size": "234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/count.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "191011"
},
{
"name": "Shell",
"bytes": "792"
}
],
"symlink_target": ""
}
|
"""
Contains GUI forms for the Voronoi volume filter.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import base
from ...filtering.filters import voronoiVolumeFilter
################################################################################
class VoronoiVolumeSettingsDialog(base.GenericSettingsDialog):
    """
    Settings for Voronoi volume filter
    """
    def __init__(self, mainWindow, title, parent=None):
        super(VoronoiVolumeSettingsDialog, self).__init__(title, parent, "Voronoi volume")
        self.setMinimumWidth(350)
        # This filter contributes a per-atom "Voronoi volume" scalar.
        self.addProvidedScalar("Voronoi volume")
        # settings
        self._settings = voronoiVolumeFilter.VoronoiVolumeFilterSettings()
        # filtering options: an enable checkbox plus min/max volume bounds.
        # NOTE(review): the tooltip says "Filter atoms by slip" -- looks like a
        # copy/paste from another filter dialog; presumably it should read
        # "Filter atoms by Voronoi volume" (string left unchanged here).
        self.addCheckBox("filteringEnabled", toolTip="Filter atoms by slip", label="<b>Enable filtering</b>", extraSlot=self.filteringToggled)
        self.minVoroVolSpin = self.addDoubleSpinBox("minVoroVol", minVal=0, maxVal=9999, step=0.1, toolTip="Minimum visible Voronoi volume",
                                                    label="Minimum", settingEnabled="filteringEnabled")
        self.maxVoroVolSpin = self.addDoubleSpinBox("maxVoroVol", minVal=0, maxVal=9999, step=0.1, toolTip="Maximum visible Voronoi volume",
                                                    label="Maximum", settingEnabled="filteringEnabled")

    def filteringToggled(self, enabled):
        """Filtering toggled."""
        # The min/max bounds are only meaningful while filtering is enabled.
        self.minVoroVolSpin.setEnabled(enabled)
        self.maxVoroVolSpin.setEnabled(enabled)
|
{
"content_hash": "2d1ff8cc8294ab0938d1e9ecd2055063",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 142,
"avg_line_length": 42.44736842105263,
"alnum_prop": 0.6212027278363298,
"repo_name": "chrisdjscott/Atoman",
"id": "8183eab735d35974a0a5bd65021be629c81d3632",
"size": "1614",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "atoman/gui/filterSettings/voronoiVolumeSettingsDialog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "517802"
},
{
"name": "C++",
"bytes": "413449"
},
{
"name": "Makefile",
"bytes": "4095"
},
{
"name": "Perl",
"bytes": "8710"
},
{
"name": "Python",
"bytes": "1466114"
},
{
"name": "Roff",
"bytes": "12768"
},
{
"name": "Shell",
"bytes": "7607"
}
],
"symlink_target": ""
}
|
import argparse
import logging
import os

# Command-line handling for dirsend: parse arguments and derive the runtime
# configuration (command, database/lock file locations, logging level).
commandDesc = "Executes a provided command once on every file or " \
              "subdirectory in a particular directory. Can be used, " \
              "for example, for pushing files from one location to " \
              "another via a cron job."
parser = argparse.ArgumentParser(description=commandDesc)
parser.add_argument('directory', help='The directory with source files', nargs='?')
parser.add_argument('--list-entries', help='List current known processed file entries', action='store_true')
parser.add_argument('--delete-entry', help='Delete a file entry by its index', metavar='index', type=int)
parser.add_argument('-c', '--command', help='Command to be run on each file. The string "{}" (without the quotes) will be replaced by the full path of the filename.', metavar='command', action='store')
parser.add_argument('-v', '--verbose', help='Enable verbose output', action='store_true')
# Typo fix in user-visible help text: "exexecute" -> "execute".
parser.add_argument('--store-only', help='Mark all files as processed but don\'t execute a command', action='store_true')
parser.add_argument('--data-dir', help='Directory to store database and lock files. (Defaults to user home.)', default='~', metavar='directory')
args = parser.parse_args()

# Reject nonsensical argument combinations up front.
# BUG FIX: use `is not None` for --delete-entry -- the old truthiness test
# wrongly rejected the valid index 0.
if not (args.directory or args.list_entries or args.delete_entry is not None):
    parser.error('Either a directory, --list-entries or --delete-entry required.')
if args.directory and not (args.command or args.store_only):
    parser.error('A directory was specified but no command provided with -c or --command')

# Configure runtime parameters based on arguments.
command = args.command
listFiles = args.list_entries
deleteEntry = args.delete_entry
directory = args.directory
dataDir = os.path.expanduser(args.data_dir)
storeOnly = args.store_only
skipDotFiles = True
# os.path.join handles the path separator portably (the original glued
# '/' in by hand).
dbFile = os.path.abspath(os.path.join(dataDir, '.dirsend.db'))
lockFile = os.path.abspath(os.path.join(dataDir, '.dirsend.lock'))

# Configure logging: everything goes to stderr via a stream handler.
logger = logging.getLogger()
_handler = logging.StreamHandler()
logger.addHandler(_handler)
if args.verbose:
    logger.setLevel(logging.DEBUG)
    logger.debug('Debug output enabled')
|
{
"content_hash": "fba8f0e7731a1f998aeff3c97b9bbf9a",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 202,
"avg_line_length": 39.53703703703704,
"alnum_prop": 0.7217798594847775,
"repo_name": "cutchin/dirsend",
"id": "90f31c7fbd28f87e494e53695340f299ab489aaf",
"size": "2136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "args.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6808"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, absolute_import
from django.urls import reverse
from django.test import override_settings
from mock import patch
from ci.tests import DBTester, utils
from ci.github import api
@override_settings(INSTALLED_GITSERVERS=[utils.github_config()])
class Tests(DBTester.DBTester):
    """Tests for debug-only views: session bootstrap and job script access."""

    def setUp(self):
        super(Tests, self).setUp()
        self.create_default_recipes()

    def test_start_session(self):
        # start_session is only reachable with DEBUG=True.
        with self.settings(DEBUG=True):
            # Unknown user primary key -> 404.
            response = self.client.get(reverse('ci:start_session', args=[1000]))
            self.assertEqual(response.status_code, 404)
            user = utils.get_test_user()
            owner = utils.get_owner()
            response = self.client.get(reverse('ci:start_session', args=[owner.pk]))
            # owner doesn't have a token
            self.assertEqual(response.status_code, 404)
            # Valid user with a token: redirect after starting the session.
            response = self.client.get(reverse('ci:start_session', args=[user.pk]))
            self.assertEqual(response.status_code, 302)
            auth = user.auth()
            # A successful start stores the auth keys in the session.
            self.assertIn(auth._user_key, self.client.session)
            self.assertIn(auth._token_key, self.client.session)
        with self.settings(DEBUG=False):
            # The endpoint is hidden entirely outside DEBUG mode.
            response = self.client.get(reverse('ci:start_session', args=[user.pk]))
            self.assertEqual(response.status_code, 404)

    def test_start_session_by_name(self):
        # Same behavior as test_start_session, but looked up by user name.
        with self.settings(DEBUG=True):
            # invalid name
            response = self.client.get(reverse('ci:start_session_by_name', args=['nobody']))
            self.assertEqual(response.status_code, 404)
            user = utils.get_test_user()
            owner = utils.get_owner()
            # owner doesn't have a token
            response = self.client.get(reverse('ci:start_session_by_name', args=[owner.name]))
            self.assertEqual(response.status_code, 404)
            # valid, user has a token
            response = self.client.get(reverse('ci:start_session_by_name', args=[user.name]))
            self.assertEqual(response.status_code, 302)
            auth = user.auth()
            self.assertIn(auth._user_key, self.client.session)
            self.assertIn(auth._token_key, self.client.session)
        with self.settings(DEBUG=False):
            response = self.client.get(reverse('ci:start_session_by_name', args=[user.name]))
            self.assertEqual(response.status_code, 404)

    @patch.object(api.GitHubAPI, 'is_collaborator')
    def test_job_script(self, mock_collab):
        # bad pk
        mock_collab.return_value = False
        response = self.client.get(reverse('ci:job_script', args=[1000]))
        self.assertEqual(response.status_code, 404)
        with utils.RecipeDir():
            user = utils.get_test_user()
            job = utils.create_job(user=user)
            job.recipe.build_user = user
            job.recipe.save()
            utils.create_prestepsource(recipe=job.recipe)
            utils.create_recipe_environment(recipe=job.recipe)
            step = utils.create_step(recipe=job.recipe, filename='scripts/1.sh')
            utils.create_step_environment(step=step)
            url = reverse('ci:job_script', args=[job.pk])
            response = self.client.get(url)
            # owner doesn't have permission (not a collaborator, not logged in)
            self.assertEqual(response.status_code, 404)
            # Once a collaborator and logged in, the script is served.
            mock_collab.return_value = True
            utils.simulate_login(self.client.session, user)
            response = self.client.get(url)
            self.assertEqual(response.status_code, 200)
            self.assertContains(response, job.recipe.name)
|
{
"content_hash": "8a7eb3c238f9eb361efd091ac9042f17",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 94,
"avg_line_length": 43.19047619047619,
"alnum_prop": 0.6221058434399118,
"repo_name": "idaholab/civet",
"id": "45f778d9ed51832e99233729d0b0a8b6f67d2d52",
"size": "4223",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ci/tests/test_DebugViews.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "790"
},
{
"name": "CSS",
"bytes": "45382"
},
{
"name": "HTML",
"bytes": "75321"
},
{
"name": "JavaScript",
"bytes": "6394"
},
{
"name": "Makefile",
"bytes": "1670"
},
{
"name": "Python",
"bytes": "1161254"
},
{
"name": "Shell",
"bytes": "8877"
}
],
"symlink_target": ""
}
|
import theano
from theano import tensor as T
from theano.tensor.nnet import conv2d
import numpy
# Seeded RNG so the "simulated learning" bias values are reproducible.
rng = numpy.random.RandomState(23450)
# instantiate 4D tensor for input
input = T.tensor4(name='input')
# Number of convolution filters (output channels) to build and display.
nr_channels = 5
# Normalisation constant for the hand-built 9x9 kernels; used by func() below.
w_bound = numpy.sqrt(4 * 9 * 9)
def func(i, j, k, bound=None):
    """Return the kernel weight at grid position (i, j) for pattern ``k``.

    Each pattern zeroes a different line of the 9x9 kernel and sets every
    other cell to 1/bound:
      k=0: centre row/column (i == 4 or j == 4)
      k=1: main diagonal (i == j)
      k=2: anti-diagonal (i == 8 - j)
      k=3: the 2:1 slope lines (i == 2*j or j == 2*i)

    ``bound`` generalizes the module-level ``w_bound`` the original
    hard-coded; omit it to keep the old behavior.
    Raises ValueError for k outside 0..3 (the original silently returned
    None, which would have crashed later in astype()).
    """
    if bound is None:
        bound = w_bound  # module-level normalisation constant
    if k == 0:
        if i == 4 or j == 4:
            return 0.
        return 1. / bound
    if k == 1:
        if i == j:
            return 0.
        return 1. / bound
    if k == 2:
        if i == 8 - j:
            # The original returned the int 0 here and for k == 3; normalized
            # to a float for consistency (same value).
            return 0.
        return 1. / bound
    if k == 3:
        if i == 2 * j or j == 2 * i:
            return 0.
        return 1. / bound
    raise ValueError("k must be in 0..3, got %r" % (k,))
# initialize shared variable for weights.
# Each of the nr_channels filters spans 4 input channels and is 9x9.
w_shp = (nr_channels, 4, 9, 9)
at = numpy.zeros(w_shp)
b =[]
for m in range(4):
    # Evaluate func(i, j, 3 - m) over the full 9x9 grid to build one
    # input-channel slice of the kernel.
    a = numpy.frompyfunc(
        lambda i, j: func(i, j, 3 -m), 2, 1
    ).outer(
        numpy.arange(9),
        numpy.arange(9),
    ).astype(numpy.float64) # a_ij = func(i, j)
    b.append(a)
# Broadcast the same 4 channel slices into every one of the output filters.
at[:,]=b
W = theano.shared( at, name ='W')
# initialize shared variable for bias (1D tensor) with random values
# IMPORTANT: biases are usually initialized to zero. However in this
# particular application, we simply apply the convolutional layer to
# an image without learning the parameters. We therefore initialize
# them to random values to "simulate" learning.
# (Note: the name `b` is re-used here; the kernel-slice list built above is
# no longer needed.)
b_shp = (nr_channels,)
b = theano.shared(numpy.asarray(
            rng.uniform(low=-.5, high=.5, size=b_shp),
            dtype=input.dtype), name ='b')
# build symbolic expression that computes the convolution of input with filters in w
conv_out = conv2d(input, W)
# build symbolic expression to add bias and apply activation function, i.e. produce neural net layer output
# A few words on ``dimshuffle`` :
#   ``dimshuffle`` is a powerful tool in reshaping a tensor;
#   what it allows you to do is to shuffle dimension around
#   but also to insert new ones along which the tensor will be
#   broadcastable;
#   dimshuffle('x', 2, 'x', 0, 1)
#   This will work on 3d tensors with no broadcastable
#   dimensions. The first dimension will be broadcastable,
#   then we will have the third dimension of the input tensor as
#   the second of the resulting tensor, etc. If the tensor has
#   shape (20, 30, 40), the resulting tensor will have dimensions
#   (1, 40, 1, 20, 30). (AxBxC tensor is mapped to 1xCx1xAxB tensor)
#   More examples:
#    dimshuffle('x') -> make a 0d (scalar) into a 1d vector
#    dimshuffle(0, 1) -> identity
#    dimshuffle(1, 0) -> inverts the first and second dimensions
#    dimshuffle('x', 0) -> make a row out of a 1d vector (N to 1xN)
#    dimshuffle(0, 'x') -> make a column out of a 1d vector (N to Nx1)
#    dimshuffle(2, 0, 1) -> AxBxC to CxAxB
#    dimshuffle(0, 'x', 1) -> AxB to Ax1xB
#    dimshuffle(1, 'x', 0) -> AxB to Bx1xA
output = T.nnet.sigmoid(conv_out + b.dimshuffle('x', 0, 'x', 'x'))
# create theano function to compute filtered images
f = theano.function([input], output)
from PIL import Image
# open random image of dimensions 639x516
img = Image.open(open('../../data/pics/venus.png'))
# dimensions are (height, width, channel)
img = numpy.asarray(img, dtype='float64') / 256.
# put image in 4D tensor of shape (1, 4, height, width)
# (comment corrected: the reshape below uses 4 channels -- presumably an
# RGBA PNG, matching w_shp's input depth of 4; confirm the source image.)
img_ = img.transpose(2, 0, 1).reshape(1, 4, img.shape[0], img.shape[1])
filtered_img = f(img_)
import pylab
# plot original image and first and second components of output
pylab.subplot((nr_channels + 1)/2, 2 , 1); pylab.axis('off'); pylab.imshow(img)
pylab.gray();
# recall that the convOp output (filtered image) is actually a "minibatch",
# of size 1 here, so we take index 0 in the first dimension:
for i in range(nr_channels):
    pylab.subplot((nr_channels + 1)/2, 2 , i +2);
    pylab.axis('off'); pylab.imshow(filtered_img[0, i, :, :])
pylab.show()
|
{
"content_hash": "d043c8d33ca4a25133db5c5f9ef58c09",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 107,
"avg_line_length": 31.414634146341463,
"alnum_prop": 0.6381987577639752,
"repo_name": "laputian/dml",
"id": "39ae502fbfd8ec0ab48ba101c99df2c85b639f5b",
"size": "3864",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mlp_test/conv2d_test/test_png_custom_filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "326596"
}
],
"symlink_target": ""
}
|
class ImageSizeError(Exception):
    """Error concerning an image's dimensions.

    Presumably raised when an image's size is unsuitable for the requested
    operation -- confirm at the raise sites elsewhere in the package.
    """
    pass
class ThumbnailOptionError(Exception):
    """Error concerning thumbnail configuration options.

    Presumably raised for invalid/unknown thumbnail options -- confirm at
    the raise sites elsewhere in the package.
    """
    pass
class ThumbnailWorksError(Exception):
    """Internal thumbnail_works error.
    Should be raised any time a method encounters an argument having a bad type
    or bad value. Write as many such checks as necessary in order to catch any
    changes in the underlying framework.
    This is important, since this app is built on Django internal structures,
    which might change without notice.
    """
    pass
class NoAccessToImage(Exception):
    """Raised when an image cannot be accessed.

    Exact trigger (missing file vs. permissions) is not visible here --
    confirm at the raise sites elsewhere in the package.
    """
    pass
|
{
"content_hash": "bed6437a1c7e1a8742a4734d253133e3",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 79,
"avg_line_length": 26.761904761904763,
"alnum_prop": 0.7259786476868327,
"repo_name": "frol/django-thumbnail-works",
"id": "04a6acf886f53fa1fe4489a244832d69b766c549",
"size": "1520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/thumbnail_works/exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "46402"
},
{
"name": "Shell",
"bytes": "4543"
}
],
"symlink_target": ""
}
|
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import os
import sys
from twitter.common.lang import Compatibility
from pants.base.config import Config
from pants.reporting.html_reporter import HtmlReporter
from pants.reporting.plaintext_reporter import PlainTextReporter
from pants.reporting.quiet_reporter import QuietReporter
from pants.reporting.report import Report, ReportingError
from pants.reporting.reporting_server import ReportingServerManager
from pants.util.dirutil import safe_mkdir, safe_rmtree
StringIO = Compatibility.StringIO
def initial_reporting(config, run_tracker):
  """Sets up the initial reporting configuration.

  Will be changed after we parse cmd-line flags.

  :param config: pants Config object; supplies the workdir and template dir.
  :param run_tracker: tracker whose run_info must already carry an 'id'.
  :returns: a Report with a buffering 'capturing' reporter and an 'html' reporter.
  :raises ReportingError: if no run id has been set on run_tracker.
  """
  # Reports live under <pants workdir>/reports/<run_id>; 'latest' is a symlink
  # to the most recent run's directory.
  reports_dir = os.path.join(config.get_option(Config.DEFAULT_PANTS_WORKDIR), 'reports')
  link_to_latest = os.path.join(reports_dir, 'latest')
  if os.path.lexists(link_to_latest):
    # lexists(): remove the old symlink even if its target no longer exists.
    os.unlink(link_to_latest)
  run_id = run_tracker.run_info.get_info('id')
  if run_id is None:
    raise ReportingError('No run_id set')
  run_dir = os.path.join(reports_dir, run_id)
  # Start from a clean per-run directory before repointing 'latest' at it.
  safe_rmtree(run_dir)
  html_dir = os.path.join(run_dir, 'html')
  safe_mkdir(html_dir)
  os.symlink(run_dir, link_to_latest)
  report = Report()
  # Capture initial console reporting into a buffer. We'll do something with it once
  # we know what the cmd-line flag settings are.
  outfile = StringIO()
  capturing_reporter_settings = PlainTextReporter.Settings(outfile=outfile, log_level=Report.INFO,
                                                          color=False, indent=True, timing=False,
                                                          cache_stats=False)
  capturing_reporter = PlainTextReporter(run_tracker, capturing_reporter_settings)
  report.add_reporter('capturing', capturing_reporter)
  # Set up HTML reporting. We always want that.
  template_dir = config.get('reporting', 'reports_template_dir')
  html_reporter_settings = HtmlReporter.Settings(log_level=Report.INFO,
                                                 html_dir=html_dir,
                                                 template_dir=template_dir)
  html_reporter = HtmlReporter(run_tracker, html_reporter_settings)
  report.add_reporter('html', html_reporter)
  # Add some useful RunInfo.
  run_tracker.run_info.add_info('default_report', html_reporter.report_path())
  port = ReportingServerManager.get_current_server_port()
  if port:
    run_tracker.run_info.add_info('report_url', 'http://localhost:%d/run/%s' % (port, run_id))
  return report
def update_reporting(options, is_quiet_task, run_tracker):
  """Updates reporting config once we've parsed cmd-line flags.

  Swaps the buffering 'capturing' reporter installed by initial_reporting()
  for a real console reporter (quiet or plain-text), replaying the buffered
  output, and optionally adds a plaintext logfile reporter.
  """
  # Get any output silently buffered in the old console reporter, and remove it.
  old_outfile = run_tracker.report.remove_reporter('capturing').settings.outfile
  old_outfile.flush()
  buffered_output = old_outfile.getvalue()
  old_outfile.close()
  log_level = Report.log_level_from_string(options.level or 'info')
  # Ideally, we'd use terminfo or somesuch to discover whether a
  # terminal truly supports color, but most that don't set TERM=dumb.
  color = (not options.no_colors) and (os.getenv('TERM') != 'dumb')
  timing = options.time
  cache_stats = options.time # TODO: Separate flag for this?
  if options.quiet or is_quiet_task:
    # Quiet mode: buffered output is intentionally discarded, not replayed.
    console_reporter = QuietReporter(run_tracker,
                                     QuietReporter.Settings(log_level=log_level, color=color))
  else:
    # Set up the new console reporter.
    settings = PlainTextReporter.Settings(log_level=log_level, outfile=sys.stdout, color=color,
                                          indent=True, timing=timing, cache_stats=cache_stats)
    console_reporter = PlainTextReporter(run_tracker, settings)
    # Replay the output captured before flag parsing so nothing is lost.
    console_reporter.emit(buffered_output)
    console_reporter.flush()
  run_tracker.report.add_reporter('console', console_reporter)
  if options.logdir:
    # Also write plaintext logs to a file. This is completely separate from the html reports.
    safe_mkdir(options.logdir)
    run_id = run_tracker.run_info.get_info('id')
    # NOTE(review): this file handle is left open for the reporter's lifetime.
    outfile = open(os.path.join(options.logdir, '%s.log' % run_id), 'w')
    settings = PlainTextReporter.Settings(log_level=log_level, outfile=outfile, color=False,
                                          indent=True, timing=True, cache_stats=True)
    logfile_reporter = PlainTextReporter(run_tracker, settings)
    logfile_reporter.emit(buffered_output)
    logfile_reporter.flush()
    run_tracker.report.add_reporter('logfile', logfile_reporter)
|
{
"content_hash": "d800e2b30241134617a5b5a8701e4701",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 98,
"avg_line_length": 43.698113207547166,
"alnum_prop": 0.6917098445595855,
"repo_name": "Ervii/garage-time",
"id": "0099ab5f5343ab525b27d257d9caa3ceca0da9cd",
"size": "4779",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "garage/src/python/pants/goal/initialize_reporting.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9347"
},
{
"name": "GAP",
"bytes": "4684"
},
{
"name": "HTML",
"bytes": "64603"
},
{
"name": "Java",
"bytes": "43275"
},
{
"name": "JavaScript",
"bytes": "9523"
},
{
"name": "Protocol Buffer",
"bytes": "4664"
},
{
"name": "Python",
"bytes": "2200035"
},
{
"name": "Scala",
"bytes": "6693"
},
{
"name": "Shell",
"bytes": "29352"
},
{
"name": "Thrift",
"bytes": "1946"
}
],
"symlink_target": ""
}
|
import os
import uuid
import time
import traceback
import xml.etree.ElementTree as ET
from PyQt4 import QtCore, QtGui
from XSTAF.core.logger import LOGGER
from ui.ui_testsuite_generator import Ui_TestSuiteDialog
import ui.resources_rc
class Tool(object):
    """Plugin entry point for the testsuite generator tool.

    The hosting application registers its main window via set_main_window()
    and uses icon()/description() for display, then calls launch() to open
    the generator dialog.
    """

    _description = "testsuite generator"
    main_window = None

    @classmethod
    def set_main_window(cls, main_window):
        """Remember the application's main window for later use as dialog parent."""
        cls.main_window = main_window

    @staticmethod
    def icon():
        """Return the QIcon displayed for this tool."""
        generator_icon = QtGui.QIcon()
        generator_icon.addPixmap(QtGui.QPixmap(":icons/icons/generator.png"))
        return generator_icon

    @classmethod
    def launch(cls):
        """Open the generator dialog modally; log any failure instead of raising."""
        try:
            LOGGER.info("Launch testsuite generator tool")
            dialog = TestsuiteGenerator(cls.main_window)
            dialog.exec_()
        except:
            # Preserve original behavior: swallow everything and log the traceback.
            LOGGER.error(traceback.format_exc())

    @classmethod
    def description(cls):
        """Return the short human-readable tool description."""
        return cls._description
class TestsuiteGenerator(QtGui.QDialog, Ui_TestSuiteDialog):
    # Dialog that collects an input file and output directory, then converts
    # the input to a testsuite XML via the parser chosen by the radio buttons
    # (PyAnvil XML or plain CSV).
    def __init__(self, parent):
        # NOTE(review): ``parent`` is accepted but not forwarded to
        # QDialog.__init__ -- confirm whether parenting was intended.
        QtGui.QDialog.__init__(self)
        self.setupUi(self)
        # Default the input format selection to PyAnvil.
        self.pyanvilRadioButton.toggle()
        self.connect(self.inputToolButton, QtCore.SIGNAL("clicked(bool)"), self.get_input_file)
        self.connect(self.outputToolButton, QtCore.SIGNAL("clicked(bool)"), self.get_output_location)
    def get_input_file(self):
        # Let the user browse for the source file to convert.
        input_file = QtGui.QFileDialog.getOpenFileName(self, "Input file")
        self.inputLineEdit.setText(input_file)
    def get_output_location(self):
        # Let the user browse for the directory receiving the generated XML.
        output_path = QtGui.QFileDialog.getExistingDirectory(self, "Output location")
        self.outputLineEdit.setText(output_path)
    def accept(self):
        # On OK: run the parser matching the selected radio button, then close.
        input_file = str(self.inputLineEdit.text())
        output_path = str(self.outputLineEdit.text())
        if self.pyanvilRadioButton.isChecked():
            parser = PyAnvilParser(input_file)
            parser.generate(output_path)
        else:
            parser = CSVParser(input_file)
            parser.generate(output_path)
        QtGui.QDialog.accept(self)
def indent(elem, level=0):
    """Recursively add whitespace to *elem* (in place) so the serialized
    XML comes out pretty-printed with two-space indentation per level.
    """
    pad = "\n" + "  " * level
    children = list(elem)
    if children:
        # Open the element's text on a new, deeper-indented line.
        if not (elem.text and elem.text.strip()):
            elem.text = pad + "  "
        if not (elem.tail and elem.tail.strip()):
            elem.tail = pad
        for child in children:
            indent(child, level + 1)
        # The last child's tail closes the parent, so it gets the
        # parent's indentation level.
        last = children[-1]
        if not (last.tail and last.tail.strip()):
            last.tail = pad
    else:
        if level and not (elem.tail and elem.tail.strip()):
            elem.tail = pad
class CSVParser(object):
    '''Convert a plain comma-separated testcase listing into testsuite XML.

    csv format requried:
    "name,command,auto,timeout,description,data"

    NOTE: lines are split naively on "," -- quoted fields containing
    commas are not supported.
    '''
    # Number of columns expected per input line.
    _FIELD_COUNT = 6

    def __init__(self, input_file):
        self.input_file = input_file

    def generate(self, output_path):
        """Read ``self.input_file`` and write ``<basename>_generated.xml``
        into *output_path*, one <TestCase> element per non-blank input line.
        """
        input_file_basename = os.path.basename(self.input_file).split(".")[0]
        output_file = os.path.join(output_path, input_file_basename+"_generated.xml")
        root_element = ET.Element("TestSuite")
        testcases_element = ET.SubElement(root_element, "TestCases")
        with open(self.input_file, 'r') as in_f:
            for line in in_f:
                # Bug fix: strip the trailing newline so the last field
                # ("data") does not silently pick up a "\n" suffix.
                line = line.rstrip("\r\n")
                if not line.strip():
                    # Robustness: skip blank lines instead of emitting an
                    # empty test case for them.
                    continue
                values = line.split(",")
                # Pad or truncate to exactly six fields.
                if len(values) < self._FIELD_COUNT:
                    values += ("", )*(self._FIELD_COUNT-len(values))
                elif len(values) > self._FIELD_COUNT:
                    values = values[0:self._FIELD_COUNT]
                name, command, auto, timeout, description, data = values
                testcase_element = ET.SubElement(testcases_element, "TestCase")
                id_element = ET.SubElement(testcase_element, "ID")
                id_element.text = str(uuid.uuid1())
                # uuid1 is time-based; sleep briefly so consecutive IDs differ.
                time.sleep(0.01)
                name_element = ET.SubElement(testcase_element, "Name")
                name_element.text = name
                command_element = ET.SubElement(testcase_element, "Command")
                command_element.text = command
                auto_element = ET.SubElement(testcase_element, "Auto")
                auto_element.text = auto
                timeout_element = ET.SubElement(testcase_element, "Timeout")
                timeout_element.text = timeout
                description_element = ET.SubElement(testcase_element, "Description")
                description_element.text = description
                data_element = ET.SubElement(testcase_element, "Data")
                data_element.text = data
                runs_element = ET.SubElement(testcase_element, "Runs")
        indent(root_element)
        ET.ElementTree(root_element).write(output_file)
class PyAnvilParser(object):
    """Convert a PyAnvil tool XML file into a testsuite XML file.

    Reads every TestList/ToolCase element of the input and emits one
    <TestCase> per ToolCase into ``<basename>_generated.xml``.
    """

    def __init__(self, input_file):
        self.input_file = input_file

    def generate(self, output_path):
        """Parse ``self.input_file`` and write the generated testsuite XML
        into *output_path*.
        """
        input_file_basename = os.path.basename(self.input_file).split(".")[0]
        output_file = os.path.join(output_path, input_file_basename+"_generated.xml")
        root_element = ET.Element("TestSuite")
        testcases_element = ET.SubElement(root_element, "TestCases")
        input_xml_tree = ET.parse(self.input_file)
        input_root_element = input_xml_tree.getroot()
        input_testcase_elements = input_root_element.findall("TestList/ToolCase")
        for input_testcase_element in input_testcase_elements:
            data = input_testcase_element.attrib["name"]
            # NOTE(review): a ToolCase missing <Executable> or <Parameters>
            # will still raise AttributeError here, as before.
            executable = input_testcase_element.find("Executable").text
            parameters = input_testcase_element.find("Parameters").text
            command = executable+" "+parameters
            auto = "True"
            description = ""
            # Bug fix: ``name`` and ``timeout`` were previously only bound
            # inside the conditionals below, which raised NameError for the
            # first ToolCase lacking <Timeout>/<Description> and silently
            # reused stale values from the previous iteration afterwards.
            name = ""
            timeout = ""
            if not input_testcase_element.find("Timeout") is None:
                timeout = input_testcase_element.find("Timeout").text
            if not input_testcase_element.find("Description") is None:
                name = input_testcase_element.find("Description").text
            testcase_element = ET.SubElement(testcases_element, "TestCase")
            id_element = ET.SubElement(testcase_element, "ID")
            id_element.text = str(uuid.uuid1())
            # uuid1 is time-based; sleep briefly so consecutive IDs differ.
            time.sleep(0.01)
            name_element = ET.SubElement(testcase_element, "Name")
            name_element.text = name
            command_element = ET.SubElement(testcase_element, "Command")
            command_element.text = command
            auto_element = ET.SubElement(testcase_element, "Auto")
            auto_element.text = auto
            timeout_element = ET.SubElement(testcase_element, "Timeout")
            timeout_element.text = timeout
            description_element = ET.SubElement(testcase_element, "Description")
            description_element.text = description
            data_element = ET.SubElement(testcase_element, "Data")
            data_element.text = data
            runs_element = ET.SubElement(testcase_element, "Runs")
        indent(root_element)
        ET.ElementTree(root_element).write(output_file)
|
{
"content_hash": "4eae0863493077a2c824869559989322",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 101,
"avg_line_length": 40.38728323699422,
"alnum_prop": 0.5988263918706168,
"repo_name": "xcgspring/XSTAF",
"id": "65bba31190dbef455c19e06eaaaf09641727cd3c",
"size": "6988",
"binary": false,
"copies": "1",
"ref": "refs/heads/ver0.1",
"path": "XSTAF/tools/testsuite_generator/testsuite_generator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7227"
},
{
"name": "Python",
"bytes": "980326"
}
],
"symlink_target": ""
}
|
import logging
import requests
import tables
import factual.common.session
class Session(factual.common.session.BaseSession):
    """Factual v2 API session acting as a factory for request builders.

    NOTE(review): ``requests`` here refers to the module imported at the top
    of this file, not the PyPI HTTP library -- verify against the import.
    """

    def read(self, table):
        """Create a Read request bound to this session and *table*.
        See the requests module for details."""
        return requests.Read(self, table)

    def input(self, table):
        """Create an Input request bound to this session and *table*.
        See the requests module for details."""
        return requests.Input(self, table)

    def rate(self, table):
        """Create a Rate request bound to this session and *table*.
        See the requests module for details."""
        return requests.Rate(self, table)

    def duplicates(self, table):
        """Create a Duplicates request bound to this session and *table*.
        See the requests module for details."""
        return requests.Duplicates(self, table)

    def schema(self, table):
        """Create a Schema request bound to this session and *table*.
        See the requests module for details."""
        return requests.Schema(self, table)
|
{
"content_hash": "71c8889302eab5b644f92957810701c0",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 118,
"avg_line_length": 44.43478260869565,
"alnum_prop": 0.7592954990215264,
"repo_name": "casebeer/factual",
"id": "ee0f899d8ab92f99cc306fc41e08fe4d36fdeff7",
"size": "1022",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "factual/v2/session.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "37755"
}
],
"symlink_target": ""
}
|
class Wiki(object):
    """Wiki model stub.

    The constructor currently accepts a project and a path but stores
    neither -- the implementation is a placeholder.
    """

    def __init__(self, project, path):
        # Intentionally a no-op: arguments are accepted but discarded.
        pass
|
{
"content_hash": "c4d4802eae6a10e642a807d797f8fb17",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 38,
"avg_line_length": 18.25,
"alnum_prop": 0.5616438356164384,
"repo_name": "douban/code",
"id": "055c3b87d26269060ceb5d7656dedeac2a99fbc0",
"size": "99",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vilya/models/wiki.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "7956218"
},
{
"name": "HTML",
"bytes": "548630"
},
{
"name": "JavaScript",
"bytes": "7771620"
},
{
"name": "Makefile",
"bytes": "568"
},
{
"name": "Mako",
"bytes": "11668"
},
{
"name": "Python",
"bytes": "1486693"
},
{
"name": "Shell",
"bytes": "61416"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: asa_config
version_added: "2.2"
author: "Peter Sprygada (@privateip), Patrick Ogenstad (@ogenstad)"
short_description: Manage configuration sections on Cisco ASA devices
description:
- Cisco ASA configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with ASA configuration sections in
a deterministic way.
extends_documentation_fragment: asa
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section or hierarchy
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is mutually
exclusive with I(lines), I(parents).
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct
default: line
choices: ['line', 'block']
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. If the C(backup_options) value is not given,
the backup file is written to the C(backup) folder in the
playbook root directory. If the directory does not exist, it is created.
type: bool
default: 'no'
config:
description:
- The C(config) argument allows the playbook designer to supply
the base configuration to be used to validate configuration
changes necessary. If this argument is provided, the module
will not download the running-config from the remote node.
defaults:
description:
- This argument specifies whether or not to collect all defaults
when getting the remote device running config. When enabled,
the module will get the current config by issuing the command
C(show running-config all).
type: bool
default: 'no'
passwords:
description:
- This argument specifies to include passwords in the config
when retrieving the running-config from the remote device. This
includes passwords related to VPN endpoints. This argument is
mutually exclusive with I(defaults).
type: bool
default: 'no'
save:
description:
- The C(save) argument instructs the module to save the running-
config to the startup-config at the conclusion of the module
running. If check mode is specified, this argument is ignored.
type: bool
default: 'no'
backup_options:
description:
- This is a dict object containing configurable options related to backup file path.
The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
to I(no) this option will be silently ignored.
suboptions:
filename:
description:
        - The filename to be used to store the backup configuration. If the filename
is not given it will be generated based on the hostname, current time and date
in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
- This option provides the path ending with directory name in which the backup
configuration file will be stored. If the directory does not exist it will be first
created and the filename is either the value of C(filename) or default filename
as described in C(filename) options description. If the path value is not given
in that case a I(backup) directory will be created in the current working directory
and backup configuration will be copied in C(filename) within I(backup) directory.
type: path
type: dict
version_added: "2.8"
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
---
vars:
cli:
host: "{{ inventory_hostname }}"
username: cisco
password: cisco
authorize: yes
auth_pass: cisco
---
- asa_config:
lines:
- network-object host 10.80.30.18
- network-object host 10.80.30.19
- network-object host 10.80.30.20
parents: ['object-group network OG-MONITORED-SERVERS']
provider: "{{ cli }}"
- asa_config:
host: "{{ inventory_hostname }}"
lines:
- message-length maximum client auto
- message-length maximum 512
match: line
parents: ['policy-map type inspect dns PM-DNS', 'parameters']
authorize: yes
auth_pass: cisco
username: admin
password: cisco
context: ansible
- asa_config:
lines:
- ikev1 pre-shared-key MyS3cretVPNK3y
parents: tunnel-group 1.1.1.1 ipsec-attributes
passwords: yes
provider: "{{ cli }}"
- name: attach ASA acl on interface vlan13/nameif cloud13
asa_config:
lines:
- access-group cloud-acl_access_in in interface cloud13
provider: "{{ cli }}"
- name: configure ASA (>=9.2) default BGP
asa_config:
lines:
- bgp log-neighbor-changes
- bgp bestpath compare-routerid
provider: "{{ cli }}"
parents:
- router bgp 65002
register: bgp
when: bgp_default_config is defined
- name: configure ASA (>=9.2) BGP neighbor in default/single context mode
asa_config:
lines:
- "bgp router-id {{ bgp_router_id }}"
- "neighbor {{ bgp_neighbor_ip }} remote-as {{ bgp_neighbor_as }}"
- "neighbor {{ bgp_neighbor_ip }} description {{ bgp_neighbor_name }}"
provider: "{{ cli }}"
parents:
- router bgp 65002
- address-family ipv4 unicast
register: bgp
when: bgp_neighbor_as is defined
- name: configure ASA interface with standby
asa_config:
lines:
- description my cloud interface
- nameif cloud13
- security-level 50
- ip address 192.168.13.1 255.255.255.0 standby 192.168.13.2
provider: "{{ cli }}"
parents: ["interface Vlan13"]
register: interface
- name: Show changes to interface from task above
debug:
var: interface
- name: configurable backup path
asa_config:
lines:
- access-group cloud-acl_access_in in interface cloud13
provider: "{{ cli }}"
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/asa_config.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.asa.asa import asa_argument_spec, check_args
from ansible.module_utils.network.asa.asa import get_config, load_config, run_commands
from ansible.module_utils.network.common.config import NetworkConfig, dumps
from ansible.module_utils._text import to_native
def get_candidate(module):
    """Build the candidate NetworkConfig from module params.

    Loads from ``src`` when given; otherwise adds ``lines`` under the
    optional ``parents`` hierarchy. Returns an empty config if neither
    parameter is supplied.
    """
    candidate = NetworkConfig(indent=1)
    src = module.params['src']
    if src:
        candidate.load(src)
    elif module.params['lines']:
        candidate.add(module.params['lines'],
                      parents=module.params['parents'] or list())
    return candidate
def run(module, result):
    """Compute the configuration diff and (outside check mode) apply it.

    Mutates *result* in place, setting 'updates' to the command list and
    'changed' when commands are pushed or the config is saved.
    """
    match = module.params['match']
    replace = module.params['replace']
    path = module.params['parents']
    candidate = get_candidate(module)
    if match != 'none':
        # Diff the candidate against the supplied config, or fetch the
        # running config from the device if none was supplied.
        contents = module.params['config']
        if not contents:
            contents = get_config(module)
        config = NetworkConfig(indent=1, contents=contents)
        configobjs = candidate.difference(config, path=path, match=match,
                                          replace=replace)
    else:
        # match=none: push the candidate as-is, without comparing.
        configobjs = candidate.items
    if configobjs:
        commands = dumps(configobjs, 'commands').split('\n')
        # 'before'/'after' wrap the diff only when 'lines' was the source.
        if module.params['lines']:
            if module.params['before']:
                commands[:0] = module.params['before']
            if module.params['after']:
                commands.extend(module.params['after'])
        result['updates'] = commands
        # send the configuration commands to the device and merge
        # them with the current running config
        if not module.check_mode:
            load_config(module, commands)
        result['changed'] = True
    if module.params['save']:
        # Persist running-config to startup-config; skipped in check mode.
        if not module.check_mode:
            run_commands(module, 'write mem')
        result['changed'] = True
def main():
    """Main entry point for module execution.

    Builds the argument spec, validates mutually-exclusive and conditional
    requirements, optionally records a config backup, then delegates the
    diff/apply work to run().
    """
    backup_spec = dict(
        filename=dict(),
        dir_path=dict(type='path')
    )
    argument_spec = dict(
        src=dict(type='path'),
        lines=dict(aliases=['commands'], type='list'),
        parents=dict(type='list'),
        before=dict(type='list'),
        after=dict(type='list'),
        match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
        replace=dict(default='line', choices=['line', 'block']),
        backup_options=dict(type='dict', options=backup_spec),
        config=dict(),
        defaults=dict(type='bool', default=False),
        passwords=dict(type='bool', default=False),
        backup=dict(type='bool', default=False),
        save=dict(type='bool', default=False),
    )
    argument_spec.update(asa_argument_spec)
    # lines/src and parents/src are alternative ways to supply config;
    # defaults and passwords select incompatible 'show running-config' forms.
    mutually_exclusive = [('lines', 'src'),
                          ('parents', 'src'),
                          ('defaults', 'passwords')]
    required_if = [('match', 'strict', ['lines']),
                   ('match', 'exact', ['lines']),
                   ('replace', 'block', ['lines'])]
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=mutually_exclusive,
                           required_if=required_if,
                           supports_check_mode=True)
    result = {'changed': False}
    check_args(module)
    # (Removed dead assignment ``config = None`` -- it was never read.)
    if module.params['backup']:
        result['__backup__'] = get_config(module)
    run(module, result)
    module.exit_json(**result)


if __name__ == '__main__':
    main()
|
{
"content_hash": "e1e0a1573b79813ab80d50900daac4a7",
"timestamp": "",
"source": "github",
"line_count": 366,
"max_line_length": 98,
"avg_line_length": 34.833333333333336,
"alnum_prop": 0.650247078202212,
"repo_name": "thaim/ansible",
"id": "ad49118e8afbac7bae33cde966d407776a97a1bf",
"size": "12892",
"binary": false,
"copies": "23",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/network/asa/asa_config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
from upload_image_box.example.forms import *
from django.conf import settings
from upload_image_box.settings import *
# Example view
def upload_example(request):
    """Render the example upload page with blank crop and no-crop forms.

    Always builds fresh, unbound forms regardless of the request method.
    """
    # if you want to upload inside a custom directory
    # request.session['CUSTOM_CROPPED_IMG_DIRECTORY'] = CUSTOM_CROPPED_IMG_DIRECTORY
    context = {
        "post": request.POST,
        "form_no_crop": uploadedImagesNoCropForm(),
        "form_crop": uploadedImagesCropForm(),
    }
    return render(request, 'upload_image_box/upload_example.html', context)
|
{
"content_hash": "c28f88575cf3d52b23f92ca16aeb1538",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 84,
"avg_line_length": 32.857142857142854,
"alnum_prop": 0.7028985507246377,
"repo_name": "entpy/beauty-and-pics",
"id": "eab6460aacabc6a22201f3167c8793074ece608d",
"size": "715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beauty_and_pics/upload_image_box/example/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "43305"
},
{
"name": "HTML",
"bytes": "318159"
},
{
"name": "JavaScript",
"bytes": "98290"
},
{
"name": "Python",
"bytes": "540131"
}
],
"symlink_target": ""
}
|
# Python 2 integration test: re-POST every local study to the
# add_update_studies endpoint, then check the studies index grew (or stayed
# the same, if all studies already existed).
import sys, os
from opentreetesting import test_http_json_method, config
# peyotl setup
from peyotl.api.phylesystem_api import PhylesystemAPI
DOMAIN = config('host', 'apihost')
# get results from about method before
CONTROLLER = DOMAIN + '/v4/studies'
SUBMIT_URI = CONTROLLER + '/about'
r = test_http_json_method(SUBMIT_URI,
                          'GET',
                          expected_status=200,
                          return_bool_data=True)
assert r[0] is True
# Baseline counts, compared against the post-update counts at the end.
nstudies_start = r[1]['number_studies']
ntrees_start = r[1]['number_trees']
notus_start = r[1]['number_otus']
ncurators_start = r[1]['number_curators']
print "START: studies, tree, otus, curators: {s}, {t}, {o}, {c}".format(
    s=nstudies_start,
    t=ntrees_start,
    o=notus_start,
    c=ncurators_start
)
CONTROLLER = DOMAIN + '/v3'
SUBMIT_URI = CONTROLLER + '/add_update_studies'
phylesystem_api_wrapper = PhylesystemAPI(get_from='local')
phy = phylesystem_api_wrapper.phylesystem_obj
# Window of studies to submit: those with start < counter <= limit.
counter = 0
start = 0
limit = start+8000
# NOTE(review): with start=0 the first study yielded is skipped (0 > 0 is
# false) -- confirm whether that offset behavior is intended.
for study_id, studyobj in phy.iter_study_objs():
    if counter>start:
        p = [study_id]
        r = test_http_json_method(SUBMIT_URI,
                                  'POST',
                                  data=p,
                                  expected_status=200,
                                  return_bool_data=True)
        assert r[0] is True
        print "updated study",study_id
    counter+=1
    if counter>limit:
        break
# get results from about method after
CONTROLLER = DOMAIN + '/v4/studies'
SUBMIT_URI = CONTROLLER + '/about'
r = test_http_json_method(SUBMIT_URI,
                          'GET',
                          expected_status=200,
                          return_bool_data=True)
assert r[0] is True
nstudies_end = r[1]['number_studies']
ntrees_end = r[1]['number_trees']
notus_end = r[1]['number_otus']
ncurators_end = r[1]['number_curators']
print "END: studies, tree, otus, curators: {s}, {t}, {o}, {c}".format(
    s=nstudies_end,
    t=ntrees_end,
    o=notus_end,
    c=ncurators_end
)
# if study existed, then would be equal, otherwise greater
assert nstudies_end >= nstudies_start
assert ntrees_end >= ntrees_start
assert notus_end >= notus_start
assert ncurators_end >= ncurators_start
|
{
"content_hash": "79b716aa7a6ba4753f198aeed064f3d0",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 72,
"avg_line_length": 30.931506849315067,
"alnum_prop": 0.6018600531443755,
"repo_name": "kcranston/ottreeindex",
"id": "24739924bef68d8ee3225b01c4c90333ac592fbb",
"size": "2315",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ws-tests/tests_that_modify_db/test_v3_studies_update2.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "2874"
},
{
"name": "Python",
"bytes": "31739"
}
],
"symlink_target": ""
}
|
"""
==================================
Shifting time-scale in evoked data
==================================
"""
# Author: Mainak Jas <mainak@neuro.hut.fi>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.viz import tight_layout
from mne.datasets import sample
print(__doc__)
data_path = sample.data_path()
fname = data_path + '/MEG/sample/sample_audvis-ave.fif'
# Reading evoked data
condition = 'Left Auditory'
evoked = mne.read_evokeds(fname, condition=condition, baseline=(None, 0),
proj=True)
ch_names = evoked.info['ch_names']
picks = mne.pick_channels(ch_names=ch_names, include=["MEG 2332"])
# Create subplots
f, (ax1, ax2, ax3) = plt.subplots(3)
evoked.plot(exclude=[], picks=picks, axes=ax1,
titles=dict(grad='Before time shifting'), time_unit='s')
# Apply relative time-shift of 500 ms
evoked.shift_time(0.5, relative=True)
evoked.plot(exclude=[], picks=picks, axes=ax2,
titles=dict(grad='Relative shift: 500 ms'), time_unit='s')
# Apply absolute time-shift of 500 ms
evoked.shift_time(0.5, relative=False)
evoked.plot(exclude=[], picks=picks, axes=ax3,
titles=dict(grad='Absolute shift: 500 ms'), time_unit='s')
tight_layout()
|
{
"content_hash": "02d9ea0bc7c5e8469eaed4f3fca8eb80",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 73,
"avg_line_length": 26.48936170212766,
"alnum_prop": 0.642570281124498,
"repo_name": "adykstra/mne-python",
"id": "808d011249a5a97e7c20c7439dd6c053498e1cc9",
"size": "1245",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "examples/preprocessing/plot_shift_evoked.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "69806"
},
{
"name": "Makefile",
"bytes": "3928"
},
{
"name": "Python",
"bytes": "6001033"
},
{
"name": "Shell",
"bytes": "936"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class USqlTableColumn(Model):
    """A Data Lake Analytics catalog U-SQL table column item.

    :param name: the name of the column in the table.
    :type name: str
    :param type: the object type of the specified column (such as
     System.String).
    :type type: str
    """

    # Wire-format serialization map consumed by the msrest base Model.
    _attribute_map = {
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
    }

    def __init__(self, name=None, type=None):
        # ``type`` mirrors the service schema and intentionally shadows
        # the builtin of the same name.
        super(USqlTableColumn, self).__init__()
        self.type = type
        self.name = name
|
{
"content_hash": "ef977cd899ab87c2d8b0c69a3b11d76c",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 65,
"avg_line_length": 27,
"alnum_prop": 0.5875420875420876,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "07807ab364448a2024049143ee7ccb61d02940c1",
"size": "1068",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/catalog/models/usql_table_column.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
}
|
import tensorflow as tf
from ..utils.argtools import graph_memoized
"""
Utils about parsing dependencies in the graph.
"""
__all__ = [
'dependency_of_targets', 'dependency_of_fetches'
]
@graph_memoized
def dependency_of_targets(targets, op):
    """
    Check that op is in the subgraph induced by the dependencies of targets.
    The result is memoized.
    This is useful if some SessionRunHooks should be run only together with certain ops.

    Args:
        targets: a tuple of ops or tensors. The targets to find dependencies of.
        op (tf.Operation or tf.Tensor):

    Returns:
        bool: True if any one of `targets` depend on `op`.
    """
    # TODO tensorarray? sparsetensor?
    if isinstance(op, tf.Tensor):
        # Normalize a tensor to its producing operation.
        op = op.op
    assert isinstance(op, tf.Operation), op
    # Prefer the contrib location; fall back to the internal op_selector
    # module when the contrib import is unavailable (newer TF versions).
    try:
        from tensorflow.contrib.graph_editor import get_backward_walk_ops  # deprecated
    except ImportError:
        from tensorflow.python.ops.op_selector import get_backward_walk_ops
    # alternative implementation can use graph_util.extract_sub_graph
    # Walk backwards from targets, following control inputs as well.
    dependent_ops = get_backward_walk_ops(targets, control_inputs=True)
    return op in dependent_ops
def dependency_of_fetches(fetches, op):
    """
    Check that op is in the subgraph induced by the dependencies of fetches.
    fetches may have more general structure.

    Args:
        fetches: An argument to `sess.run`. Nested structure will affect performance.
        op (tf.Operation or tf.Tensor):

    Returns:
        bool: True if any of `fetches` depend on `op`.
    """
    try:
        # Let TF's own fetch machinery flatten arbitrary nested fetches.
        from tensorflow.python.client.session import _FetchHandler as FetchHandler
        # use the graph of the op, so that this function can be called without being under a default graph
        handler = FetchHandler(op.graph, fetches, {})
        targets = tuple(handler.fetches() + handler.targets())
    except ImportError:
        # FetchHandler is a private API; fall back to a best-effort
        # flattening that only understands lists and single fetches.
        if isinstance(fetches, dict):
            raise ValueError("Don't know how to parse dictionary to fetch list! "
                             "This is a bug of tensorpack.")
        targets = tuple(fetches) if isinstance(fetches, list) else (fetches, )
    return dependency_of_targets(targets, op)
if __name__ == '__main__':
    # Smoke test: two independent random tensors. `a` trivially depends on
    # itself (expect True); the fetch list [a, b] depends on a (expect True).
    a = tf.random_normal(shape=[3, 3])
    b = tf.random_normal(shape=[3, 3])
    print(dependency_of_fetches(a, a))
    print(dependency_of_fetches([a, b], a))
|
{
"content_hash": "f28dd1ab239ddd324671453b2eac2243",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 106,
"avg_line_length": 32.54666666666667,
"alnum_prop": 0.6566980745596067,
"repo_name": "ppwwyyxx/tensorpack",
"id": "b63b0249d977a0ee7a28d30fd2a2eae04870ada0",
"size": "2442",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorpack/tfutils/dependency.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "731254"
},
{
"name": "Shell",
"bytes": "1581"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.