prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
from htmldoc import *
from chunk import *
from module import *
from js import *
#? This global variable is used to specify where the javascript files are on the server.
JavascriptDir = "./"

#<_ view="internal">A Marker showing where the RPCs are installed</_>
rpcim = Marker("js")

# HTML/JS fragment that creates the client-side `server` RPC namespace.
# Individual RPC installation lines are inserted at the `rpcim` marker.
# (A stray dataset-join artifact inside the string literal was removed.)
rpcs = ["""
<script language='JavaScript'>
var server = {};
// RPC calls are installed here
""", rpcim, "// End RPC call installation.\n</script>\n"]

#? The json module is defined
jsonModule = Module("json", jsm, [("head", ["<script language='JavaScript' src='%sjson2.js'>1;</script>\n<script language='JavaScript' src='%sjsonreq.js'>1;</script>\n" % (JavascriptDir, JavascriptDir)] + rpcs)])
class DefineRpc:
    """Describes a server-side RPC exposed to client-side JavaScript."""

    def __init__(self, rpcname):
        # Name under which the RPC is installed on the JS `server` object.
        self.name = rpcname

    def call(self, *jsargs):
        """Return a JavaScript expression invoking this RPC with *jsargs*.

        Arguments are stringified verbatim, so string literals must already
        be quoted by the caller (e.g. ``"'arg1'"``).
        """
        arglist = ",".join(str(arg) for arg in jsargs)
        return "server.%s(%s)" % (self.name, arglist)

    def gen(self, doc):
        """Register the js/json modules and install this RPC into *doc*."""
        doc.AddModule(jsModule, LocationBoth)
        doc.AddModule(jsonModule, LocationBoth)
        doc.Insert("InstallFunction(server, '%s');\n" % self.name, rpcim)
def actionDynGet(element, uri):
    """Return a JS statement replacing *element*'s children with *uri*'s content.

    *element* may be a chunk-like object exposing ``getId()``, or a plain
    element-id string.
    """
    try:
        # Chunk-like objects know their own id.
        eid = element.getId()
    except AttributeError:
        # Narrowed from a bare `except:` — only a missing getId() means
        # the caller passed the id itself; other errors should propagate.
        eid = str(element)
    return "ReplaceChildrenWithUri('%s','%s');" % (eid, str(uri))
def actionDynGetScript(element, uri, js):
    """Return JS that loads *uri* into *element* and then loads script *js*.

    The script is attached under an id derived from the element id with a
    ``script`` suffix.
    """
    try:
        # Chunk-like objects know their own id.
        eid = element.getId()
    except AttributeError:
        # Narrowed from a bare `except:` — only a missing getId() means
        # the caller passed the id itself; other errors should propagate.
        eid = str(element)
    return "ReplaceChildrenWithUri('%s','%s'); LoadScript('%s','%s');" % (eid, str(uri), eid + "script", js)
#<example>
def Test():
import gen
from attribute import *
cdlt = Chunk("Click for Dynamic load test")
replaceme = Chunk("this will be replaced")
action(cdlt,"onClick",actionDynGet(replaceme,"testjsondyn.html"))
rpc = DefineRpc("rpctest")
b1 = Chunk("RPC")
action(b1,"onClick",rpc.call("'arg1'",5))
d = [cdlt,replaceme,rpc,b1]
gen.WriteFile("testjson.html",d)
#</example>
|
"""Test template specific functionality.
Make sure tables expose their functionality to templates right. This
is generally about testing "out"-functionality of the tables, whether
via templates or otherwise. Whether a test belongs here or, say, in
``test_basic``, is not always a clear-cut decision.
"""
from django.template import Template, Context, add_to_builtins
from django.http import HttpRequest
import django_tables as tables
def test_order_by():
    """``order_by`` tuples stringify to a querystring-ready value."""
    class BookTable(tables.Table):
        id = tables.Column()
        name = tables.Column()
    table = BookTable([
        {'id': 1, 'name': 'Foo: Bar'},
    ])
    # Cast to a string we get a value ready to be passed to the querystring.
    table.order_by = ('name',)
    assert str(table.order_by) == 'name'
    table.order_by = ('name', '-id')
    assert str(table.order_by) == 'name,-id'
def test_columns_and_rows():
    """Exercise column visibility, naming, and row access on a table."""
    class CountryTable(tables.Table):
        name = tables.TextColumn()
        capital = tables.TextColumn(sortable=False)
        population = tables.NumberColumn(verbose_name="Population Size")
        currency = tables.NumberColumn(visible=False, inaccessible=True)
        tld = tables.TextColumn(visible=False, verbose_name="Domain")
        calling_code = tables.NumberColumn(name="cc", verbose_name="Phone Ext.")
    # Rows deliberately omit some keys to exercise missing-value handling.
    countries = CountryTable(
        [{'name': 'Germany', 'capital': 'Berlin', 'population': 83, 'currency': 'Euro (€)', 'tld': 'de', 'cc': 49},
         {'name': 'France', 'population': 64, 'currency': 'Euro (€)', 'tld': 'fr', 'cc': 33},
         {'name': 'Netherlands', 'capital': 'Amsterdam', 'cc': '31'},
         {'name': 'Austria', 'cc': 43, 'currency': 'Euro (€)', 'population': 8}])
    # Only the four visible columns are exposed by iteration.
    assert len(list(countries.columns)) == 4
    assert len(list(countries.rows)) == len(list(countries)) == 4
    # column name override, hidden columns
    assert [c.name for c in countries.columns] == ['name', 'capital', 'population', 'cc']
    # verbose_name, and fallback to field name
    assert [unicode(c) for c in countries.columns] == ['Name', 'Capital', 'Population Size', 'Phone Ext.']
    # data yielded by each row matches the defined columns
    for row in countries.rows:
        assert len(list(row)) == len(list(countries.columns))
    # we can access each column and row by name...
    assert countries.columns['population'].column.verbose_name == "Population Size"
    assert countries.columns['cc'].column.verbose_name == "Phone Ext."
    # ...even invisible ones
    assert countries.columns['tld'].column.verbose_name == "Domain"
    # ...and even inaccessible ones (but accessible to the coder)
    assert countries.columns['currency'].column == countries.base_columns['currency']
    # this also works for rows (value access must not raise)
    for row in countries:
        row['tld'], row['cc'], row['population']
    # certain data is available on columns
    assert countries.columns['currency'].sortable == True
    assert countries.columns['capital'].sortable == False
    assert countries.columns['name'].visible == True
    assert countries.columns['tld'].visible == False
def test_render():
"""For good measure, render some actual templates."""
class CountryTable(tables.Table):
name = tables.TextColumn()
capital = tab | les.TextColumn()
population = tables.NumberColumn(verbose_name="Population Size")
c | urrency = tables.NumberColumn(visible=False, inaccessible=True)
tld = tables.TextColumn(visible=False, verbose_name="Domain")
calling_code = tables.NumberColumn(name="cc", verbose_name="Phone Ext.")
countries = CountryTable(
[{'name': 'Germany', 'capital': 'Berlin', 'population': 83, 'currency': 'Euro (€)', 'tld': 'de', 'calling_code': 49},
{'name': 'France', 'population': 64, 'currency': 'Euro (€)', 'tld': 'fr', 'calling_code': 33},
{'name': 'Netherlands', 'capital': 'Amsterdam', 'calling_code': '31'},
{'name': 'Austria', 'calling_code': 43, 'currency': 'Euro (€)', 'population': 8}])
assert Template("{% for column in countries.columns %}{{ column }}/{{ column.name }} {% endfor %}").\
render(Context({'countries': countries})) == \
"Name/name Capital/capital Population Size/population Phone Ext./cc "
assert Template("{% for row in countries %}{% for value in row %}{{ value }} {% endfor %}{% endfor %}").\
render(Context({'countries': countries})) == \
"Germany Berlin 83 49 France None 64 33 Netherlands Amsterdam None 31 Austria None 8 43 "
print Template("{% for row in countries %}{% if countries.columns.name.visible %}{{ row.name }} {% endif %}{% if countries.columns.tld.visible %}{{ row.tld }} {% endif %}{% endfor %}").\
render(Context({'countries': countries})) == \
"Germany France Netherlands Austria"
def test_templatetags():
    """The bundled template tags are usable after add_to_builtins."""
    add_to_builtins('django_tables.app.templatetags.tables')
    # [bug] set url param tag handles an order_by tuple with multiple columns
    class MyTable(tables.Table):
        f1 = tables.Column()
        f2 = tables.Column()
    template = Template('{% set_url_param x=table.order_by %}')
    my_table = MyTable([], order_by=('f1', 'f2'))
    rendered = template.render({'request': HttpRequest(), 'table': my_table})
    assert rendered == '?x=f1%2Cf2'
|
format(len(sets)))
sys.stdout.write("\n")
sets = list(sets)
keys = list(range(len(sets)))
# Generate paddings for asym.
max_size = max(len(s) for s in sets)
paddings = dict()
if pad_for_asym:
padding_sizes = sorted(list(set([max_size-len(s) for s in sets])))
for num_perm in num_perms:
paddings[num_perm] = dict()
for i, padding_size in enumerate(padding_sizes):
if i == 0:
prev_size = 0
pad = MinHash(num_perm, hashfunc=_hash_32)
else:
prev_size = padding_sizes[i-1]
pad = paddings[num_perm][prev_size].copy()
for w in range(prev_size, padding_size):
pad.update(str(w)+"_tmZZRe8DE23s")
paddings[num_perm][padding_size] = pad
# Generate minhash
print("Creating MinHash...")
minhashes = dict()
for num_perm in num_perms:
print("Using num_parm = {}".format(num_perm))
ms = []
for s in sets:
m = MinHash(num_perm, hashfunc=_hash_32)
for word in s:
m.update(str(word))
if pad_for_asym:
# Add padding to the minhash
m.merge(paddings[num_perm][max_size-len(s)])
ms.append(m)
sys.stdout.write("\rMinhashed {} sets".format(len(ms)))
sys.stdout.write("\n")
minhashes[num_perm] = ms
return (minhashes, sets, keys)
def benchmark_lshensemble(threshold, num_perm, num_part, m, storage_config,
        index_data, query_data):
    """Build an LSH Ensemble index over pre-computed MinHashes, then run and
    time every query.

    Returns (results, probe_times, process_times) where probe_times measure
    the LSH lookup alone and process_times measure exact-containment
    post-filtering of the candidates.
    """
    print("Building LSH Ensemble index")
    (minhashes, indexed_sets, keys) = index_data
    lsh = MinHashLSHEnsemble(threshold=threshold, num_perm=num_perm,
            num_part=num_part, m=m, storage_config=storage_config)
    # Index takes (key, minhash, set_size) triples; fed lazily by generator.
    lsh.index((key, minhash, len(s))
              for key, minhash, s in \
                      zip(keys, minhashes[num_perm], indexed_sets))
    print("Querying")
    (minhashes, sets, keys) = query_data
    probe_times = []
    process_times = []
    results = []
    for qs, minhash in zip(sets, minhashes[num_perm]):
        # Record probing time
        start = time.perf_counter()
        result = list(lsh.query(minhash, len(qs)))
        probe_times.append(time.perf_counter() - start)
        # Record post processing time.
        start = time.perf_counter()
        [_compute_containment(qs, indexed_sets[key]) for key in result]
        process_times.append(time.perf_counter() - start)
        results.append(result)
        sys.stdout.write("\rQueried {} sets".format(len(results)))
    sys.stdout.write("\n")
    return results, probe_times, process_times
def benchmark_ground_truth(threshold, index, query_data):
    """Run every query set through the exact search *index*, timing each query.

    ``threshold`` is accepted for signature symmetry with the LSH benchmark;
    the caller configures it on *index* beforehand.
    Returns (results, times): per-query matched keys and wall-clock seconds.
    """
    _, query_sets, _ = query_data
    times, results = [], []
    for query_set in query_sets:
        started = time.perf_counter()
        matches = [key for key, _ in index.query(query_set)]
        times.append(time.perf_counter() - started)
        results.append(matches)
        sys.stdout.write("\rQueried {} sets".format(len(results)))
    sys.stdout.write("\n")
    return results, times
def _compute_containment(x, y):
if len(x) == 0 or len(y) == 0:
return 0.0
intersection = len(np.intersect1d(x, y, assume_unique=True))
return float(intersection) / float(len(x))
# Benchmark presets of increasing size. Each level sweeps a grid of
# containment thresholds, LSH Ensemble partition counts, and MinHash
# permutation counts; `m` is the ensemble's per-partition parameter.
levels = {
    # Minimal smoke-test configuration.
    "test": {
        "thresholds": [1.0,],
        "num_parts": [4,],
        "num_perms": [32,],
        "m": 2,
    },
    # Quick run with a small grid.
    "lite": {
        "thresholds": [0.5, 0.75, 1.0],
        "num_parts": [8, 16],
        "num_perms": [32, 64],
        "m": 8,
    },
    # Mid-sized sweep.
    "medium": {
        "thresholds": [0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
        "num_parts": [8, 16, 32],
        "num_perms": [32, 128, 224],
        "m": 8,
    },
    # Full grid used for published numbers.
    "complete": {
        "thresholds": [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0],
        "num_parts": [8, 16, 32],
        "num_perms": [32, 64, 96, 128, 160, 192, 224, 256],
        "m": 8,
    },
}
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Run LSH Ensemble benchmark using data sets obtained "
"from https://github.com/ekzhu/set-similarity-search-benchmarks.")
parser.add_argument("--indexed-sets", type=str, required=True,
help="Input indexed set file (gzipped), each line is a set: "
"<set_size> <1>,<2>,<3>..., where each <?> is an element.")
parser.add_argument("--query-sets", type=str, required=True,
help="Input query set file (gzipped), each line is a set: "
"<set_size> <1>,<2>,<3>..., where each <?> is an element.")
parser.add_argument("--query-results", type=str,
default="lshensemble_benchmark_query_results.csv")
parser.add_argument("--ground-truth-results", type=str,
default="lshensemble_benchmark_ground_truth_results.csv")
parser.add_argument("--indexed-sets-sample-ratio", type=float, default=0.1)
parser.add_argument("--level", type=str, choices=levels.keys(),
default="complete")
parser.add_argument("--skip-ground-truth", action="store_true")
parser.add_argument("--use-asym-minhash", action="store_true")
parser.add_argument("--use-redis", action="store_true")
parser.add_argument("--redis-host", type=str, default="localhost")
parser.add_argument("--redis-port", type=int, default=6379)
args = parser.parse_args(sys.argv[1:])
level = levels[args.level]
index_data, query_data = None, None
index_data_cache = "{}.pickle".format(args.indexed_sets)
query_data_cache = "{}.pickle".format(args.query_sets)
if os.path.exists(index_data_cache):
print("Using cached indexed sets {}".format(index_data_cache))
with open(index_data_cache, "rb") as d:
index_data = pickle.load(d)
else:
print("Using indexed sets {}".format(args.indexed_sets))
index_data = bootstrap_sets(args.indexed_sets,
args.indexed_sets_sample_ratio, num_perms=level["num_perms"],
pad_for_asym=args.use_asym_minhash)
with open(index_data_cache, "wb") as d:
pickle.dump(index_data, d)
if os.path.exists(query_data_cache):
print("Using cached query sets {}".format(query_data_cache))
with open(query_data_cache, "rb") as d:
query_data = pickle.load(d)
else:
print("Using query sets {}".format(args.query_sets))
query_data = bootstrap_sets(args.query_sets, 1.0,
num_perms=level["num_perms"], skip=0)
with open(query_data_cache, "wb") as d:
pickle.dump(query_data, d)
if not args.skip_ground_truth:
rows = []
# Build search index separately, only works for containment.
print("Building search index...")
index = SearchIndex(index_data[1], similarity_func_name="containment",
similarity_threshold=0.1)
for threshold in level["thresholds"]:
index.similarity_threshold = threshold
print("Running ground truth benchmark threshold = {}".format(threshold))
ground_truth_results, ground_truth_times = \
benchmark_ground_truth(threshold, index, query_data)
for t, r, query_set, query_key in zip | (ground_truth_times,
ground_truth_results, query_data[1], query_data[2]):
rows.append((query_key, len(query_set), threshold, t,
",".join(str(k) for k in r)))
df_groundtruth = pd.DataFrame.from_records(rows,
columns=["query_key", "query_size", "threshold",
"query_time", "results"])
df_groundtruth.to_csv(args.ground_truth_results)
storage_config = {"type": "dict"}
if args.use_ | redis:
storage_config = {
"type": "redis",
"redis": {
"host": args.redis_host,
"port": args.redis_port,
},
}
rows = []
for threshold in level["thresholds"]:
for num_part in level["num_parts"]:
for num_perm in level["num_perms"]:
|
"""
The Fibonacci numbers, which we are all familiar with, start like this:
0,1,1,2,3,5,8,13,21,34,...
Where each new number in the sequence is the sum of the previous two.
It turns out that by summing different Fibonacci numbers with each other, you can create every single positive integer.
In fact, a much stronger statement holds:
Every single positive integer can be represented in one and only one way as a sum of non-consecutive Fibonacci numbers.
This is called the number's "Zeckendorf representation" [http://en.wikipedia.org/wiki/Zeckendorf%27s_theorem].
For instance, the Zeckendorf representation of the number 100 is 89 + 8 + 3, and the Zeckendorf representation of 1234
is 987 + 233 + 13 + 1. Note that all these numbers are Fibonacci numbers, and that they are non-consecutive (i.e. no
two numbers in a Zeckendorf representation can be next to each other in the Fibonacci sequence).
There are other ways of summing Fibonacci numbers to get these numbers. For instance, 100 is also equal to 89 + 5 + 3 +
2 + 1, but 1, 2, 3, 5 are all consecutive Fibonacci numbers. If no consecutive Fibonacci numbers are allowed, the
representation is unique.
Finding the Zeckendorf representation is actually not very hard. Lets use the number 100 as an example of how it's done:
First, you find the largest fibonacci number less than or equal to 100. In this case that is 89. This number will always
be part of the representation, so we remember that number and proceed recursively, and figure out the representation of
100 - 89 = 11.
The largest Fibonacci number less than or equal to 11 is 8. We remember that number and proceed recursively with
11 - 8 = 3.
3 is a Fibonacci number itself, so now we're done. The answer is 89 + 8 + 3.
Write a program that finds the Zeckendorf representation of different numbers.
What is the Zeckendorf representation of 315 ?
Thanks to SwimmingPastaDevil for suggesting this problem in /r/dailyprogrammer_ideas! Do you have a problem you
think would be good for us? Why not head over there and post it?
"""
def zeckendorf(target, fib_list):
    """Greedily decompose *target* as a sum of numbers from *fib_list*.

    Scans the list from largest to smallest, taking each number that still
    fits; returns the chosen numbers in descending order. With a proper
    Fibonacci list this greedy choice yields the Zeckendorf representation.
    """
    chosen = []
    remaining = target
    for fib in reversed(fib_list):
        if fib <= remaining:
            chosen.append(fib)
            remaining -= fib
    return chosen
def get_fibonacci_list(target):
    """Return the distinct Fibonacci numbers <= target (no zero, single 1).

    Fixes two defects in the original: it returned a duplicate leading 1
    and, when *target* was itself a Fibonacci number, excluded it — so the
    greedy decomposition of e.g. 89 produced the consecutive pair 55 + 34
    instead of the true Zeckendorf representation [89].
    """
    if target < 1:
        return []
    fib = [1, 2]
    while fib[-1] + fib[-2] <= target:
        fib.append(fib[-1] + fib[-2])
    # For target == 1 the seed list already overshoots; trim the 2.
    return fib if fib[-1] <= target else fib[:-1]
def main():
    """Compute and print the Zeckendorf representation of 3**15."""
    target = 3 ** 15
    fibs = get_fibonacci_list(target)
    representation = zeckendorf(target, fibs)
    print(representation)
    # The sum of the representation should reproduce the target.
    print(' 3**15 = {} \nsum of zeckendorf = {}'.format(3 ** 15, sum(representation)))
if __name__ == "__main__":
main()
|
#-*- coding: utf-8 -*-
import inspect
from django import forms
from django.conf import settings as globalsettings
from django.contrib.admin.widgets import ForeignKeyRawIdWidget
from django.contrib.admin.sites import site
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.db import models
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
from filer.utils.compatibility import truncate_words
from filer.models import File
from filer import settings as filer_settings
import logging
logger = logging.getLogger(__name__)
class AdminFileWidget(ForeignKeyRawIdWidget):
    """Raw-id admin widget for filer File foreign keys: renders the hidden
    id input plus a thumbnail/description block via a template."""
    choices = None

    def render(self, name, value, attrs=None):
        """Render the widget HTML for field *name* with current pk *value*."""
        obj = self.obj_for_value(value)
        css_id = attrs.get('id', 'id_image_x')
        # Derived element ids used by the JS popup handler.
        css_id_thumbnail_img = "%s_thumbnail_img" % css_id
        css_id_description_txt = "%s_description_txt" % css_id
        related_url = None
        if value:
            try:
                # Deep-link the lookup into the folder that holds the file.
                file_obj = File.objects.get(pk=value)
                related_url = file_obj.logical_folder.\
                        get_admin_directory_listing_url_path()
            except Exception,e:
                # catch exception and manage it. We can re-raise it for debugging
                # purposes and/or just logging it, provided user configured
                # proper logging configuration
                if filer_settings.FILER_ENABLE_LOGGING:
                    logger.error('Error while rendering file widget: %s',e)
                if filer_settings.FILER_DEBUG:
                    raise e
        if not related_url:
            # Fall back to the last-visited directory listing.
            related_url = reverse('admin:filer-directory_listing-last')
        params = self.url_parameters()
        if params:
            lookup_url = '?' + '&'.join(
                    ['%s=%s' % (k, v) for k, v in params.items()])
        else:
            lookup_url = ''
        if not 'class' in attrs:
            # The JavaScript looks for this hook.
            attrs['class'] = 'vForeignKeyRawIdAdminField'
        # rendering the super for ForeignKeyRawIdWidget on purpose here because
        # we only need the input and none of the other stuff that
        # ForeignKeyRawIdWidget adds
        hidden_input = super(ForeignKeyRawIdWidget, self).render(
                name, value, attrs)
        filer_static_prefix = filer_settings.FILER_STATICMEDIA_PREFIX
        if not filer_static_prefix[-1] == '/':
            filer_static_prefix += '/'
        # Context consumed by admin/filer/widgets/admin_file.html.
        context = {
            'hidden_input': hidden_input,
            'lookup_url': '%s%s' % (related_url, lookup_url),
            'thumb_id': css_id_thumbnail_img,
            'span_id': css_id_description_txt,
            'object': obj,
            'lookup_name': name,
            'filer_static_prefix': filer_static_prefix,
            'clear_id': '%s_clear' % css_id,
            'id': css_id,
        }
        html = render_to_string('admin/filer/widgets/admin_file.html', context)
        return mark_safe(html)

    def label_for_value(self, value):
        """Short bold label shown next to the raw-id input."""
        obj = self.obj_for_value(value)
        return '&nbsp;<strong>%s</strong>' % truncate_words(obj, 14)

    def obj_for_value(self, value):
        """Resolve *value* (a pk) to the related instance, or None on any
        lookup failure (missing object, empty value, bad pk)."""
        try:
            key = self.rel.get_related_field().name
            obj = self.rel.to._default_manager.get(**{key: value})
        except:
            obj = None
        return obj

    class Media:
        # JS that wires up the lookup popup / clear behaviour.
        js = (filer_settings.FILER_STATICMEDIA_PREFIX + 'js/popup_handling.js',)
class AdminFileFormField(forms.ModelChoiceField):
    """ModelChoiceField that always renders with AdminFileWidget."""
    widget = AdminFileWidget

    def __init__(self, rel, queryset, to_field_name, *args, **kwargs):
        self.rel = rel
        self.queryset = queryset
        self.to_field_name = to_field_name
        self.max_value = None
        self.min_value = None
        # Any caller-supplied widget is deliberately discarded.
        other_widget = kwargs.pop('widget', None)
        # The widget signature grew an admin_site argument in Django 1.4;
        # introspect to stay compatible with both.
        if 'admin_site' in inspect.getargspec(self.widget.__init__)[0]:  # Django 1.4
            widget_instance = self.widget(rel, site)
        else:  # Django <= 1.3
            widget_instance = self.widget(rel)
        # Bypass ModelChoiceField.__init__ so the queryset machinery does
        # not override our pre-built widget.
        forms.Field.__init__(self, widget=widget_instance, *args, **kwargs)

    def widget_attrs(self, widget):
        """Propagate `required` onto the widget; no extra HTML attrs."""
        widget.required = self.required
        return {}
class FilerFileField(models.ForeignKey):
    """ForeignKey that always targets filer's ``File`` model and uses the
    filer admin form field / widget."""
    default_form_class = AdminFileFormField
    default_model_class = File

    def __init__(self, **kwargs):
        # we call ForeignKey.__init__ with the File model as parameter...
        # a FilerFileField can only be a ForeignKey to a File
        return super(FilerFileField, self).__init__(
            self.default_model_class, **kwargs)

    def formfield(self, **kwargs):
        # This is a fairly standard way to set up some defaults
        # while letting the caller override them.
        defaults = {
            'form_class': self.default_form_class,
            'rel': self.rel,
        }
        defaults.update(kwargs)
        return super(FilerFileField, self).formfield(**defaults)

    def south_field_triple(self):
        "Returns a suitable description of this field for South."
        # We'll just introspect ourselves, since we inherit.
        # (Dataset-join corruption in the two lines below was repaired.)
        from south.modelsinspector import introspector
        field_class = "django.db.models.fields.related.ForeignKey"
        args, kwargs = introspector(self)
        # That's our definition!
        return (field_class, args, kwargs)
|
# Copyright (C) 2012 Thomas "stacks" Birn (@stacksth)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
from lib.cuckoo.common.abstracts import Signature
class InstallsWinpcap(Signature):
    """Flags analyses where the sample dropped WinPcap components."""
    name = "sniffer_winpcap"
    description = "Installs WinPCAP"
    severity = 3
    categories = ["sniffer"]
    authors = ["Thomas Birn", "nex"]
    minimum = "0.5"

    def run(self):
        # File-path regexes for the driver/DLLs a WinPcap install drops.
        # (Repaired a dataset-join corruption in the variable name.)
        indicators = [
            ".*\\\\packet\.dll$",
            ".*\\\\npf\.sys$",
            ".*\\\\wpcap\.dll$"
        ]
        for indicator in indicators:
            if self.check_file(pattern=indicator, regex=True):
                return True
        return False
|
parse import (
urlparse, urljoin, urlsplit, urlunsplit, parse_qs)
from .packages.tldextract import tldextract
log = logging.getLogger(__name__)

# Cap on memoized files/urls — presumably bounds a cache elsewhere in the
# module (TODO confirm against callers).
MAX_FILE_MEMO = 20000

# Path/subdomain keywords that suggest a link points at article content.
GOOD_PATHS = ['story', 'article', 'feature', 'featured', 'slides',
              'slideshow', 'gallery', 'news', 'video', 'media',
              'v', 'radio', 'press']
# Path/subdomain keywords that mark non-article site pages.
BAD_CHUNKS = ['careers', 'contact', 'about', 'faq', 'terms', 'privacy',
              'advert', 'preferences', 'feedback', 'info', 'browse', 'howto',
              'account', 'subscribe', 'donate', 'shop', 'admin']
# Domains that never host the articles we want.
BAD_DOMAINS = ['amazon', 'doubleclick', 'twitter']
def remove_args(url, keep_params=(), frags=False):
    """
    Remove all param arguments from a url.

    Query items whose name starts with an entry of ``keep_params`` survive;
    the fragment is dropped unless ``frags`` is true.
    """
    pieces = urlsplit(url)
    kept = [item for item in pieces.query.split('&')
            if item.startswith(keep_params)]
    tail = pieces[4:] if frags else ('',)
    return urlunsplit(pieces[:3] + ('&'.join(kept),) + tail)
def redirect_back(url, source_domain):
    """
    Some sites like Pinterest have api's that cause news
    args to direct to their site with the real news url as a
    GET param. This method catches that and returns our param.
    """
    parsed = urlparse(url)
    # Same (or related) domain/subdomain: no redirect to unwrap.
    if source_domain in parsed.netloc or parsed.netloc in source_domain:
        return url
    params = parse_qs(parsed.query)
    if params.get('url'):
        # log.debug('caught redirect %s into %s' % (url, params['url'][0]))
        return params['url'][0]
    return url
def prepare_url(url, source_url=None):
    """
    Operations that purify a url, removes arguments,
    redirects, and merges relatives with absolutes.

    Returns u'' when the url cannot be parsed.
    """
    try:
        if source_url is not None:
            source_domain = urlparse(source_url).netloc
            # Resolve relative urls against the source, then unwrap
            # redirect-style GET params and strip remaining arguments.
            proper_url = urljoin(source_url, url)
            proper_url = redirect_back(proper_url, source_domain)
            proper_url = remove_args(proper_url)
        else:
            proper_url = remove_args(url)
    except ValueError as e:
        # `except ... as` is valid on Python 2.6+ AND Python 3, unlike the
        # original comma form which is a SyntaxError on Python 3.
        log.critical('url %s failed on err %s' % (url, str(e)))
        # print 'url %s failed on err %s' % (url, str(e))
        proper_url = u''
    return proper_url
def valid_url(url, verbose=False, test=False):
    """
    Perform a regex check on an absolute url.

    First, perform a few basic checks like making sure the format of the url
    is right, (scheme, domain, tld).

    Second, make sure that the url isn't some static resource, check the
    file type.

    Then, search of a YYYY/MM/DD pattern in the url. News sites
    love to use this pattern, this is a very safe bet.

    Separators can be [\.-/_]. Years can be 2 or 4 digits, must
    have proper digits 1900-2099. Months and days can be
    ambiguous 2 digit numbers, one is even optional, some sites are
    liberal with their formatting also matches snippets of GET
    queries with keywords inside them. ex: asdf.php?topic_id=blahlbah
    We permit alphanumeric, _ and -.

    Our next check makes sure that a keyword is within one of the
    separators in a url (subdomain or early path separator).
    cnn.com/story/blah-blah-blah would pass due to "story".

    We filter out articles in this stage by aggressively checking to
    see if any resemblance of the source& domain's name or tld is
    present within the article title. If it is, that's bad. It must
    be a company link, like 'cnn is hiring new interns'.

    We also filter out articles with a subdomain or first degree path
    on a registered bad keyword.
    """
    DATE_REGEX = r'([\./\-_]{0,1}(19|20)\d{2})[\./\-_]{0,1}(([0-3]{0,1}[0-9][\./\-_])|(\w{3,5}[\./\-_]))([0-3]{0,1}[0-9][\./\-]{0,1})?'
    ALLOWED_TYPES = ['html', 'htm', 'md', 'rst']  # TODO add more!
    # if we are testing this method in the testing suite, we actually
    # need to preprocess the url like we do in the article's constructor!
    if test:
        url = prepare_url(url)
    # 11 chars is shortest valid url length, eg: http://x.co
    if url is None or len(url) < 11:
        if verbose: print '\t%s rejected because len of url is less than 11' % url
        return False
    r1 = ('mailto:' in url)  # TODO not sure if these rules are redundant
    r2 = ('http://' not in url) and ('https://' not in url)
    if r1 or r2:
        if verbose: print '\t%s rejected because len of url structure' % url
        return False
    path = urlparse(url).path
    # input url is not in valid form (scheme, netloc, tld)
    if not path.startswith('/'):
        return False
    # the '/' which may exist at the end of the url provides us no information
    if path.endswith('/'):
        path = path[:-1]
    # '/story/cnn/blahblah/index.html' --> ['story', 'cnn', 'blahblah', 'index.html']
    path_chunks = [x for x in path.split('/') if len(x) > 0]
    # siphon out the file type. eg: .html, .htm, .md
    if len(path_chunks) > 0:
        last_chunk = path_chunks[-1].split('.')  # last chunk == file usually
        file_type = last_chunk[-1] if len(last_chunk) >= 2 else None
        # if the file type is a media type, reject instantly
        if file_type and file_type not in ALLOWED_TYPES:
            if verbose: print '\t%s rejected due to bad filetype' % url
            return False
        # the file type is not of use to use anymore, remove from url
        if len(last_chunk) > 1:
            path_chunks[-1] = last_chunk[-2]
    # Index gives us no information
    if 'index' in path_chunks:
        path_chunks.remove('index')
    # extract the tld (top level domain)
    tld_dat = tldextract.extract(url)
    subd = tld_dat.subdomain
    tld = tld_dat.domain.lower()
    url_slug = path_chunks[-1] if path_chunks else u''
    if tld in BAD_DOMAINS:
        if verbose: print '%s caught for a bad tld' % url
        return False
    if len(path_chunks) == 0:
        dash_count, underscore_count = 0, 0
    else:
        dash_count = url_slug.count('-')
        underscore_count = url_slug.count('_')
    # If the url has a news slug title (many dashed/underscored words),
    # accept it unless the slug merely repeats the site's own domain.
    if url_slug and (dash_count > 4 or underscore_count > 4):
        if dash_count >= underscore_count:
            if tld not in [ x.lower() for x in url_slug.split('-') ]:
                if verbose: print '%s verified for being a slug' % url
                return True
        if underscore_count > dash_count:
            if tld not in [ x.lower() for x in url_slug.split('_') ]:
                if verbose: print '%s verified for being a slug' % url
                return True
    # There must be at least 2 subpaths
    if len(path_chunks) <= 1:
        if verbose: print '%s caught for path chunks too small' % url
        return False
    # Check for subdomain & path red flags
    # Eg: http://cnn.com/careers.html or careers.cnn.com --> BAD
    for b in BAD_CHUNKS:
        if b in path_chunks or b == subd:
            if verbose: print '%s caught for bad chunks' % url
            return False
    match_date = re.search(DATE_REGEX, url)
    # if we caught the verified date above, it's an article
    if match_date is not None:
        if verbose: print '%s verified for date' % url
        return True
    if verbose: print '%s caught for default false' % url
    return False
def get_domain(abs_url, **kwargs):
    """
    returns a url's domain, this method exists to
    encapsulate all url code into this file
    """
    return None if abs_url is None else urlparse(abs_url, **kwargs).netloc
def get_scheme(abs_url, **kwargs):
    """Return the scheme of *abs_url* (e.g. 'http'), or None for None."""
    return None if abs_url is None else urlparse(abs_url, **kwargs).scheme
def get_path(abs_url, **kwargs):
    """Return the path component of *abs_url*, or None for None."""
    return None if abs_url is None else urlparse(abs_url, **kwargs).path
def is_abs_url(url):
"""
this regex was brought to you by django!
"""
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https:// |
import networkx as nx
import numpy as np
import pandas as pd
def normalise(values):
    """Shift-and-scale *values* in two steps: subtract the minimum, then
    divide by the (post-shift) maximum, mapping the data onto [0, 1]."""
    values = values[:]  # slice copy — NOTE: for numpy arrays this is a view,
                        # so the in-place ops below still touch the original
    values -= min(values)
    values /= max(values)
    return values
def jgraph(posjac):
    '''
    networkx graph object from posjac at timestep

    Log-scales the positive Jacobian, drops infs/NaNs, and inverts the
    normalised values so strong couplings get small edge weights.
    (Repaired a dataset-join corruption in the first expression.)
    '''
    posjac = 1 - normalise(np.log10(posjac).replace([np.inf, -np.inf], np.nan).dropna())
    # Each index label is 'source->target'.
    split = [i.split('->') for i in posjac.index]
    # graph
    G = nx.DiGraph()
    for e in range(len(split)):
        G.add_edge(split[e][0], split[e][1], weight=posjac[e])
    # NOTE(review): Graph.selfloop_edges() was removed in networkx >= 2.4;
    # newer versions need nx.selfloop_edges(G).
    G.remove_edges_from(G.selfloop_edges())
    return G
def getnx(self, ts, save=False):
    '''
    Create a networkx graph from a DSMACC new class

    Usage:
        getnx(a, a.ts[-1], 'propane')
    '''
    # Ensure the positive-Jacobian table exists on the instance.
    # (Repaired a dataset-join corruption in the method name.)
    self.create_posjac()
    G = nx.DiGraph()
    posjac = self.posjac.loc[ts, :]
    split = [i.split('->') for i in posjac.index]
    for e in range(len(split)):
        G.add_edge(split[e][0], split[e][1], weight=posjac[e])
    # NOTE(review): Graph.selfloop_edges() was removed in networkx >= 2.4.
    G.remove_edges_from(G.selfloop_edges())
    if save:
        nx.write_weighted_edgelist(G, save + '.wedgelist')
        # G=nx.read_weighted_edgelist('propane.wedgelist',create_using=nx.DiGraph)
    return G
def pagerank(a):
    """Hourly-average the Jacobian, build graphs, rank them, return a frame."""
    hourly = group_hour(a.jacsp)
    ranked = metric(tograph(hourly))
    return geobj2df(ranked)
def tograph(jac):
    '''
    Use hourly avg

    Builds one weighted digraph per Jacobian row; returns a list of
    {'graph': DiGraph, 'time': timestamp} dicts.
    '''
    rt = []
    for t in jac.iterrows():
        jacsp = t[1]
        # inverse negative links: flip 'a->b' labels so weights stay positive
        index = np.array(jacsp.index)
        lt = list(jacsp < 0)
        # List comprehension instead of bare map(): on Python 3 assigning a
        # map object into a numpy fancy index does not expand the values.
        index[lt] = ['->'.join(reversed(x.split('->'))) for x in index[lt]]
        jacsp.index = index
        jacsp = jacsp.abs()
        # normalize jacsp into a positive range
        jacsp = jacsp * 1.01 - jacsp.min().min()
        jacsp /= jacsp.max().max()
        split = [i.split('->') for i in jacsp.index]
        # graph
        G = nx.DiGraph()
        for e in range(len(split)):
            G.add_edge(split[e][0], split[e][1], weight=jacsp[e])
        # NOTE(review): Graph.selfloop_edges() was removed in networkx >= 2.4.
        G.remove_edges_from(G.selfloop_edges())
        rt.append({'graph': G, 'time': t[0]})
    return rt
def metric(GS, met='nx.pagerank'):
    '''
    GS - out array from to_graph

    Applies the metric named by *met* to each graph, storing sorted
    [node, score] pairs (ascending by score, then node) under GS[i][met].

    SECURITY NOTE: *met* is resolved with eval() — pass trusted strings only.
    '''
    metfn = eval(met)
    for gt in range(len(GS)):
        res = metfn(GS[gt]['graph'])
        # Fixed: the original sorted with `key=lambda k, v: (v, k)`, which
        # raises TypeError (the key callable receives ONE (key, value)
        # tuple), and used Python-2-only dict.iteritems().
        pairs = sorted(res.items(), key=lambda kv: (kv[1], kv[0]))
        GS[gt][met] = [[node, score] for node, score in pairs]
    return GS
def geobj2df(GS, what='nx.pagerank'):
    """Collect the per-time metric results stored by `metric` into a single
    time-indexed DataFrame, then rescale each column/row into a plottable
    0..1-ish range."""
    res = []
    index = []
    for s in GS:
        index.append(s['time'])
        # s[what] is a list of [node, score] pairs; re-index by node name.
        s = pd.DataFrame(s[what])
        s.index = s[0]
        s = s[1]
        res.append(s)
    # One row per timestamp, one column per node.
    df = pd.concat(res, axis=1).T
    df.index = index
    # Shift by the column minima (with a 10% inflation), then scale each
    # row by its maximum.
    df = (df * 1.1).subtract(df.min(axis=0))
    df = df.divide(df.max(axis=1), axis=0)
    import zcreate_centrality as p
    # p.createhtml(df)
    return df
|
except IOError, err:
raise EasyBuildError("Failed to patch %s: %s", fn, err)
self.log.debug("Contents of patched %s: %s" % (fn, open(fn, "r").read()))
# patch default make.sys for wannier
if LooseVersion(self.version) >= LooseVersion("5"):
fn = os.path.join(self.cfg['start_dir'], 'install', 'make_wannier90.sys')
else:
fn = os.path.join(self.cfg['start_dir'], 'plugins', 'install', 'make_wannier90.sys')
try:
for line in fileinput.input(fn, inplace=1, backup='.orig.eb'):
line = re.sub(r"^(LIBS\s*=\s*).*", r"\1%s" % libs, line)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Failed to patch %s: %s", fn, err)
self.log.debug("Contents of patched %s: %s" % (fn, open(fn, "r").read()))
# patch Makefile of want plugin
wantprefix = 'want-'
wantdirs = [d for d in os.listdir(self.builddir) if d.startswith(wantprefix)]
if len(wantdirs) > 1:
raise EasyBuildError("Found more than one directory with %s prefix, help!", wantprefix)
if len(wantdirs) != 0:
wantdir = os.path.join(self.builddir, wantdirs[0])
make_sys_in_path = None
cand_paths = [os.path.join('conf', 'make.sys.in'), os.path.join('config', 'make.sys.in')]
for path in cand_paths:
full_path = os.path.join(wantdir, path)
if os.path.exists(full_path):
make_sys_in_path = full_path
break
if make_sys_in_path is None:
raise EasyBuildError("Failed to find make.sys.in in want directory %s, paths considered: %s",
wantdir, ', '.join(cand_paths))
try:
for line in fileinput.input(make_sys_in_path, inplace=1, backup='.orig.eb'):
# fix preprocessing directives for .f90 files in make.sys if required
if self.toolchain.comp_family() in [toolchain.GCC]:
line = re.sub("@f90rule@",
"$(CPP) -C $(CPPFLAGS) $< -o $*.F90\n" +
"\t$(MPIF90) $(F90FLAGS) -c $*.F90 -o $*.o",
line)
sys.stdout.write(line)
except IOError, err:
raise EasyBuildError("Failed to patch %s: %s", fn, err)
# move non-espresso directories to where they're expected and create symlinks
try:
dirnames = [d for d in os.listdir(self.builddir) if not d.startswith('espresso')]
targetdir = os.path.join(self.builddir, "espresso-%s" % self.version)
for dirname in dirnames:
shutil.move(os.path.join(self.builddir, dirname), os.path.join(targetdir, dirname))
self.log.info("Moved %s into %s" % (dirname, targetdir))
dirname_head = dirname.split('-')[0]
linkname = None
if dirname_head == 'sax':
linkname = 'SaX'
if dirname_head == 'wannier90':
linkname = 'W90'
elif dirname_head in ['gipaw', 'plumed', 'want', 'yambo']:
linkname = dirname_head.upper()
if linkname:
os.symlink(os.path.join(targetdir, dirname), os.path.join(targetdir, linkname))
except OSError, err:
raise EasyBuildError("Failed to move non-espresso directories: %s", err)
def install_step(self):
"""Skip install step, since we're building in the install directory."""
pass
def sanity_check_step(self):
"""Custom sanity check for Quantum ESPRESSO."""
# build list of expected binaries based on make targets
bins = ["iotk", "iotk.x", "iotk_print_kinds.x"]
if 'cp' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
bins.extend(["cp.x", "cppp.x", "wfdd.x"])
if 'gww' in self.cfg['buildopts']: # only for v4.x, not in v5.0 anymore
bins.extend(["gww_fit.x", "gww.x", "head.x", "pw4gww.x"])
if 'ld1' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
bins.extend(["ld1.x"])
if 'gipaw' in self.cfg['buildopts']:
bins.extend(["gipaw.x"])
if 'neb' in self.cfg['buildopts'] or 'pwall' in self.cfg['buildopts'] or \
'all' in self.cfg['buildopts']:
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["neb.x", "path_interpolation.x"])
if 'ph' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
bins.extend(["d3.x", "dynmat.x", "lambda.x", "matdyn.x", "ph.x", "phcg.x", "q2r.x"])
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["fqha.x", "q2qstar.x"])
if 'pp' in self.cfg['buildopts'] or 'pwall' in self.cfg['buildopts'] or \
'all' in self.cfg['buildopts']:
bins.extend(["average.x", "bands.x", "dos.x", "epsilon.x", "initial_state.x",
"plan_avg.x", "plotband.x", "plotproj.x", "plotrho.x", "pmw.x", "pp.x",
"projwfc.x", "sumpdos.x", "pw2wannier90.x", "pw_export.x", "pw2gw.x",
"wannier_ham.x", "wannier_plot.x"])
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["pw2bgw.x", "bgw2pw.x"])
else:
bins.extend(["pw2casino.x"])
if 'pw' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
bins.extend(["band_plot.x", "dist.x", "ev.x", "kpoints.x", "pw.x", "pwi2xsf.x",
" | bands_FS.x", "kvecs_FS.x"])
if LooseVersion(self.version) > LooseVersion("5"):
bin | s.extend(["generate_vdW_kernel_table.x"])
else:
bins.extend(["path_int.x"])
if 'pwcond' in self.cfg['buildopts'] or 'pwall' in self.cfg['buildopts'] or \
'all' in self.cfg['buildopts']:
bins.extend(["pwcond.x"])
if 'tddfpt' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
if LooseVersion(self.version) > LooseVersion("5"):
bins.extend(["turbo_lanczos.x", "turbo_spectrum.x"])
upftools = []
if 'upf' in self.cfg['buildopts'] or 'all' in self.cfg['buildopts']:
upftools = ["casino2upf.x", "cpmd2upf.x", "fhi2upf.x", "fpmd2upf.x", "ncpp2upf.x",
"oldcp2upf.x", "read_upf_tofile.x", "rrkj2upf.x", "uspp2upf.x", "vdb2upf.x",
"virtual.x"]
if LooseVersion(self.version) > LooseVersion("5"):
upftools.extend(["interpolate.x", "upf2casino.x"])
if 'vdw' in self.cfg['buildopts']: # only for v4.x, not in v5.0 anymore
bins.extend(["vdw.x"])
if 'w90' in self.cfg['buildopts']:
bins.extend(["wannier90.x"])
want_bins = []
if 'want' in self.cfg['buildopts']:
want_bins = ["bands.x", "blc2wan.x", "conductor.x", "current.x", "disentangle.x",
"dos.x", "gcube2plt.x", "kgrid.x", "midpoint.x", "plot.x", "sumpdos",
"wannier.x", "wfk2etsf.x"]
if LooseVersion(self.version) > LooseVersion("5"):
want_bins.extend(["cmplx_bands.x", "decay.x", "sax2qexml.x", "sum_sgm.x"])
if 'xspectra' in self.cfg['buildopts']:
bins.extend(["xspectra.x"])
yambo_bins = []
if 'yambo' in self.cfg['buildopts']:
yambo_bins = ["a2y", "p2y", "yambo", "ypp"]
pref = self.install_subdir
custom_paths = {
'files': [os.path.join(pref, 'bin', x) for x in bins] +
[os.path.join(pref, 'upftools', x) for x in upftools] +
[os.path.join(pref, 'WANT', 'bin', x) for x in want_bins] +
[os.path.join(pref, 'YAMBO', 'bin', x) for x in yambo_bins],
|
iew_data)
def delete_save_search(request):
    """
    Called via ajax to delete a table. Only called from the saved_search.html
    """
    table_id = request.GET.get("id", None)
    if not table_id:
        return respondWithError("Saved search cannot be found."
                                " Please refresh and try again", True)
    return httpResponse(delete_table(request.user.id, table_id))
@user_passes_test(user_can_view_data)
def load_data(request, obj):
    """
    Ajax call to load the data for the table.

    Reads optional GET parameters sortBy (JSON or the literal 'null'),
    pageNumber and maxRows, then delegates to get_table_data().
    """
    page_number = request.GET.get("pageNumber", 1)
    max_rows = request.GET.get("maxRows", 25)
    raw_sort = request.GET.get("sortBy", 'null')
    sort_by = {} if raw_sort == 'null' else json.loads(raw_sort)
    return get_table_data(request, obj, sort=sort_by,
                          pageNumber=page_number, maxRows=max_rows)
@user_passes_test(user_can_view_data)
def save_search(request):
    """
    Ajax call to save the table. Only called from the saved_search.html

    GET parameters (all optional): dashId, newDashName, tableId, userId,
    tableName, query, object_type, columns (JSON), sortBy (JSON),
    isDefaultOnDashboard, sizex, maxRows.
    """
    dashId = request.GET.get('dashId', None)
    newDashName = request.GET.get('newDashName', None)
    tableId = request.GET.get("tableId", None)
    errorMessage = None
    clone = False
    try:
        if newDashName:
            # Saving into a brand-new dashboard.
            newDash = createNewDashboard(request.user.id, newDashName)
            if not newDash:
                # NOTE(review): 'raise(Exception, "...")' raises a tuple;
                # Python 2 uses its first element, Python 3 rejects it --
                # presumably raise Exception("...") was intended; confirm.
                raise(Exception, "Dashboard already exists")
            dashboard = newDash
        elif dashId:
            dashboard = Dashboard.objects(id=dashId).first()
            if dashboard.isPublic and dashboard.analystId != request.user.id:
                # Someone else's public dashboard: fork a private clone
                # (skipping the table being saved) and save into that.
                newDash = cloneDashboard(request.user.id, dashboard, cloneSearches = True, skip=tableId)
                dashboard = newDash
                clone = True
                newDashName = newDash.name
            elif dashboard.isPublic:
                updateChildren(dashboard.id)
        else:
            errorMessage = "Error finding dashboard. Please refresh and try again."
    except Exception as e:
        # NOTE(review): any failure above is reported as a duplicate
        # dashboard name; 'print e' is Python 2 statement syntax.
        print e
        errorMessage = "You already have a dashboard with that name."
    if errorMessage:
        return respondWithError(errorMessage, True)
    userId = request.GET.get('userId', None)
    tableName = request.GET.get('tableName', None)
    searchTerm = request.GET.get('query', None)
    objType = request.GET.get('object_type', None)
    columns = json.loads(request.GET.get("columns", ""))
    sortBy = request.GET.get("sortBy", None)
    isDefault = request.GET.get("isDefaultOnDashboard", "False")
    sizex = request.GET.get("sizex", None)
    maxRows = request.GET.get("maxRows", None)
    if isDefault.lower() == "true":
        isDefault = True
    else:
        isDefault = False
    if sortBy:
        sortBy = json.loads(sortBy)
    response = save_data(userId, columns, tableName, searchTerm, objType, sortBy,
                         tableId, sizex=sizex, isDefaultOnDashboard=isDefault,
                         maxRows=maxRows,
                         dashboard=dashboard, clone=clone)
    # When a new dashboard was created (or cloned) tell the client where
    # to redirect.
    if newDashName:
        response["newDashId"] = str(newDash.id)
        response["newDashName"] = newDash.name
        response["isClone"] = clone
        response["newDashUrl"] = reverse("crits-dashboards-views-dashboard",
                                         kwargs={"dashId":newDash.id})
    return httpResponse(response)
@user_passes_test(user_can_view_data)
def save_new_dashboard(request):
    """
    Ajax call to save the dashboard and the positioning and width of the
    tables on it. Called from the dashboard.html
    """
    data = json.loads(request.POST.get('data', ''))
    userId = request.POST.get('userId', None)
    dashId = request.POST.get('dashId', None)
    user = request.user
    clone = False

    # Without a dashboard id there is nothing to save against.
    if not dashId:
        return respondWithError("Error finding dashboard. Please refresh and try again.", True)

    dashboard = Dashboard.objects(id=dashId).first()
    if dashboard.isPublic and dashboard.analystId != user.id:
        # Saving someone else's public dashboard forks a private copy.
        dashboard = cloneDashboard(userId, dashboard)
        if not dashboard:
            return respondWithError("You already have a dashboard with that name.", True)
        clone = True
        if not user.defaultDashboard:
            setDefaultDashboard(user, dashboard.id)
    elif dashboard.isPublic:
        updateChildren(dashboard.id)

    # Persist every table's layout; abort on the first failure.
    for table in data:
        isDefault = table['isDefault'].lower() == "true"
        sortBy = None
        if 'sortDirection' in table and 'sortField' in table:
            sortBy = {'field': table['sortField'],
                      'direction': table['sortDirection']}
        response = save_data(userId, table['columns'], table['tableName'],
                             tableId=table['id'], isDefaultOnDashboard=isDefault,
                             sortBy=sortBy, dashboard=dashboard,
                             clone=clone, row=table['row'], grid_col=table['col'],
                             sizex=table['sizex'], sizey=table['sizey'])
        if not response['success']:
            return httpResponse(response)

    return httpResponse({"success": True,
                         "clone": clone,
                         "dashId": str(dashboard.id),
                         "message": "Dashboard saved successfully!"})
@user_passes_test(user_can_view_data)
def get_dashboard_table_data(request, tableName):
    """
    Ajax call to fetch the records for a default dashboard table.
    Only called from the saved_search.html when editing the table
    """
    records = getRecordsForDefaultDashboardTable(request.user, tableName)
    return httpResponse(records)
@user_passes_test(user_can_view_data)
def destroy_dashboard(request):
    """
    Ajax call to clear all table positions. Called from dashboard.html
    """
    dash_id = request.GET.get('dashId', None)
    return httpResponse(clear_dashboard(dash_id))
@user_passes_test(user_can_view_data)
def toggle_table_visibility(request):
    """
    Ajax call to toggle a table's visibility between pinned and hidden.
    Called from saved_searches_list.html
    """
    table_id = request.GET.get('tableId', None)
    # The client sends the table's *current* visibility; store the opposite.
    current = request.GET.get('isVisible', True)
    new_visibility = current != "True"
    return httpResponse(toggleTableVisibility(table_id, new_visibility))
@user_passes_test(user_can_view_data)
def set_default_dashboard(request):
    """
    Ajax call to set the users default dashboard. Called from saved_searches_list.html

    GET parameter 'id' identifies the dashboard; setDefaultDashboard()
    returns the dashboard name on success and a falsy value on failure.
    """
    id = request.GET.get('id', None)
    dashName = setDefaultDashboard(request.user, id)
    if not dashName:
        # BUG FIX: the error response was built but never returned, so
        # execution fell through and crashed concatenating None below.
        return respondWithError("An error occurred while updating dashboard. Please try again later.", True)
    return respondWithSuccess(dashName + " is now your default dashboard.")
@user_passes_test(user_can_view_data)
def set_dashboard_public(request):
    """
    Ajax call to toggle whether a dashboard is visible to all users.
    Called from saved_searches_list.html

    GET parameters:
      id         - id of the dashboard to modify
      makePublic - "false" hides the dashboard; any other value makes it
                   public (defaults to "true")

    (Doc fix: the previous docstring was copy-pasted from
    set_default_dashboard and described the wrong endpoint.)
    """
    id = request.GET.get('id', None)
    makePublic = request.GET.get('makePublic', "true")
    successMsg = "Dashboard is now "
    if makePublic == "false":
        makePublic = False
        successMsg += "hidden from "
    else:
        makePublic = True
        successMsg += "visible to "
    successMsg += "all users."
    response = setPublic(id, makePublic)
    # setPublic() returns an error string on failure.
    # IDIOM FIX: isinstance() instead of 'type(response) == str'.
    if isinstance(response, str):
        return respondWithError(response, True)
    return respondWithSuccess(successMsg)
def ignore_parent(request, id):
    """
    Ajax call to ignore that the parent of the dashboard has been changed.
    Called from dashboard.html

    Clears the hasParentChanged flag on the dashboard identified by *id*.
    """
    try:
        Dashboard.objects(id=id).update_one(set__hasParentChanged=False)
    except Exception:
        # BUG FIX: a bare 'except:' also swallowed SystemExit and
        # KeyboardInterrupt; catch Exception instead.
        return respondWithError("An error occured while updating dashboard. Please try again later.", True)
    return respondWithSuccess("success")
def delete_dashboard(request):
"""
Ajax call to delete users dashboard. Called from saved_searches_list.html
"""
id = request.GET.get('id', None)
try:
response = deleteDashboard(id)
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissi | ons and limitations
# under the License.
from heat.api.aws import exception
from heat.common import identifier
from heat.c | ommon import wsgi
from heat.rpc import client as rpc_client
class SignalController(object):
    """WSGI controller that forwards AWS-style resource signals to the
    Heat engine over RPC."""

    def __init__(self, options):
        self.options = options
        self.rpc_client = rpc_client.EngineClient()

    def update_waitcondition(self, req, body, arn):
        """Signal the resource identified by *arn* synchronously and
        return its metadata (or a mapped remote error)."""
        context = req.context
        identity = identifier.ResourceIdentifier.from_arn(arn)
        try:
            metadata = self.rpc_client.resource_signal(
                context,
                stack_identity=dict(identity.stack()),
                resource_name=identity.resource_name,
                details=body,
                sync_call=True)
        except Exception as exc:
            return exception.map_remote_error(exc)
        return {'resource': identity.resource_name, 'metadata': metadata}

    def signal(self, req, arn, body=None):
        """Fire-and-forget signal to the resource identified by *arn*."""
        context = req.context
        identity = identifier.ResourceIdentifier.from_arn(arn)
        try:
            self.rpc_client.resource_signal(
                context,
                stack_identity=dict(identity.stack()),
                resource_name=identity.resource_name,
                details=body)
        except Exception as exc:
            return exception.map_remote_error(exc)
def create_resource(options):
    """
    Signal resource factory method.

    Wraps a SignalController in a wsgi.Resource with a JSON request
    deserializer.
    """
    return wsgi.Resource(SignalController(options),
                         wsgi.JSONRequestDeserializer())
|
import numpy, matplotlib, random, pylab, math
def matrix_square_root(sigma):
    """Return the principal matrix square root R of *sigma* (R @ R == sigma).

    Uses the eigendecomposition sigma = V diag(w) V^T and returns
    V diag(sqrt(w)) V^T; valid for symmetric positive semi-definite input.
    Accepts either a numpy.matrix or a plain ndarray.
    """
    eigen, vect = numpy.linalg.eig(sigma)
    # Build diag(sqrt(w)) in one call instead of a Python loop.
    scale = numpy.diag(numpy.asarray(eigen).ravel() ** 0.5)
    # BUG FIX / generalisation: use '@' (matrix multiplication) so the
    # result is also correct for plain ndarrays, where '*' would mean
    # elementwise multiplication; with numpy.matrix input '*' happened
    # to be matmul, and that behavior is preserved.
    return vect @ scale @ vect.transpose()
def chi2_level(alpha=0.95):
    """Empirical *alpha*-quantile of the radius of a 2-D standard
    normal sample (i.e. the chi distribution with 2 degrees of freedom),
    estimated from 1000 random draws."""
    sample_size = 1000
    xs = [random.gauss(0, 1) for _ in range(0, sample_size)]
    ys = [random.gauss(0, 1) for _ in range(0, sample_size)]
    radii = sorted((a ** 2 + b ** 2) ** 0.5 for a, b in zip(xs, ys))
    return radii[int(alpha * sample_size)]
def square_figure(mat, a) :
    """Scatter-plot square boundaries before/after the covariance map.

    mat - 2x2 matrix; only the diagonal entries are used here
    a   - half-width scale factor (e.g. 1.96)

    Red points: a square scaled per-axis by sqrt(mat[i][i]).
    Blue points: the plain square of half-width *a* mapped through the
    module-level matrix ``root``.

    NOTE(review): reads the global ``root`` assigned in the __main__
    block below -- calling this before it is set raises NameError.
    pylab.show() blocks until the window is closed.
    """
    x = [ ]
    y = [ ]
    for i in range (0,100) :
        # left/right edges: fixed x, random y
        x.append ( a * mat[0][0]**0.5 )
        y.append ( (random.random ()-0.5) * a * mat[1][1]**0.5*2 )
        x.append ( -a * mat[0][0]**0.5 )
        y.append ( (random.random ()-0.5) * a * mat[1][1]**0.5*2 )
        # top/bottom edges: fixed y, random x
        y.append ( a * mat[1][1]**0.5 )
        x.append ( (random.random ()-0.5) * a * mat[0][0]**0.5*2 )
        y.append ( -a * mat[1][1]**0.5 )
        x.append ( (random.random ()-0.5) * a * mat[0][0]**0.5*2 )
    pylab.plot(x,y, 'ro')
    x = [ ]
    y = [ ]
    for i in range (0,100) :
        x.append ( a )
        y.append ( (random.random ()-0.5) * a*2 )
        x.append ( -a )
        y.append ( (random.random ()-0.5) * a*2 )
        y.append ( a )
        x.append ( (random.random ()-0.5) * a*2 )
        y.append ( -a )
        x.append ( (random.random ()-0.5) * a*2 )
    xs,ys = [],[]
    # NOTE(review): the loop variable shadows the parameter 'a'.
    for a,b in zip (x,y) :
        ar = numpy.matrix( [ [a], [b] ] ).transpose()
        we = ar * root
        xs.append ( we [0,0] )
        ys.append ( we [0,1] )
    pylab.plot(xs,ys, 'bo')
    pylab.show()
def circle_figure (mat, a) :
    """Scatter-plot circle boundaries before/after the covariance map.

    Red points: a circle of radius a*sqrt(mat[0][0]).
    Blue points: the plain circle of radius *a* mapped through the
    module-level matrix ``root``.

    NOTE(review): both coordinates of the red circle use mat[0][0]; if
    an ellipse matching the per-axis variances was intended, the sine
    term should presumably use mat[1][1] -- confirm.
    Reads the global ``root`` set in the __main__ block below;
    pylab.show() blocks until the window is closed.
    """
    x = [ ]
    y = [ ]
    for i in range (0,200) :
        z = random.random() * math.pi * 2
        i = a * mat[0][0]**0.5 * math.cos(z)
        j = a * mat[0][0]**0.5 * math.sin(z)
        x.append ( i )
        y.append ( j )
    pylab.plot(x,y, 'ro')
    x = [ ]
    y = [ ]
    for i in range (0,200) :
        z = random.random() * math.pi * 2
        i = a * math.cos(z)
        j = a * math.sin(z)
        x.append ( i )
        y.append ( j )
    xs,ys = [],[]
    # NOTE(review): the loop variable shadows the parameter 'a'.
    for a,b in zip (x,y) :
        ar = numpy.matrix( [ [a], [b] ] ).transpose()
        we = ar * root
        xs.append ( we [0,0] )
        ys.append ( we [0,1] )
    pylab.plot(xs,ys, 'bo')
    pylab.show()
if __name__ == "__main__" :
level = chi2_level ()
mat = [ [0.1, 0.05], [0.05, 0.2] ]
npmat = numpy.matrix(mat)
root = matrix_square_root (npmat)
square_figure (mat, 1.96)
circle_figure (mat, level) |
"""
De | mo of the errorbar function.
"""
import numpy as np
import matplotlib.pyplot as plt
# example data
x = np.arange(0.1, 4, 0.5)
y = np.exp(-x)
plt.errorbar(x, | y, xerr=0.2, yerr=0.4)
plt.show()
|
s="the q | uick brown fox jumped over the lazy dog"
t = s.split(" ")
for v in t:
print(v)
r = s.split("e")
for v in r:
print(v)
x = s.split()
for v in x:
print(v)
# 2-arg version of split not supported
# y = s.split(" | ",7)
# for v in y:
# print v
|
from common_fixtures import * # NOQA
def _clean_clusterhostmap_for_host(host):
for cluster in host.clusters():
cluster.removehost(hostId=str(host.id))
def _resource_is_inactive(resource):
return resource.state == 'inactive'
def _resource_is_active(resource):
return resource.state == 'active'
@pytest.mark.skipif('True')
def test_cluster_add_remove_host_actions(super_client, new_context):
    """Exercise the cluster addhost/removehost life-cycle, agent creation
    and the InvalidReference error paths."""
    host1 = super_client.reload(new_context.host)
    account = new_context.project
    _clean_clusterhostmap_for_host(host1)
    cluster = super_client.create_cluster(
        accountId=account.id,
        name='testcluster1', port=9000)
    cluster = wait_for_condition(
        super_client, cluster, _resource_is_inactive,
        lambda x: 'State is: ' + x.state)
    # Add one host to cluster
    cluster = cluster.addhost(hostId=str(host1.id))
    # BUG FIX: the message callbacks concatenated str + int and raised
    # TypeError whenever they fired; wrap the count in str().
    cluster = wait_for_condition(
        super_client, cluster,
        lambda x: len(x.hosts()) == 1,
        lambda x: 'Number of hosts in cluster is: ' + str(len(x.hosts())))
    assert cluster.hosts()[0].id == host1.id
    assert len(host1.clusters()) == 1
    assert host1.clusters()[0].id == cluster.id
    # activate cluster
    cluster.activate()
    cluster = wait_for_condition(
        super_client, cluster, _resource_is_active,
        lambda x: 'State is: ' + x.state)
    # verify that the agent got created
    uri = 'sim:///?clusterId={}&managingHostId={}'. \
        format(get_plain_id(super_client, cluster),
               get_plain_id(super_client, host1))
    agents = super_client.list_agent(uri=uri)
    assert len(agents) == 1
    # verify that the agent instance got created
    agent_instances = super_client.list_instance(agentId=agents[0].id)
    assert len(agent_instances) == 1
    # adding the same host twice must be rejected
    try:
        cluster.addhost(hostId=str(host1.id))
        assert False
    except cattle.ApiError as e:
        assert e.error.code == 'InvalidReference'
    cluster = cluster.removehost(hostId=str(host1.id))
    cluster = wait_for_condition(
        super_client, cluster,
        lambda x: len(x.hosts()) == 0,
        lambda x: 'Number of hosts in cluster is: ' + str(len(x.hosts())))
    # removing a host that is no longer in the cluster must be rejected
    try:
        cluster = cluster.removehost(hostId=str(host1.id))
        assert False
    except cattle.ApiError as e:
        assert e.error.code == 'InvalidReference'
    cluster = cluster.addhost(hostId=str(host1.id))
    assert len(cluster.hosts()) == 1
    # Add 2nd host to cluster
    host2 = register_simulated_host(new_context)
    cluster = cluster.addhost(hostId=str(host2.id))
    cluster = wait_for_condition(
        super_client, cluster,
        lambda x: len(x.hosts()) == 2,
        lambda x: 'Number of hosts in cluster is: ' + str(len(x.hosts())))
    # Remove 2nd host from cluster
    cluster = cluster.removehost(hostId=str(host2.id))
    cluster = wait_for_condition(
        super_client, cluster,
        lambda x: len(x.hosts()) == 1,
        lambda x: 'Number of hosts in cluster is: ' + str(len(x.hosts())))
# temporarily skipping since this was inadvertently deleting the
# real host causing downstream TFs
@pytest.mark.skipif('True')
def test_host_purge(super_client, new_context):
    """Purging a host must also drop it from any cluster it belonged to."""
    host = super_client.reload(new_context.host)
    _clean_clusterhostmap_for_host(host)

    cluster = super_client.create_cluster(
        accountId=new_context.project.id,
        name='testcluster2', port=9000)
    cluster = wait_for_condition(
        super_client, cluster, _resource_is_inactive,
        lambda x: 'State is: ' + x.state)
    cluster = cluster.addhost(hostId=str(host.id))

    # Deactivate, delete and purge the host ...
    host = super_client.wait_success(host.deactivate())
    host = super_client.wait_success(super_client.delete(host))
    super_client.wait_success(host.purge())

    # ... and verify the cluster no longer references it.
    wait_for_condition(
        super_client, cluster, lambda x: len(x.hosts()) == 0)
@pytest.mark.skipif('True')
def test_cluster_purge(super_client, new_context):
    """Purging a cluster must detach its hosts and remove both the
    cluster agent and the agent instance."""
    host1 = super_client.reload(new_context.host)
    _clean_clusterhostmap_for_host(host1)
    cluster = super_client.create_cluster(
        accountId=new_context.project.id,
        name='testcluster3', port=9000)
    cluster = wait_for_condition(
        super_client, cluster, _resource_is_inactive,
        lambda x: 'State is: ' + x.state)
    # attach one host, then activate the cluster
    cluster = cluster.addhost(hostId=str(host1.id))
    cluster = wait_for_condition(
        super_client, cluster, lambda x: len(x.hosts()) == 1)
    cluster.activate()
    cluster = wait_for_condition(
        super_client, cluster, _resource_is_active,
        lambda x: 'State is: ' + x.state)
    # verify that the agent got created
    uri = 'sim:///?clusterId={}&managingHostId={}'. \
        format(get_plain_id(super_client, cluster),
               get_plain_id(super_client, host1))
    agents = super_client.list_agent(uri=uri)
    assert len(agents) == 1
    # verify that the agent instance got created
    agentId = agents[0].id
    agent_instances = super_client.list_instance(agentId=agentId)
    assert len(agent_instances) == 1
    # deactivate, remove, and purge cluster
    cluster = super_client.wait_success(cluster.deactivate())
    cluster = super_client.wait_success(super_client.delete(cluster))
    cluster = super_client.wait_success(cluster.purge())
    # check no hosts is registered to this cluster
    wait_for_condition(
        super_client, cluster, lambda x: len(x.hosts()) == 0)
    # verify that the agent is removed
    agents = super_client.list_agent(uri=uri)
    wait_for_condition(
        super_client, agents[0],
        lambda x: x.state == 'removed',
        lambda x: 'State is: ' + x.state)
    # verify that the agent instance is removed as well
    agent_instances = super_client.list_instance(agentId=agentId)
    wait_for_condition(
        super_client, agent_instances[0],
        lambda x: x.state == 'removed',
        lambda x: 'State is: ' + x.state)
@pytest.mark.skipif('True')
def test_cluster_actions_invalid_host_ref(super_client, new_context):
    """addhost/removehost with a bogus host id must raise InvalidReference."""
    host = super_client.reload(new_context.host)
    _clean_clusterhostmap_for_host(host)
    cluster = super_client.create_cluster(
        accountId=new_context.project.id,
        name='testcluster4', port=9000)

    def expect_invalid_reference(action):
        try:
            action(hostId='badvalue')
            assert False
        except cattle.ApiError as e:
            assert e.error.code == 'InvalidReference'

    expect_invalid_reference(cluster.addhost)
    expect_invalid_reference(cluster.removehost)
|
ntent']))
    def backwards(self, orm):
        """Reverse migration: convert Comment.parent_content and
        Comment.reply_content from ForeignKey id columns back to plain
        CharField(max_length=32) columns."""
        # Renaming column for 'Comment.parent_content' to match new field type.
        db.rename_column('canvas_comment', 'parent_content_id', 'parent_content')
        # Changing field 'Comment.parent_content'
        db.alter_column('canvas_comment', 'parent_content', self.gf('django.db.models.fields.CharField')(max_length=32))
        # Renaming column for 'Comment.reply_content' to match new field type.
        db.rename_column('canvas_comment', 'reply_content_id', 'reply_content')
        # Changing field 'Comment.reply_content'
        db.alter_column('canvas_comment', 'reply_content', self.gf('django.db.models.fields.CharField')(max_length=32))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_acti | ve': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': | ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'canvas.comment': {
'Meta': {'object_name': 'Comment'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'parent_content': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'to': "orm['canvas.Content']"}),
'reply_content': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'used_in_comments'", 'null': 'True', 'to': "orm['canvas.Content']"}),
'reply_text': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.FloatField', [], {})
},
'canvas.content': {
'Meta': {'object_name': 'Content'},
'id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'timestamp': ('django.db.models.fields.FloatField', [], {})
},
'canvas.contentsticker': {
'Meta': {'object_name': 'ContentSticker'},
'content': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stickers'", 'to': "orm['canvas.Content']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'timestamp': ('django.db.models.fields.FloatField', [], {}),
'type_id': ('django.db.models.fields.IntegerField', [], {})
},
'canvas.hashtag': {
'Meta': {'object_name': 'Hashtag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'canvas.post': {
'Meta': {'object_name': 'Post'},
'anonymous': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'blacklisted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'content_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'default': "'0.0.0.0'", 'max_length': '15'}),
'post_id': ('django.db.models.fields.IntegerField', [], {}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'posts'", 'to': "orm['canvas.Thread']"}),
'thumb_down': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'thumb_up': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'timestamp': ('django.db.models.fields.FloatField', [], {})
},
'canvas.stashcontent': {
'Meta': {'object_name': 'StashContent'},
'content_id': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'post': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'thread': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['canvas.Thread']", 'null': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'canvas.thread': {
'Meta': {'object_name': 'Thread'},
'hashtags': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'threads'", 'symmetrical': 'False', 'to': "orm['canvas.Hashtag']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'locked': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django. |
# -*- coding: utf-8 -*-
# Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from wsme import types as wtypes
from cloudkitty.api.v1 import types as ck_types
class Script(wtypes.Base):
    """Type describing a script.

    WSME attribute declarations below define the wire format of a
    script resource.
    """
    # Optional on input (server assigns one when omitted).
    script_id = wtypes.wsattr(ck_types.UuidType(), mandatory=False)
    """UUID of the script."""
    name = wtypes.wsattr(wtypes.text, mandatory=True)
    """Name of the script."""
    data = wtypes.wsattr(wtypes.text, mandatory=False)
    """Data of the script."""
    # readonly: computed server-side, rejected on input.
    checksum = wtypes.wsattr(wtypes.text, mandatory=False, readonly=True)
    """Checksum of the script data."""
    @classmethod
    def sample(cls):
        """Return a representative Script used in the API documentation."""
        sample = cls(script_id='bc05108d-f515-4984-8077-de319cbf35aa',
                     name='policy1',
                     data='return 0',
                     checksum='cf83e1357eefb8bdf1542850d66d8007d620e4050b5715d'
                              'c83f4a921d36ce9ce47d0d13c5d85f2b0ff8318d2877eec'
                              '2f63b931bd47417a81a538327af927da3e')
        return sample
class ScriptCollection(wtypes.Base):
    """Type describing a list of scripts.
    """
    scripts = [Script]
    """List of scripts."""

    @classmethod
    def sample(cls):
        """Return a one-element sample collection for the API docs."""
        return cls(scripts=[Script.sample()])
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007-2009 Christopher Lenz
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import random
import sys
from couchdb import client
from couchdb import ServerError
class TempDatabaseMixin(object):
    """Test mixin that hands out throw-away CouchDB databases with
    random names and deletes them again in tearDown()."""

    temp_dbs = None
    _db = None

    def setUp(self):
        self.server = client.Server(full_commit=False)

    def tearDown(self):
        if not self.temp_dbs:
            return
        for db_name in self.temp_dbs:
            try:
                self.server.delete(db_name)
            except ServerError as err:
                # Permission races on deletion are tolerated.
                if err.args[0] != (500, ('error', 'eacces')):
                    raise

    def temp_db(self):
        """Create a database under a fresh random name; return (name, db)."""
        if self.temp_dbs is None:
            self.temp_dbs = {}
        # Find an unused database name
        name = 'couchdb-python/%d' % random.randint(0, sys.maxsize)
        while name in self.temp_dbs:
            name = 'couchdb-python/%d' % random.randint(0, sys.maxsize)
        db = self.server.create(name)
        self.temp_dbs[name] = db
        return name, db

    def del_db(self, name):
        """Forget and delete the database called *name*."""
        del self.temp_dbs[name]
        self.server.delete(name)

    @property
    def db(self):
        """Lazily-created default temporary database."""
        if self._db is None:
            _, self._db = self.temp_db()
        return self._db
|
#!/usr/bin/env python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Library for manipulating naclports packages in python.
This library can be used to build tools for working with naclports
packages. For example, it is used by 'update_mirror.py' to iterate
through all packages and mirror them on commondatastorage.
"""
import optparse
import os
import urlparse
import shlex
import shutil
import subprocess
import sys
import tempfile
import sha1check
# Mirror tried first when downloading package archives (see Package.Download).
MIRROR_URL = 'http://commondatastorage.googleapis.com/nativeclient-mirror/nacl'
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
# Repository root: the parent of the directory containing this script.
NACLPORTS_ROOT = os.path.dirname(SCRIPT_DIR)
OUT_DIR = os.path.join(NACLPORTS_ROOT, 'out')
# Target architecture; defaults to 'i686' when $NACL_ARCH is unset.
ARCH = os.environ.get('NACL_ARCH', 'i686')
BUILD_ROOT = os.path.join(OUT_DIR, 'repository')  # where archives are extracted
ARCHIVE_ROOT = os.path.join(OUT_DIR, 'tarballs')  # where archives are downloaded
NACL_SDK_ROOT = os.environ.get('NACL_SDK_ROOT')   # may be None; checked in main()
# TODO(sbc): use this code to replace the bash logic in build_tools/common.sh
class Error(Exception):
  """Base exception raised for naclports tooling failures."""
class Package(object):
  """Representation of a single naclports package.

  Package objects correspond to folders on disk which
  contain a 'pkg_info' file.
  """

  def __init__(self, pkg_root):
    """Parse <pkg_root>/pkg_info into attributes on this object.

    Raises:
      Error: if the folder has no pkg_info or a line is malformed.
    """
    self.root = os.path.abspath(pkg_root)
    info = os.path.join(pkg_root, 'pkg_info')
    keys = []
    self.URL_FILENAME = None
    self.URL = None
    self.LICENSE = None
    if not os.path.exists(info):
      raise Error('Invalid package folder: %s' % pkg_root)
    with open(info) as f:
      for i, line in enumerate(f):
        if line[0] == '#':
          continue
        if '=' not in line:
          raise Error('Invalid pkg_info line %d: %s' % (i + 1, pkg_root))
        key, value = line.split('=', 1)
        key = key.strip()
        # shlex strips surrounding quotes, e.g. NAME="foo bar".
        value = shlex.split(value.strip())[0]
        keys.append(key)
        setattr(self, key, value)
    assert 'PACKAGE_NAME' in keys

  def GetBasename(self):
    """Return the archive filename minus its (possibly double) extension."""
    basename = os.path.splitext(self.GetArchiveFilename())[0]
    if basename.endswith('.tar'):
      basename = os.path.splitext(basename)[0]
    return basename

  def __cmp__(self, other):
    # Python 2 ordering hook: packages sort by name.
    return cmp(self.PACKAGE_NAME, other.PACKAGE_NAME)

  def GetBuildLocation(self):
    """Folder under BUILD_ROOT the package is extracted to."""
    package_dir = getattr(self, 'PACKAGE_DIR', self.PACKAGE_NAME)
    return os.path.join(BUILD_ROOT, package_dir)

  def GetArchiveFilename(self):
    """Basename of the upstream archive, or None for archive-less packages."""
    if self.URL_FILENAME:
      return self.URL_FILENAME
    elif self.URL:
      return os.path.basename(urlparse.urlparse(self.URL)[2])

  def DownloadLocation(self):
    """Local path the archive is downloaded to, or None if no archive."""
    archive = self.GetArchiveFilename()
    if not archive:
      return
    return os.path.join(ARCHIVE_ROOT, archive)

  def Verify(self, verbose=False):
    """Download (if needed) and sha1-check the archive; True on success."""
    if not self.GetArchiveFilename():
      # print() form is identical on Python 2 and valid on Python 3.
      print("no archive: %s" % self.PACKAGE_NAME)
      return True
    self.Download()
    olddir = os.getcwd()
    sha1file = os.path.join(self.root, self.PACKAGE_NAME + '.sha1')
    try:
      os.chdir(ARCHIVE_ROOT)
      with open(sha1file) as f:
        try:
          filenames = sha1check.VerifyFile(f, False)
          print("verified: %s" % (filenames))
        except sha1check.Error as e:
          print("verification failed: %s: %s" % (sha1file, str(e)))
          return False
    finally:
      os.chdir(olddir)
    return True

  def Extract(self):
    """Extract the package archive into the default build root."""
    self.ExtractInto(BUILD_ROOT)

  def ExtractInto(self, output_path):
    """Extract the package archive into the given location.

    This method assumes the package has already been downloaded.
    """
    if not os.path.exists(output_path):
      os.makedirs(output_path)
    # NOTE(review): dirname() of the build location looks suspicious (it
    # yields the parent, not the package folder) — confirm intent.
    new_foldername = os.path.dirname(self.GetBuildLocation())
    # Fixed: this condition was corrupted by stray ' | ' artifacts.
    if os.path.exists(os.path.join(output_path, new_foldername)):
      return
    # Extract into a temp dir first so a failed extract leaves no debris.
    tmp_output_path = tempfile.mkdtemp(dir=OUT_DIR)
    try:
      archive = self.DownloadLocation()
      ext = os.path.splitext(archive)[1]
      if ext in ('.gz', '.tgz', '.bz2'):
        cmd = ['tar', 'xf', archive, '-C', tmp_output_path]
      elif ext in ('.zip',):
        cmd = ['unzip', '-q', '-d', tmp_output_path, archive]
      else:
        raise Error('unhandled extension: %s' % ext)
      print(cmd)
      subprocess.check_call(cmd)
      src = os.path.join(tmp_output_path, new_foldername)
      dest = os.path.join(output_path, new_foldername)
      os.rename(src, dest)
    finally:
      shutil.rmtree(tmp_output_path)

  def GetMirrorURL(self):
    """URL of the archive on the naclports mirror."""
    return MIRROR_URL + '/' + self.GetArchiveFilename()

  def Enabled(self):
    """Raise Error if this package cannot be built in the current env."""
    if hasattr(self, 'LIBC'):
      if os.environ.get('NACL_GLIBC') == '1':
        if self.LIBC != 'glibc':
          raise Error('Package cannot be built with glibc.')
      else:
        if self.LIBC != 'newlib':
          raise Error('Package cannot be built with newlib.')
    if hasattr(self, 'DISABLED_ARCH'):
      # NOTE(review): default here is 'x86_64' but module-level ARCH
      # defaults to 'i686' — confirm which is intended.
      arch = os.environ.get('NACL_ARCH', 'x86_64')
      if arch == self.DISABLED_ARCH:
        raise Error('Package is disabled for current arch: %s.' % arch)
    if hasattr(self, 'BUILD_OS'):
      sys.path.append(os.path.join(NACL_SDK_ROOT, 'tools'))
      import getos
      if getos.GetPlatform() != self.BUILD_OS:
        raise Error('Package can only be built on: %s.' % self.BUILD_OS)

  def Download(self):
    """Fetch the archive via wget: mirror first, upstream URL as fallback."""
    filename = self.DownloadLocation()
    if not filename or os.path.exists(filename):
      return
    if not os.path.exists(os.path.dirname(filename)):
      os.makedirs(os.path.dirname(filename))
    try:
      mirror = self.GetMirrorURL()
      print('Downloading: %s [%s]' % (mirror, filename))
      cmd = ['wget', '-O', filename, mirror]
      subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
      print('Downloading: %s [%s]' % (self.URL, filename))
      cmd = ['wget', '-O', filename, self.URL]
      subprocess.check_call(cmd)
def PackageIterator(folders=None):
  """Yield a Package object for each naclport package.

  Walks the given folders (default: <NACLPORTS_ROOT>/ports) and yields a
  Package for every directory that contains a 'pkg_info' file.
  """
  search_roots = folders or [os.path.join(NACLPORTS_ROOT, 'ports')]
  for search_root in search_roots:
    for dirpath, _, filenames in os.walk(search_root):
      if 'pkg_info' in filenames:
        yield Package(dirpath)
def main(args):
  """Command-line entry point.

  Args:
    args: argument list (without the program name); expects exactly one
        command out of: download, check, enabled, verify.

  Returns:
    0 on success, 1 when an Error is reported.
  """
  try:
    parser = optparse.OptionParser()
    parser.add_option('-v', '--verbose', action='store_true',
                      help='Output extra information.')
    parser.add_option('-C', dest='dirname', default='.',
                      help='Change directory before executing commands.')
    options, args = parser.parse_args(args)
    if not args:
      parser.error("You must specify a build command")
    if len(args) > 1:
      parser.error("More than one command specified")
    command = args[0]
    if not options.dirname:
      options.dirname = '.'
    if not NACL_SDK_ROOT:
      # Bug fix: the Error was previously constructed but never raised,
      # so a missing $NACL_SDK_ROOT went completely unnoticed.
      raise Error("$NACL_SDK_ROOT not set")
    p = Package(options.dirname)
    if command == 'download':
      p.Download()
    elif command == 'check':
      pass  # simply check that the package is valid.
    elif command == 'enabled':
      p.Enabled()
    elif command == 'verify':
      p.Verify()
  except Error as e:
    sys.stderr.write('naclports: %s\n' % e)
    return 1
  return 0
if __name__ == '__main__':
  # Script entry point: forward CLI args (sans program name) to main()
  # and exit with its status code.
  sys.exit(main(sys.argv[1:]))
|
from ubuntutweak.janitor import JanitorCachePlugin
class ChromeCachePlugin(JanitorCachePlugin):
    """Janitor plugin that cleans the Google Chrome browser cache."""
    __title__ = _('Chrome Cache')
    # Fixed: a stray ' |' artifact trailed this assignment.
    __category__ = 'application'

    root_path = '~/.cache/google-chrome/Default'
class ChromiumCachePlugin(JanitorCachePlugin):
    """Janitor plugin that cleans the Chromium browser cache."""
    __title__ = _('Chromium Cache')
    __category__ = 'application'

    # Fixed: the path string was corrupted by a stray ' | ' artifact.
    root_path = '~/.cache/chromium/Default'
|
#!/usr/bin/env python
# encoding: utf-8
import logging
import math
import time
from django.utils import timezone
import django
from modularodm import Q
from oauthlib.oauth2 import OAuth2Error
from dateutil.relativedelta import relativedelta
django.setup()
from framework.celery_tasks import app as celery_app
from scripts import utils as scripts_utils
from website.app import init_app
from addons.box.models import Provider as Box
from addons.googledrive.models import GoogleDriveProvider
from addons.mendeley.models import Mendeley
from osf.models import ExternalAccount
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
# Providers whose OAuth access tokens this script knows how to refresh.
PROVIDER_CLASSES = (Box, GoogleDriveProvider, Mendeley, )
def look_up_provider(addon_short_name, provider_classes=None):
    """Return the provider class matching ``addon_short_name``.

    :param addon_short_name: the addon's short name (e.g. ``'box'``)
    :param provider_classes: iterable of candidate classes; defaults to
        PROVIDER_CLASSES (exposed as a parameter for testability)
    :return: the matching class, or None when no candidate matches
    """
    if provider_classes is None:
        provider_classes = PROVIDER_CLASSES
    for provider_class in provider_classes:
        if provider_class.short_name == addon_short_name:
            return provider_class
    return None
def get_targets(delta, addon_short_name):
    # NOTE: expires_at is the access_token's expiration date,
    # NOT the refresh token's
    # Returns ExternalAccounts of the given provider whose access token
    # expired more than `delta` ago AND that have not been refreshed within
    # the same window.  timezone.now() is evaluated once per clause, so the
    # two cutoffs can differ by microseconds (harmless here).
    return ExternalAccount.find(
        Q('expires_at', 'lt', timezone.now() - delta) &
        Q('date_last_refreshed', 'lt', timezone.now() - delta) &
        Q('provider', 'eq', addon_short_name)
    )
def main(delta, Provider, rate_limit, dry_run):
    """Refresh OAuth tokens for every stale account of one provider.

    :param delta: relativedelta; accounts stale for longer than this are targeted
    :param Provider: provider class (e.g. Box) wrapping an ExternalAccount
    :param rate_limit: (requests, seconds) tuple throttling refresh calls
    :param dry_run: when True, only log what would be refreshed
    """
    allowance = rate_limit[0]
    last_call = time.time()
    for record in get_targets(delta, Provider.short_name):
        if Provider(record).has_expired_credentials:
            logger.info(
                'Found expired record {}, skipping'.format(record.__repr__())
            )
            continue
        logger.info(
            'Refreshing tokens on record {0}; expires at {1}'.format(
                record.__repr__(),
                record.expires_at.strftime('%c')
            )
        )
        if not dry_run:
            if allowance < 1:
                # Fixed: the sleep expression was corrupted by a stray ' | '.
                try:
                    time.sleep(rate_limit[1] - (time.time() - last_call))
                except (ValueError, IOError):
                    pass  # Value/IOError indicates negative sleep time in Py 3.5/2.7, respectively
                allowance = rate_limit[0]
            allowance -= 1
            last_call = time.time()
            success = False
            try:
                success = Provider(record).refresh_oauth_key(force=True)
            except OAuth2Error as e:
                logger.error(e)
            else:
                logger.info(
                    'Status of record {}: {}'.format(
                        record.__repr__(),
                        'SUCCESS' if success else 'FAILURE')
                )
@celery_app.task(name='scripts.refresh_addon_tokens')
def run_main(addons=None, rate_limit=(5, 1), dry_run=True):
    """Refresh OAuth tokens for the given addons.

    :param dict addons: of form {'<addon_short_name>': int(<refresh_token validity duration in days>)}
    :param tuple rate_limit: of form (<requests>, <seconds>). Default is five per second
    """
    init_app(set_backends=True, routes=False)
    if not dry_run:
        # Only persist logs to file when actually mutating data.
        scripts_utils.add_file_logger(logger, __file__)
    for addon in addons:
        # Refresh once three quarters of the token's validity has elapsed.
        refresh_days = math.ceil(int(addons[addon]) * 0.75)
        refresh_delta = relativedelta(days=refresh_days)
        provider_class = look_up_provider(addon)
        if not provider_class:
            logger.error('Unable to find Provider class for addon {}'.format(addon))
            continue
        main(refresh_delta, provider_class, rate_limit, dry_run=dry_run)
|
nalyze the performance
# of a network.
class Metrics(object):
## Calculate average throughput as: total_bytes_rcvd / duration.
#
# @param pkts_list An iterator object in the format [(timestamp, size),]
# @param duration Time duration (in s) over which thruput is to be computed. Typically it is the simulation period.
# @return Average throughput in Kbps; return -1 if duration is not positive
@staticmethod
def average_throughput(pkts_list, duration):
#print 'Average throughput'
avg_thruput = 0
start = -1
stop = 0
if pkts_list:
for record in pkts_list:
#print record
try:
avg_thruput += long(record[1])
if start == -1:
start = float(record[0])
stop = float(record[0])
#print record[0], record[1]
except IndexError:
pass
if duration <= 0:
duration = stop - start + 0.00000001
#print 'duration:', duration
avg_thruput = 8 * float(avg_thruput) / (1024 * duration) # Since pkt len is in bytes
return avg_thruput
@staticmethod
## Calculate instantaneous throughput as total bytes_rcvd at each time instant.
#
# <b>Logic</b>: To determine total bytes received at any instant, say, at t = 5, sum
# up sizes of all packets received in the interval 5.00000... to 5.99999...
#
# This procedure is repeated for all the time instances.
# @param pkts_list An iterator object in the format [(timestamp, size),]
# @return A list in the form [(time_instance, total_Kbytes),]
def instantaneous_throughput(pkts_list=None):
#print 'Instantaneous throughput'
result = []
start_time = -1 # Anything less than 0
this_instance = 0
bytes_this_instance = 0
#i_duration = long(duration)
if pkts_list:
for record in pkts_list:
try:
if start_time < 0: # This is the first record encountered
start_time = float(record[0])
#print start_time
this_instance = int(start_time)
#print this_instance
bytes_this_instance = long(record[1])
continue
cur_time = float(record[0])
if this_instance < cur_time and\
cur_time < (this_instance + 1):
bytes_this_instance += long(record[1])
else:
result.append( (this_instance, bytes_this_instance * 8 / 1024) )
this_instance += 1
bytes_this_instance = long(record[1])
except IndexError:
pass
# Append the last record
result.append( (this_instance, bytes_this_instance * 8 / 1024) )
return result
@staticmethod
def cumulative_bytes_received(pkts_list=None):
#print 'Cumulative plot of bytes received'
result = []
start_time = -1 # Anything less than 0
this_instance = 0
bytes_this_instance = 0
if pkts_list:
for record in pkts_list:
try:
if start_time < 0:
start_time = float(record[0])
this_instance = int(start_time)
bytes_this_instance = long(record[1])
continue
cur_time = float(record[0])
bytes_this_instance += long(record[1])
if this_instance < cur_time and\
cur_time < (this_instance + 1):
continue
else:
result.append( (this_instance, ( float(bytes_this_instance / 1024) ) * 8 ) )
this_instance += 1
#print cur_time
except IndexError:
pass
# Append the last record
result.append( (this_instance, ( float(bytes_this_instance / 1024) ) * 8 ) )
return result
@staticmethod
## Calculate throughput as total bytes_rcvd upto current instance of time / total duration upto current instance
# @param pkts_list An iterator object in the format [(timestamp, size),]
# @return A list in the form [(time_instance, total_bytes),]
def cumulative_throughput(pkts_list=None):
#print 'Current throughput'
result = []
start_time = -1 # Anything less than 0
this_instance = 0
bytes_this_instance = 0
if pkts_list:
for record in pkts_list:
try:
if start_time < 0:
start_time = float(record[0])
this_instance = int(start_time)
bytes_this_instance = long(record[1])
continue
cur_time = float(record[0])
bytes_this_instance += long(record[1])
if this_instance < cur_time and\
cur_time < (this_instance + 1):
continue
else:
result.append( (this_instance, ( float(bytes_this_instance / 1024) / ( this_instance - int(start_time) + 1) ) * 8 ) )
this_instance += 1
except IndexError:
pass
# Append the last record
result.append( (this_instance, ( float(bytes_this_instance / 1024) / ( this_instance - int(start_time) + 1) ) * 8 ) )
return result
| ## Return the end to end delay for each packet moving between a source and
# destination node, and identified by a flow ID. The delay is computed as
# the difference between sending t | ime of the packet at source node and
# receiving time of the packet at the destination node.
# @param send_pkts_list An iterator object in the format [(seq_num, timestamp)]
# @param rcvd_pkts_list An iterator object in the format [(seq_num, timestamp)]
# @return A list in the form [(seq_num, delay),]
@staticmethod
def end2end_delay(send_pkts_list=None, rcvd_pkts_list=None):
#print 'End to end delay'
send_pkts = {}
rcvd_pkts = {}
for pkt in send_pkts_list:
send_pkts[pkt[0]] = float(pkt[1])
for pkt in rcvd_pkts_list:
rcvd_pkts[pkt[0]] = float(pkt[1])
pkt_delay = []
for seq_num in send_pkts:
if seq_num in rcvd_pkts:
if rcvd_pkts[seq_num] >= send_pkts[seq_num]:
delay = rcvd_pkts[seq_num] - send_pkts[seq_num]
pkt_delay.append( (seq_num, delay) )
# Sort pkt_delay in integer order of seq_num -- otherwise displayed
# graph would be garbage
pkt_delay = [ ( int(e[0]), e[1], ) for e in pkt_delay ]
pkt_delay.sort()
return pkt_delay
# @param send_pkts_list An iterator object in the format [seq_num]
@staticmethod
def packet_retransmissions(send_pkts_list=None):
#print 'Packet retransmissions'
send_pkts = {}
send_pkts_list = [ int(item) for item in send_pkts_list ]
for seq_num in send_pkts_list:
if seq_num in send_pkts:
send_pkts[seq_num] += 1
else:
send_pkts[seq_num] = 0
pkt_retransmits = []
for (seq_num, retransmits) in send_pkts.items():
if retransmits != 0:
pkt_retransmits.append( (seq_num, retransmits) )
pkt_retransmits.sort()
return pkt_ret |
# -*- coding: utf-8 -*-
"""
Pygments unit tests
~~~~~~~~~~~~~~~~~~
Usage::
python run.py [testfile ...]
    :copyright: Copyright 2006-2012 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys, os

if sys.version_info >= (3,):
    # copy test suite over to "build/lib" and convert it with 2to3
    print ('Copying and converting sources to build/lib/test...')
    from distutils.util import copydir_run_2to3
    testroot = os.path.dirname(__file__)
    newroot = os.path.join(testroot, '..', 'build/lib/test')
    # Fixed: the call was corrupted by a stray ' | ' artifact.
    copydir_run_2to3(testroot, newroot)
    # make nose believe that we run from the converted dir
    os.chdir(newroot)
else:
    # only find tests in this directory
    os.chdir(os.path.dirname(__file__))

try:
    import nose
except ImportError:
    print ('nose is required to run the Pygments test suite')
    sys.exit(1)

try:
    # make sure the current source is first on sys.path
    sys.path.insert(0, '..')
    import pygments
except ImportError:
    print ('Cannot find Pygments to test: %s' % sys.exc_info()[1])
    sys.exit(1)
else:
    print ('Pygments %s test suite running (Python %s)...' %
           (pygments.__version__, sys.version.split()[0]))
    nose.main()
|
"""
Caching instances via ``related_name``
--------------------------------------
``cache_relation`` adds utility methods to a model to obtain ``related_name``
instances via the cache.
Usage
~~~~~
::
from django.db import models
from django.contrib.auth.models import User
class Foo(models.Model):
user = models.OneToOneField(
            User,
primary_key=True,
related_name='foo',
)
name = models.CharField(max_length=20)
cache_relation(User.foo)
::
>>> user = User.objects.get(pk=1)
>>> user.foo_cache # Cache miss - hits the database
<Foo: >
>>> user = User.objects.get(pk=1)
>>> user.foo_cache # Cache hit - no database access
<Foo: >
>>> user = User.objects.get(pk=2)
>>> user.foo # Regular lookup - hits the database
<Foo: >
>>> user.foo_cache # Special-case: Will not hit cache or database.
<Foo: >
Accessing ``user_instance.foo_cache`` (note the "_cache" suffix) will now
obtain the related ``Foo`` instance via the cache. Accessing the original
``user_instance.foo`` attribute will perform the lookup as normal.
Invalidation
~~~~~~~~~~~~
Upon saving (or deleting) the instance, the cache is cleared. For example::
>>> user = User.objects.get(pk=1)
>>> foo = user.foo_cache # (Assume cache hit from previous session)
>>> foo.name = "New name"
>>> foo.save() # Cache is cleared on save
>>> user = User.objects.get(pk=1)
>>> user.foo_cache # Cache miss.
<Foo: >
Manual invalidation may also be performed using the following methods::
>>> user_instance.foo_cache_clear()
>>> User.foo_cache_clear_fk(user_instance_pk)
Manual invalidation is required if you use ``.update()`` methods which the
``post_save`` and ``post_delete`` hooks cannot intercept.
Support
~~~~~~~
``cache_relation`` currently only works with ``OneToOneField`` fields. Support
for regular ``ForeignKey`` fields is planned.
"""
from django.db.models.signals import post_save, post_delete
from .core import get_instance, delete_instance
def cache_relation(descriptor, timeout=None):
    """
    Adds utility methods to a model to obtain related
    model instances via a cache.

    ``descriptor`` is the reverse descriptor of a ``OneToOneField``
    (e.g. ``User.foo``); ``timeout`` is passed through to ``get_instance``
    (``None`` presumably means the backend's default timeout — verify).
    """
    rel = descriptor.related
    # The cached accessor is exposed as "<related_query_name>_cache".
    related_name = '%s_cache' % rel.field.related_query_name()
    @property
    def get(self):
        """
        Returns the cached value of the related model if found
        in the cache. Otherwise gets and caches the related model.
        """
        # Always use the cached "real" instance if available
        try:
            return getattr(self, descriptor.cache_name)
        except AttributeError:
            pass
        # Lookup cached instance
        try:
            return getattr(self, '_%s_cache' % related_name)
        except AttributeError:
            pass
        instance = get_instance(rel.model, self.pk, timeout)
        # Memoize on this instance so repeated access skips the cache backend.
        setattr(self, '_%s_cache' % related_name, instance)
        return instance
    setattr(rel.parent_model, related_name, get)
    # Clearing cache
    def clear(self):
        """
        Clears the cache of all related models of self.
        """
        delete_instance(rel.model, self)
    @classmethod
    def clear_pk(cls, *instances_or_pk): # pylint: disable=unused-argument
        """
        Clears the cache of all related models of
        the provided instances_or_pk.
        """
        delete_instance(rel.model, *instances_or_pk)
    def clear_cache(sender, instance, *args, **kwargs): # pylint: disable=unused-argument
        """
        Clears the cache of all related models of the
        given instance.
        """
        delete_instance(rel.model, instance)
    setattr(rel.parent_model, '%s_clear' % related_name, clear)
    setattr(rel.parent_model, '%s_clear_pk' % related_name, clear_pk)
    # Invalidate on every save/delete of the related model; weak=False keeps
    # the local closure alive after this function returns.
    post_save.connect(clear_cache, sender=rel.model, weak=False)
    post_delete.connect(clear_cache, sender=rel.model, weak=False)
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration adding the 'date_added' timestamp to clone.Product."""

    def forwards(self, orm):
        # Adding field 'Product.date_added'
        db.add_column(u'clone_product', 'date_added',
                      self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, default=datetime.datetime(2014, 8, 3, 0, 0), blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Product.date_added'
        db.delete_column(u'clone_product', 'date_added')

    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            # Fixed: field path was corrupted by a stray ' | ' artifact.
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            # Fixed: 'default' key was corrupted by a stray ' | ' artifact.
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'clone.product': {
            'Meta': {'object_name': 'Product'},
            'base_price': ('django.db.models.fields.FloatField', [], {}),
            'currency': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'username': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['clone']
# found at http://stackoverflow.com/questions/855759/python-try-else
# The statements in the else block are executed if execution falls off
# the bottom of the try, i.e. if there was no exception.
try:
    operation_that_can_throw_ioerror()
except IOError:
    handle_the_exception_somehow()
else:
    # we don't want to catch the IOError if it's raised
    another_operation_that_can_throw_ioerror()
# Fixed: the `finally` keyword below was corrupted by a stray ' | ' artifact.
finally:
    something_we_always_need_to_do()
# The else lets you make sure:
#
# * another_operation_that_can_throw_ioerror() is only run if there's no exception,
# * it's run before the finally block, and
# * any IOErrors it raises aren't caught here
|
nd,
* `Gamma` is the [gamma function](
https://en.wikipedia.org/wiki/Gamma_function), and,
* `||y||**2` denotes the [squared Euclidean norm](
https://en.wikipedia.org/wiki/Norm_(mathematics)#Euclidean_norm) of `y`.
The VectorStudentT distribution is a member of the [location-scale family](
https://en.wikipedia.org/wiki/Location-scale_family), i.e., it can be
constructed as,
```none
X ~ StudentT(df, loc=0, scale=1)
Y = loc + scale * X
```
Notice that the `scale` matrix has semantics closer to std. deviation than
covariance (but it is not std. deviation).
This distribution is an Affine transformation of iid
[Student's t-distributions](
https://en.wikipedia.org/wiki/Student%27s_t-distribution)
and should not be confused with the [Multivariate Student's t-distribution](
https://en.wikipedia.org/wiki/Multivariate_t-distribution). The
traditional Multivariate Student's t-distribution is type of
[elliptical distribution](
https://en.wikipedia.org/wiki/Elliptical_distribution); it has PDF:
```none
pdf(x; df, mu, Sigma) = (1 + ||y||**2 / df)**(-0.5 (df + k)) / Z
where,
y = inv(Sigma) (x - mu)
Z = abs(det(Sigma)) sqrt(df pi)**k Gamma(0.5 df) / Gamma(0.5 (df + k))
```
Notice that the Multivariate Student's t-distribution uses `k` where the
Vector Student's t-distribution has a `1`. Conversely the Vector version has a
broader application of the power-`k` in the normalization constant.
#### Examples
A single instance of a "Vector Student's t-distribution" is defined by a mean
vector of length `k` and a scale matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
import tensorflow_probability as tfp
tfd = tfp.distributions
# Initialize a single 3-variate vector Student's t-distribution.
mu = [1., 2, 3]
chol = [[1., 0, 0.],
[1, 3, 0],
[1, 2, 3]]
vt = tfd.VectorStudentT(df=2, loc=mu, scale_tril=chol)
# Evaluate this on an observation in R^3, returning a scalar.
vt.prob([-1., 0, 1])
# Initialize a batch of two 3-variate vector Student's t-distributions.
mu = [[1., 2, 3],
[11, 22, 33]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
vt = tfd.VectorStudentT(loc=mu, scale_tril=chol)
# Evaluate this on a two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1],
[-11, 0, 11]]
vt.prob(x)
```
For more examples of how to construct the `scale` matrix, see the
`tf.contrib.distributions.bijectors.Affine` docstring.
"""
@deprecation.deprecated(
"2018-10-01",
"The TensorFlow Distributions library has moved to "
"TensorFlow Probability "
"(https://github.com/tensorflow/probability). You "
"should update all references to use `tfp.distributions` "
"instead of `tf.contrib.distributions`.",
warn_once=True)
def __init__(self,
df,
loc=None,
scale_identity_multiplier=None,
scale_diag=None,
scale_tril=None,
scale_perturb_factor=None,
scale_perturb_diag=None,
validate_args=False,
allow_nan_stats=True,
name="VectorStudentT"):
"""Instantiates the vector Student's t-distributions on `R^k`.
The `batch_shape` is the broadcast between `df.batch_shape` and
`Affine.batch_shape` where `Affine` is constructed from `loc` and
`scale_*` arguments.
The `event_shape` is the event shape of `Affine.event_shape`.
Args:
df: Floating-point `Tensor`. The degrees of freedom of the
distribution(s). `df` must contain only positive values. Must be
scalar if `loc`, `scale_*` imply non-scalar batch_shape or must have the
same `batch_shape` implied by `loc`, `scale_*`.
      loc: Floating-point `Tensor`. If this is set to `None`, no `loc` is
applied.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix. When `scale_identity_multiplier =
scale_diag=scale_tril = None` then `scale += IdentityMatrix`. Otherwise
        no scaled-identity-matrix is added to `scale`.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ..., k], which represents a k x k
diagonal matrix. When `None` no diagonal term is added to `scale`.
scale_tril: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ..., k, k], which represents a k x k
lower triangular matrix. When `None` no `scale_tril` term is added to
`scale`. The upper triangular elements above the diagonal are ignored.
scale_perturb_factor: Floating-point `Tensor` representing factor matrix
with last two dimensions of shape `(k, r)`. When `None`, no rank-r
update is added to `scale`.
scale_perturb_diag: Floating-point `Tensor` representing the diagonal
matrix. `scale_perturb_diag` has shape [N1, N2, ..., r], which
represents an r x r Diagonal matrix. When `None` low rank updates will
take the form `scale_perturb_factor * scale_perturb_factor.T`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
"""
parameters = dict(locals())
graph_parents = [df, loc, scale_identity_multiplier, scale_diag,
scale_tril, scale_perturb_factor, scale_perturb_diag]
with ops.name_scope(name) as name:
with ops.name_scope("init", values=graph_parents):
# The shape of the _VectorStudentT distribution is governed by the
# relationship between df.batch_shape and affine.batch_shape. In
# pseudocode the basic procedure is:
# if df.batch_shape is scalar:
# if affine.batch_shape is not scalar:
# # broadcast distribution.sample so
# # it has affine.batch_shape.
# self.batch_shape = affine.batch_shape
# else:
# if affine.batch_shape is scalar:
# # let affine broadcasting do its thing.
# self.batch_shape = df.batch_shape
# All of the above magic is actually handled by TransformedDistribution.
# Here we really only need to collect the affine.batch_shape and decide
# what we're going to pass in to TransformedDistribution's
# (override) batch_shape arg.
affine = bijectors.Affine(
shift=loc,
scale_identity_multiplier=scale_identity_multiplier,
scale_diag=scale_diag,
scale_tril=scale_tril,
scale_perturb_factor=scale_perturb_factor,
scale_perturb_diag=scale_perturb_diag,
validate_args=validate_args)
distribution = student_t.StudentT(
df=df,
loc=array_ops.zeros([], dtype=affine.dtype),
scale=array_ops.ones([], dtype=affine.dtype))
batch_shape, override_event_shape = (
distribution_util.shapes_from_loc_and_scale(
affine.shift, affine.scale))
override_batch_shape = distribution_util.pick_vector(
distribution.is_scalar_batch(),
batch_shape,
constant_op.constant([], dtype=dtypes.int32))
super(_VectorStudentT, self).__init__(
distribution=distribution,
bijector=affine,
batch_shape=override_batch_shape,
event_shape=override_event_shape,
validate_args=validate_args,
name=name)
self._parameters = |
od(ast.Mod):
def prepare(self):
pass
def print_c(self):
return '%'
class C_LShift(ast.LShift):
    """C left-shift operator token."""

    def prepare(self):
        """Nothing to prepare for a bare operator."""

    def print_c(self):
        """Return this operator's C spelling."""
        return '<<'
class C_RShift(ast.RShift):
    """C right-shift operator token."""

    def prepare(self):
        """Nothing to prepare for a bare operator."""

    def print_c(self):
        """Return this operator's C spelling."""
        return '>>'
class C_BitOr(ast.BitOr):
def prepare(self):
pass
def print_c(self):
return '|'
class C_BitXor(ast.BitXor):
def prepare(self):
pass
def print_c(self):
return '^'
class C_BitAnd(ast.BitAnd):
def prepare(self):
pass
def print_c(self):
return '&'
class C_BoolOp(ast.BoolOp):
    """Boolean expression (``a and b and c``) printed as a parenthesised
    chain of C operands joined by this node's operator token."""
    def prepare(self):
        pass
    def print_c(self):
        """Return e.g. ``(a && b && c)``.

        Bug fix: the original ``pop(0)``-ed the first operand off
        ``self.values``, destructively mutating the AST node so a second
        ``print_c()`` call produced different (shorter) output.  This
        version leaves ``self.values`` untouched.
        """
        separator = ' ' + self.op.print_c() + ' '
        return '(' + separator.join(v.print_c() for v in self.values) + ')'
class C_And(ast.And):
    """Logical-and operator, emitted as the C token '&&'."""
    def prepare(self):
        pass
    def print_c(self):
        return '&&'
class C_Or(ast.Or):
    """Logical-or operator, emitted as the C token '||'."""
    def prepare(self):
        pass
    def print_c(self):
        return '||'
class C_Compare(ast.Compare):
    """Comparison chain (``a < b <= c``) printed as pairwise C comparisons
    joined with '&&': ``(a < b) && (b <= c)``."""
    def prepare(self):
        pass
    def print_c(self):
        # Bug fix: the original did self.comparators.insert(0, self.left),
        # permanently mutating the node, so repeated print_c() calls kept
        # prepending operands. Build a local operand list instead.
        operands = [self.left] + list(self.comparators)
        clauses = []
        for op, lhs, rhs in zip(self.ops, operands, operands[1:]):
            clauses.append('(' + lhs.print_c() + ' ' + op.print_c() +
                           ' ' + rhs.print_c() + ')')
        return ' && '.join(clauses)
class C_Eq(ast.Eq):
    """Equality operator, emitted as the C token '=='."""
    def prepare(self): pass
    def print_c(self): return '=='
class C_NotEq(ast.NotEq):
    """Inequality operator, emitted as the C token '!='."""
    def prepare(self): pass
    def print_c(self): return '!='
class C_Lt(ast.Lt):
    """Less-than operator, emitted as the C token '<'."""
    def prepare(self): pass
    def print_c(self): return '<'
class C_LtE(ast.LtE):
    """Less-or-equal operator, emitted as the C token '<='."""
    def prepare(self): pass
    def print_c(self): return '<='
class C_Gt(ast.Gt):
    """Greater-than operator, emitted as the C token '>'."""
    def prepare(self): pass
    def print_c(self): return '>'
class C_GtE(ast.GtE):
    """Greater-or-equal operator, emitted as the C token '>='."""
    def prepare(self): pass
    def print_c(self): return '>='
class C_Call(ast.Call):
    """Function-call node printed as a C call expression."""
    def prepare(self):
        pass
    def print_args(self):
        # Prints every argument with a *leading* ", " -- callers use this
        # when the first argument has already been emitted (see C_AnnAssign).
        asC = ''
        for arg in self.args:
            asC += ', '
            asC += arg.print_c()
        return asC
    def print_c(self):
        # 'renames' and 'module_rename' are module-level helpers defined
        # elsewhere in this file; they remap known library calls.
        if self.func.print_c() in renames:
            return module_rename(self)
        if isinstance(self.func,C_Attribute):
            # Convert OOP calls to regular function calls
            # NOTE(review): this mutates self.args/self.func in place, so a
            # second print_c() on the same node re-inserts the receiver.
            self.args.insert(0,self.func.value)
            self.func = C_Name(self.func.attr,None)
        asC = self.func.print_c() + '('
        useComma = False
        for arg in self.args:
            if useComma:
                asC += ', '
            else:
                useComma = True
            asC += arg.print_c()
        asC += ')'
        return asC
class C_IfExp(ast.IfExp):
def prepare(self):
pass
def print_c(self):
asC = '(' + self.test.print_c()
asC += ' ? ' + self.body.print_c()
asC += ' : ' + self.orelse.print_c() + ')'
return asC
class C_Attribute(ast.Attribute):
def prepare(self):
pass
def print_c(self):
return self.value.print_c() + '.' + self.attr
class C_Subscript(ast.Subscript):
def prepare(self):
pass
def print_c(self):
return self.value.print_c() + '[' + self.slice.print_c() + ']'
class C_Index(ast.Index):
def prepare(self):
pass
def print_c(self):
return self.value.print_c()
class C_Assign(ast.Assign):
    """Assignment printed as a (possibly chained) C assignment:
    ``a = b = value``."""
    def prepare(self):
        pass
    def print_c(self):
        pieces = [target.print_c() for target in self.targets]
        pieces.append(self.value.print_c())
        return ' = '.join(pieces)
if "AnnAssign" in ast.__dict__:
class C_AnnAssign(ast.AnnAssign):
def prepare(self):
pass
def print_c(self):
asC = self.annotation.print_c() + ' '
asC += self.target.print_c()
if isinstance(self.value, C_Call) and self.value.fun | c.print_c() in classNames:
asC += ';\n'
asC += self.value.func.print_c() + '___init__('
asC += se | lf.target.print_c()
asC += self.value.print_args() + ')'
else:
if self.value:
asC += ' = ' + self.value.print_c()
return asC
class C_AugAssign(ast.AugAssign):
    """Augmented assignment printed as a C compound assignment, e.g. x += 1."""
    def prepare(self):
        pass
    def print_c(self):
        return '{} {}= {}'.format(self.target.print_c(),
                                  self.op.print_c(),
                                  self.value.print_c())
class C_Assert(ast.Assert):
    """'assert' mapped onto a VERIFY(...) macro invocation."""
    def prepare(self):
        pass
    def print_c(self):
        return 'VERIFY(' + self.test.print_c() + ')'
class C_Pass(ast.Pass):
    """'pass' produces no C code at all."""
    def prepare(self): pass
    def print_c(self): return ''
class C_Import(ast.Import):
    """'import a.b' becomes '#include a/b.c' (only the first alias is used)."""
    def prepare(self):
        pass
    def print_c(self):
        path = self.names[0].name.replace('.', '/')
        return '#include ' + path + '.c\n'
class C_If(ast.If):
    """'if' statement printed as a C if/else with braced bodies."""
    def prepare(self):
        pass
    def print_c(self):
        # 'sameLineBraces' and 'indent' are module-level style settings
        # defined elsewhere in this file.
        asC = 'if ('
        asC += self.test.print_c()
        if sameLineBraces:
            asC += ') {\n'
        else:
            asC += ')\n{\n'
        for childNode in self.body:
            try:
                unindented = childNode.print_c()
                unindented = '\n'.join([indent + x for x in unindented.split('\n')])
                # Statements need a terminating ';'; blocks already end in '}'.
                if not unindented.endswith('}'):
                    unindented += ';'
                unindented += '\n'
                asC += unindented
            except Exception as e:
                # NOTE(review): on a child failure this prints the traceback
                # and returns the *partial* C output built so far.
                print(traceback.format_exc())
                print(ast.dump(childNode))
                return asC
        asC += '}'
        if self.orelse:
            if sameLineBraces:
                asC += ' else {\n'
            else:
                asC += '\nelse\n{\n'
            for childNode in self.orelse:
                try:
                    unindented = childNode.print_c()
                    unindented = '\n'.join([indent + x for x in unindented.split('\n')])
                    if not unindented.endswith('}'):
                        unindented += ';'
                    unindented += '\n'
                    asC += unindented
                except Exception as e:
                    print(traceback.format_exc())
                    print(ast.dump(childNode))
                    return asC
            asC += '}'
        return asC
class C_For(ast.For):
    """'for' loop printed as a C counting loop."""
    def prepare(self):
        pass
    def print_c(self):
        # Only supports for _ in range() for now
        # NOTE(review): the loop variable is not declared here, so the C
        # output assumes it was declared earlier; the '<' test also assumes
        # a positive step -- confirm for range() calls with negative steps.
        asC = ''
        var = self.target.print_c()
        low = '0'
        step = '1'
        if len(self.iter.args) > 1:
            low = self.iter.args[0].print_c()
            high = self.iter.args[1].print_c()
            if len(self.iter.args) > 2:
                step = self.iter.args[2].print_c()
        else:
            high = self.iter.args[0].print_c()
        asC += 'for (' + var + ' = '
        asC += low
        asC += '; ' + var + ' < ' + high + '; ' + var + ' += ' + step
        if sameLineBraces:
            asC += ') {\n'
        else:
            asC += ')\n{\n'
        for childNode in self.body:
            try:
                unindented = childNode.print_c()
                unindented = '\n'.join([indent + x for x in unindented.split('\n')])
                if not unindented.endswith('}'):
                    unindented += ';'
                unindented += '\n'
                asC += unindented
            except Exception as e:
                # On failure: log and return the partial output built so far.
                print(traceback.format_exc())
                print(ast.dump(childNode))
                return asC
        return asC + '}'
class C_While(ast.While):
    """'while' loop printed as a C while loop with a braced body."""
    def prepare(self):
        pass
    def print_c(self):
        asC = 'while (' + self.test.print_c()
        # 'sameLineBraces' / 'indent' are module-level style settings.
        if sameLineBraces:
            asC += ') {\n'
        else:
            asC += ')\n{\n'
        for childNode in self.body:
            try:
                unindented = childNode.print_c()
                unindented = '\n'.join([indent + x for x in unindented.split('\n')])
                # Statements need a ';'; nested blocks already end in '}'.
                if not unindented.endswith('}'):
                    unindented += ';'
                unindented += '\n'
                asC += unindented
            except Exception as e:
                # On failure: log and return the partial output built so far.
                print(traceback.format_exc())
                print(ast.dump(childNode))
                return asC
        return asC + '}'
class C_Break(ast.Break):
    """'break' maps directly onto the C break statement."""
    def prepare(self): pass
    def print_c(self): return 'break'
class C_Continue(ast.Continue):
    """'continue' maps directly onto the C continue statement."""
    def prepare(self): pass
    def print_c(self): return 'continue'
class C_Return(ast.Return):
    """'return expr' printed as the C return statement."""
    def prepare(self):
        pass
    def print_c(self):
        return 'return {}'.format(self.value.print_c())
class C_ClassDef(ast.ClassDef):
def prepare(self):
classNames.append(self.name)
def print_c(self):
asC = '/*** Class: ' + self.name + |
flags = h5py.h5p.CRT_ORDER_TRACKED | h5py.h5p.CRT_ORDER_INDEXED
gcpl.set_link_creation_order(flags)
name = self.name.encode("utf-8")
gid = h5py.h5g.create(self._parent.id, name, gcpl=gcpl)
self.group = h5py.Group(gid)
    def create_link(self, target, name):
        """Create (or replace) a hard link called *name* in this group that
        points to *target*'s underlying HDF5 group.

        :param target: object exposing the linked group via ``_h5group.group``
        :param name: link name; an existing entry of that name is replaced
        """
        self._create_h5obj()
        if name in self.group:
            del self.group[name]
        self.group[name] = target._h5group.group
@classmethod
def create_from_h5obj(cls, h5obj):
parent = h5obj.parent
name = h5obj.name.split("/")[-1]
if isinstance(h5obj, h5py.Group):
return cls(parent, name)
elif isinstance(h5obj, h5py.Dataset):
return H5DataSet(parent, name)
else:
raise ValueError("Invalid object: "
"{} must be either h5py.Group of h5py.Dataset.")
    def open_group(self, name, create=False):
        """
        Returns a new H5Group with the given name contained in the current
        group. If the current group does not exist in the file,
        it is automatically created.
        :param name: the name of the group
        :param create: creates the child group in the file if it does not exist
        :return: a new H5Group object
        """
        # Ensure this group itself exists before handing out a child wrapper.
        self._create_h5obj()
        return H5Group(self.group, name, create)
    def create_dataset(self, name, shape, dtype):
        """
        Creates a dataset object under the current group with a given name,
        shape, and type.
        :param name: the name of the dataset
        :param shape: tuple representing the shape of the dataset
        :param dtype: the type of the data for this dataset (DataType)
        :return: a new H5DataSet object
        """
        self._create_h5obj()
        return H5DataSet(self.group, name, dtype, shape)
def get_dataset(self, name):
"""
Returns a contained H5DataSet object.
:param name: name of the dataset
:return: H5DataSet object
"""
notfound = KeyError("No DataSet named {} found.")
if self.group is None:
raise notfound
if name in self.group:
dset = sel | f.group[name]
return H5DataSet.create_from_h5obj(dset)
else:
| raise notfound
    def write_data(self, name, data, dtype=None):
        """
        Writes the data to a Dataset contained in the group with the
        given name. Creates the Dataset if necessary.
        :param name: name of the Dataset object
        :param data: the data to write
        :param dtype: optionally specify the data type, otherwise it will be
        automatically determined by the data
        """
        shape = np.shape(data)
        if self.has_data(name):
            # Reuse the existing dataset, resizing it to the new shape.
            dset = self.get_dataset(name)
            dset.shape = shape
        else:
            if dtype is None:
                # NOTE(review): the type is inferred from the first element,
                # so this raises IndexError for empty 'data' -- confirm
                # callers never pass an empty sequence without a dtype.
                dtype = DataType.get_dtype(data[0])
            dset = self.create_dataset(name, shape, dtype)
        dset.write_data(data)
    def get_data(self, name):
        """
        Returns the data contained in the dataset identified by 'name', or an
        empty list if a dataset of that name does not exist in the Group.
        :param name: The name of the dataset
        :return: The data contained in the dataset as a numpy array or None
        """
        if name not in self.group:
            return []
        dset = self.group[name]
        # TODO: Error if dset is Group?
        return dset[:]
    def has_data(self, name):
        """
        Return True if the Group contains a Dataset object with the given name.
        :param name: name of Dataset
        :return: True if Dataset exists in Group, False if it does not exist,
        or exists and is not a Dataset
        """
        # getclass=True returns the h5py class without opening the object.
        if self.group.get(name, getclass=True) == h5py.Dataset:
            return True
        else:
            return False
    def has_by_id(self, id_or_name):
        """Return True if a child matching the given UUID or name exists."""
        if not self.group:
            return False
        if util.is_uuid(id_or_name):
            # for/else: the else branch runs when the loop finishes without
            # returning, i.e. no child carried the requested entity_id.
            for item in self:
                if item.get_attr("entity_id") == id_or_name:
                    return True
            else:
                return False
        else:
            return id_or_name in self.group
    def get_by_id_or_name(self, id_or_name):
        """Dispatch to get_by_id for UUID strings, get_by_name otherwise."""
        if util.is_uuid(id_or_name):
            return self.get_by_id(id_or_name)
        else:
            return self.get_by_name(id_or_name)
    def get_by_name(self, name):
        """Return the wrapped child with the given name.

        :raises ValueError: if no such child exists
        """
        if self.group and name in self.group:
            return self.create_from_h5obj(self.group[name])
        else:
            # NOTE(review): if self.group is None the message formatting
            # itself raises AttributeError instead of this ValueError.
            raise ValueError("No item with name {} found in {}".format(
                name, self.group.name
            ))
    def get_by_id(self, id_):
        """Return the child whose 'entity_id' attribute equals *id_*.

        :raises ValueError: if no such child exists
        """
        if self.group:
            for item in self:
                if item.get_attr("entity_id") == id_:
                    return item
        raise ValueError("No item with ID {} found in {}".format(
            id_, self.name
        ))
    def get_by_pos(self, pos):
        """Return the child at position *pos* in creation order."""
        if not self.group:
            raise ValueError
        # Using low level interface to specify iteration order
        name, _ = self.group.id.links.iterate(lambda n: n,
                                              idx_type=h5py.h5.INDEX_CRT_ORDER,
                                              order=h5py.h5.ITER_INC,
                                              idx=pos)
        return self.get_by_name(name)
    def delete(self, id_or_name):
        """Delete the child identified by UUID or name; if this group then
        becomes empty (and is not a top-level container) remove it as well.

        :raises ValueError: if the child could not be deleted
        """
        if util.is_uuid(id_or_name):
            name = self.get_by_id_or_name(id_or_name).name
        else:
            name = id_or_name
        try:
            del self.group[name]
        except Exception:
            raise ValueError("Error deleting {} from {}".format(name,
                                                                self.name))
        # Delete if empty and non-root container
        groupdepth = len(self.group.name.split("/")) - 1
        if not len(self.group) and groupdepth > 1:
            del self.parent.group[self.name]
            # del self.group
            self.group = None
    def set_attr(self, name, value):
        """Set an HDF5 attribute; a value of None removes the attribute."""
        self._create_h5obj()
        if value is None:
            if name in self.group.attrs:
                del self.group.attrs[name]
        else:
            self.group.attrs[name] = value
    def get_attr(self, name):
        """Return the attribute value (bytes decoded to str) or None."""
        if self.group is None:
            return None
        attr = self.group.attrs.get(name)
        if isinstance(attr, bytes):
            attr = attr.decode()
        return attr
def find_children(self, filtr=None, limit=None):
result = []
start_depth = len(self.group.name.split("/"))
def match(name, obj):
curdepth = name.split("/")
if limit is not None and curdepth == start_depth + limit:
return None
h5grp = H5Group.create_from_h5obj(obj)
if filtr(h5grp):
result.append(h5grp)
self.group.visititems(match)
return result
    @property
    def file(self):
        """
        An H5Group object which represents the file root.
        :return: H5Group at '/'
        """
        return H5Group(self.group.file, "/", create=False)
    @property
    def h5root(self):
        """
        Returns the H5Group of the Block or top-level Section which contains
        this object. Returns None if requested on the file root '/' or the
        /data or /metadata groups.
        :return: Top level object containing this group (H5Group)
        """
        # Decide by absolute path depth: "/a/b" splits into 3 parts (top
        # level object -> self); "/" or "/a" (2 parts) -> None; anything
        # deeper walks up through the parent.
        pathparts = self.group.name.split("/")
        if len(pathparts) == 3:
            return self
        if self.group.name == "/":
            return None
        if len(pathparts) == 2:
            return None
        return self.parent.h5root
@property
def root(self):
"""
Returns the Block or top-level Section which contains this object.
Returns None if requested on the file root '/' or the /data or
/metadata groups.
:return: Top level object containing this group (Block or Section)
"""
|
M,
OperandType.IMM_FULL : O_IMM,
OperandType.IMM32 : O_IMM,
OperandType.SEIMM8 : O_IMM,
OperandType.IMM16_1 : O_IMM_1,
OperandType.IMM8_1 : O_IMM_1,
OperandType.IMM8_2 : O_IMM_2,
OperandType.REG8 : O_REG,
OperandType.REG16 : O_REG,
OperandType.REG_FULL : O_REG,
OperandType.REG32 : O_REG,
OperandType.REG32_64 : O_REG,
OperandType.FREG32_64_RM : O_REG,
OperandType.RM8 : O_MEM,
OperandType.RM16 : O_MEM,
OperandType.RM_FULL : O_MEM,
OperandType.RM32_64 : O_MEM,
OperandType.RM16_32 : O_MEM,
OperandType.FPUM16 : O_MEM,
OperandType.FPUM32 : O_MEM,
OperandType.FPUM64 : O_MEM,
OperandType.FPUM80 : O_MEM,
OperandType.R32_M8 : O_MEM,
OperandType.R32_M16 : O_MEM,
OperandType.R32_64_M8 : O_MEM,
OperandType.R32_64_M16 : O_MEM,
OperandType.RFULL_M16 : O_MEM,
OperandType.CREG : O_REG,
OperandType.DREG : O_REG,
OperandType.SREG : O_REG,
OperandType.SEG : O_REG,
OperandType.ACC8 : O_REG,
OperandType.ACC16 : O_REG,
OperandType.ACC_FULL : O_REG,
OperandType.ACC_FULL_NOT64 : O_REG,
OperandType.MEM16_FULL : O_MEM,
OperandType.PTR16_FULL : O_PTR,
OperandType.MEM16_3264 : O_MEM,
OperandType.RELCB : O_PC,
OperandType.RELC_FULL : O_PC,
OperandType.MEM : O_MEM,
OperandType.MEM_OPT : O_MEM,
OperandType.MEM32 : O_MEM,
OperandType.MEM32_64 : O_MEM,
OperandType.MEM64 : O_MEM,
OperandType.MEM128 : O_MEM,
OperandType.MEM64_128 : O_MEM,
OperandType.MOFFS8 : O_MEM,
OperandType.MOFFS_FULL : O_MEM,
OperandType.CONST1 : O_IMM,
OperandType.REGCL : O_REG,
OperandType.IB_RB : O_REG,
OperandType.IB_R_FULL : O_REG,
OperandType.REGI_ESI : O_MEM,
OperandType.REGI_EDI : O_MEM,
OperandType.REGI_EBXAL : O_MEM,
OperandType.REGI_EAX : O_MEM,
OperandType.REGDX : O_REG,
OperandType.REGECX : O_REG,
OperandType.FPU_SI : O_REG,
OperandType.FPU_SSI : O_REG,
OperandType.FPU_SIS : O_REG,
OperandType.MM : O_REG,
OperandType.MM_RM : O_REG,
OperandType.MM32 : O_MEM,
OperandType.MM64 : O_MEM,
OperandType.XMM : O_REG,
OperandType.XMM_RM : O_REG,
OperandType.XMM16 : O_MEM,
OperandType.XMM32 : O_MEM,
OperandType.XMM64 : O_MEM,
OperandType.XMM128 : O_MEM,
OperandType.REGXMM0 : O_REG,
OperandType.RM32 : O_MEM,
OperandType.REG32_64_M8 : O_MEM,
OperandType.REG32_64_M16 : O_MEM,
OperandType.WREG32_64 : O_REG,
OperandType.WRM32_64 : O_REG,
OperandType.WXMM32_64 : O_MEM,
OperandType.VXMM : O_REG,
OperandType.XMM_IMM : O_IMM,
OperandType.YXMM : O_REG,
OperandType.YXMM_IMM : O_REG,
OperandType.YMM : O_REG,
OperandType.YMM256 : O_MEM,
OperandType.VYMM : O_REG,
OperandType.VYXMM : O_REG,
OperandType.YXMM64_256 : O_MEM,
OperandType.YXMM128_256 : O_MEM,
OperandType.LXMM64_128 : O_MEM,
OperandType.LMEM128_256 : O_MEM
}
# Maps an already-seen flags value to its allocated table slot
# (filled lazily by FormatInstruction below).
flagsDict = {}
def CheckOTCollisions(ii):
    """ Checks whether an instruction has two or more operands that use the same fields in the diStorm3 structure.
    E.G: ENTER 0x10, 0x1 --> This instruction uses two OT_IMM, which will cause a collision and use the same field twice which is bogus. """
    # NOTE: this module is Python 2 -- map() returns a list here (so
    # types.count() works) and 'print' is a statement.
    types = map(lambda x: _OPT2T[x], ii.operands)
    # Regs cannot cause a collision, since each register is stored inside the operand itself.
    for i in types:
        if i != O_REG and types.count(i) > 1:
            print "**WARNING: Operand type collision for instruction: " + ii.mnemonics[0], ii.tag
            break
# This function, for certain flow-control related instructions, will set their type.
def CheckForFlowControl(ii):
    """ Sets ii.flowControl according to the instruction's first mnemonic. """
    # CMOVxx is classified first, by prefix.
    if ii.mnemonics[0].find("CMOV") == 0:
        ii.flowControl = FlowControl.CMOV
        return
    # Should I include SYSCALL ?
    pairs = [
        (["INT", "INT1", "INT 3", "INTO", "UD2"], FlowControl.INT),
        (["CALL", "CALL FAR"], FlowControl.CALL),
        (["RET", "IRET", "RETF"], FlowControl.RET),
        (["SYSCALL", "SYSENTER", "SYSRET", "SYSEXIT"], FlowControl.SYS),
        (["JMP", "JMP FAR"], FlowControl.UNC_BRANCH),
        (["JCXZ", "JO", "JNO", "JB", "JAE", "JZ", "JNZ", "JBE", "JA", "JS", "JNS", "JP", "JNP", "JL", "JGE", "JLE", "JG", "LOOP", "LOOPZ", "LOOPNZ"], FlowControl.CND_BRANCH)
    ]
    # Default: not a flow-control instruction.
    ii.flowControl = 0
    for p in pairs:
        # Exact membership test against each mnemonic group.
        if ii.mnemonics[0] in p[0]:
            ii.flowControl = p[1]
            return
def CheckWritableDestinationOperand(ii):
    """ Sets InstFlag.DST_WR for instructions whose first mnemonic implies a
    writable destination operand: MOV*/SET*/CMOV*/CMPXCHG* prefixes, or an
    exact match against a known list of arithmetic/logic instructions. """
    prefixes = ["MOV", "SET", "CMOV", "CMPXCHG"]
    for i in prefixes:
        if ii.mnemonics[0].find(i) == 0:
            ii.flags |= InstFlag.DST_WR
            return
    mnemonics = [
        "ADD", "OR", "ADC", "SBB", "AND", "SUB", "XOR", "INC", "DEC", "LEA", "XCHG",
        "ROL", "ROR", "RCL", "RCR", "SHL", "SHR", "SAL", "SAR", "SHLD", "SHRD",
        "NEG", "NOT", "MUL", "IMUL", "DIV", "IDIV",
        "POP", "BTR", "BTS", "BTC", "XADD", "BSWAP",
        "LZCNT", "MOVBE", "POPCNT", "CRC32", "SMSW"
    ]
    # Bug fix: the original looped "for i in mnemonics: if ii.mnemonics[0] in i"
    # which is a per-element *substring* test, wrongly flagging e.g. "BT"
    # (a substring of "BTR"/"BTS"/"BTC") as having a writable destination.
    # Use exact list membership, consistent with CheckForFlowControl.
    if ii.mnemonics[0] in mnemonics:
        ii.flags |= InstFlag.DST_WR
        return
def FormatInstruction(ii):
    """ Formats a string with all information relevant for diStorm InstInfo structure
    or the InstInfoEx. These are the internal structures diStorm uses for holding the instructions' information.
    Using this structure diStorm knows how to format an opcode when it reads it from the stream.
    An instruction information structure is found by its byte codes with a prefix of "II_".
    So for example ADD EAX, Imm32 instruction is II_00.
    Since there are several types of instructions information structures,
    the tables which point to these non-default InstInfo structures, will have to cast the pointer. """
    # There might be optional fields, if there's a 3rd operand or a second/third mnemonic.
    optFields = ""
    # Default type of structure is InstInfo.
    type = "_InstInfo"
    # Make sure the instruction can be fully represented using the diStorm3 _DecodeInst structure.
    CheckOTCollisions(ii)
    # Add flags for flow control instructions.
    CheckForFlowControl(ii)
    # Add flags for writable destination operand.
    CheckWritableDestinationOperand(ii)
    # Pad mnemonics to three, in case EXMNEMONIC/2 isn't used (so we don't get an exception).
    mnems = TranslateMnemonics([None, ii.classType][(ii.flags & InstFlag.PSEUDO_OPCODE) == InstFlag.PSEUDO_OPCODE], ii.mnemonics) + ["0", "0"]
    # Pad operands to at least three (so we don't get an exception too, since there might be instructions with no operands at all).
    ops = ii.operands + [OperandType.NONE, OperandType.NONE, OperandType.NONE, OperandType.NONE]
    # Is it an extended structure?
    if ii.flags & InstFlag.EXTENDED:
        # Since there's a second and/or a third mnemonic, use the InstInfoEx structure.
        type = "_InstInfoEx"
        flagsEx = 0
        # Fix flagsEx to have the VEX flags, except PRE_VEX.
        if ii.flags & InstFlag.PRE_VEX:
            flagsEx = ii.flags >> InstFlag.FLAGS_EX_START_INDEX
        # If there's a third operand, use it, otherwise NONE.
        op3 = [OperandType.NONE, ops[2]][(ii.flags & InstFlag.USE_OP3) == InstFlag.USE_OP3]
        op4 = [OperandType.NONE, ops[3]][(ii.flags & InstFlag.USE_OP4) == InstFlag.USE_OP4]
        # NOTE: raising a string is Python 2 only; under Python 3 this must
        # become e.g. 'raise ValueError(...)'.
        if flagsEx >= 256: # Assert the size of flagsEx is enough to holds this value.
            raise "FlagsEx exceeded its 8 bits. Change flagsEx of _InstInfoEx to be uint16!"
        # Concat the mnemonics and the third operand.
        optFields = ", 0x%x, %d, %d, %s, %s" % (flagsEx, op3, op4, mnems[1], mnems[2])
    # Notice we filter out internal bits from flags.
    flags = ii.flags & ((1 << InstFlag.FLAGS_EX_START_INDEX)-1)
    # Allocate a slot for this flag if needed.
    # NOTE: dict.has_key is Python 2 only ('flags in flagsDict' on Python 3).
    if not flagsDict.has_key(flags):
        flagsDict[flags] = len(flagsDict) + FLAGS_BASE_INDEX # Skip a few reserved slots.
    # Get the flags-index.
    flagsIndex = flagsDict[flags]
    if flagsIndex >= 256:
        raise "FlagsIndex exceeded its 8 bits. Change flags of _InstInfo to be uint16!"
    # Also classType and flow control are shared in two nibbles.
    fields = "0x%x, %d, %d, %d, %s" % (flagsIndex, ops[1], ops[0], (ii.classType << 3) | ii.flowControl, mnems[0])
    # "Structure-Name" = II_Bytes-Code {Fields + Optional-Fields}.
    return ("\t/*II%s*/ {%s%s}" % (ii.tag, fields, optFields), (ii.flags & InstFlag.EXTENDED) != 0)
def FilterTable(table):
    """ Table filter hook; currently every table goes to the output. """
    return True
def CreateTables(db):
""" This is the new tables generator code as for May 2011.
Its purpose is to return all t |
#!/usr/bin/env python3
import time
import random
import socket
from flask import Flask, render_template, redirect, url_for, request, jsonify
import config
# Module-level debug-log file handle; opened (append mode) in _reset().
log = None
# classes
class Agent():
    """State of one robot: its IP address, walking direction (cw=True means
    clockwise), protocol state, and the label of the node it stands on."""
    def __init__(self, ip, cw=True, node=None, state='initial'):
        self.ip = ip
        self.cw = cw
        self.node = node
        self.state = state
    def __repr__(self):
        return 'Agent: ip {}, direction CW: {}, state: {}, node: {}'.format(
            self.ip, self.cw, self.state, self.node)
class Node():
    """A ring node identified by an integer label, holding the IPs of the
    agents currently standing on it."""
    def __init__(self, label):
        assert isinstance(label, int), 'Node constructor accepts numeric label only'
        self.label = label
        # IPs of the agents currently in this node.
        self.agents = []
    def add_agent(self, agent_ip):
        """Record that the agent with the given IP entered this node."""
        self.agents.append(agent_ip)
    def __repr__(self):
        # 'app' is the module-level server instance defined further below.
        return '<Node {}: [{}]>'.format(
            self.label, ' | '.join(str(app.agents[ip]) for ip in self.agents))
class Ring():
    """The ring network: a fixed circle of Node objects that agents walk
    around one step at a time."""
    def __init__(self, n_nodes):
        # Node labels double as indices into self._nodes (0 .. n_nodes-1).
        self._nodes = [Node(i) for i in range(n_nodes)]
        self.n_nodes = n_nodes
    def get_node(self, label):
        """Return the Node with the given label."""
        return self._nodes[label]
    def next(self, agent):
        """Return next node."""
        i = 1 if agent.cw else -1
        return self._nodes[(agent.node+i) % self.n_nodes]
    def prev(self, agent):
        """Return prev node."""
        i = -1 if agent.cw else 1
        return self._nodes[(agent.node+i) % self.n_nodes]
    def blocked(self, agent):
        """Check if the next node is blocked."""
        next_node = self.next(agent)
        if agent.ip == app.malicious_ip:
            # The malicious agent is blocked by any agent ahead of it.
            return len(next_node.agents) > 0
        else:
            # Honest agents are blocked only by the malicious agent.
            return app.malicious_ip in next_node.agents
    def random_place_agents(self):
        """Randomly place agents in the ring."""
        #a = app.agents[app.agents_ips[0]]
        #a.node = 1
        #self.get_node(1).add_agent(a.ip)
        #a.cw = False
        #a = app.agents[app.agents_ips[1]]
        #a.node = 2
        #self.get_node(2).add_agent(a.ip)
        #a.cw = False
        #a = app.agents[app.agents_ips[2]]
        #a.node = 4
        #self.get_node(4).add_agent(a.ip)
        #a.cw = True
        #a = app.agents[app.malicious_ip]
        #a.node = 6
        #self.get_node(6).add_agent(a.ip)
        #a.cw = True
        # True = clockwise
        # False = counterclockwise
        # NOTE(review): this fixed placement plus the unconditional 'return'
        # below makes the randomized placement at the end unreachable --
        # looks like leftover debug scaffolding; confirm before removing.
        a = app.agents[app.agents_ips[0]]
        a.node = 3
        self.get_node(3).add_agent(a.ip)
        a.cw = False
        a = app.agents[app.agents_ips[1]]
        a.node = 6
        self.get_node(6).add_agent(a.ip)
        a.cw = False
        a = app.agents[app.agents_ips[2]]
        a.node = 5
        self.get_node(5).add_agent(a.ip)
        a.cw = True
        a = app.agents[app.malicious_ip]
        a.node = 1
        self.get_node(1).add_agent(a.ip)
        a.cw = False
        return
        # at most 1 agent per node, randomize direction in case of unoriented ring
        for agent, node in zip(app.agents.values(), random.sample(self._nodes, len(app.agents.keys()))):
            agent.cw = True if config.oriented else random.choice([True, False])
            agent.node = node.label
            self.get_node(node.label).add_agent(agent.ip)
    def dump(self):
        """Serialise the ring as {node label: [(ip, cw, state, node), ...]}."""
        ring = dict()
        for node in self._nodes:
            ring[str(node.label)] = [(app.agents[a].ip, str(app.agents[a].cw), app.agents[a].state, app.agents[a].node) for a in node.agents]
        return ring
    def __repr__(self):
        return ', '.join(str(node) for node in self._nodes)
class MTFGRServer(Flask):
    '''Wrapper around the Flask class used to store additional information.'''
    def __init__(self, *args, **kwargs):
        super(MTFGRServer, self).__init__(*args, **kwargs)
        # Simulation state, seeded from the config module.
        self.agents = dict()
        self.agents_ips = config.agents_ips
        self.malicious_ip = config.malicious_ip
        self.oriented = config.oriented
        self.ring = Ring(config.n_nodes)
        self.started = False
# instance of the web application; shared mutable state for all views
app = MTFGRServer(__name__)
# auxiliary functions
def _reset():
    """Reset the global variables by parsing again the config file."""
    # NOTE(review): 'import config' is cached by Python, so this does NOT
    # re-read config.py from disk; importlib.reload would be needed for
    # that -- confirm the intended behaviour.
    import config
    global log
    app.ring = Ring(config.n_nodes)
    app.agents = {ip: Agent(ip) for ip in config.agents_ips}
    app.malicious_ip = config.malicious_ip
    app.agents[app.malicious_ip] = Agent(app.malicious_ip, state='malicious')
    app.oriented = config.oriented
    app.started = False
    app.ring.random_place_agents()
    # NOTE(review): the log handle is opened for append and never closed;
    # also the '\\n' in the banner writes a literal backslash-n on purpose?
    log = open('/tmp/ev3.log', 'a')
    log.write('\n\nIIIIIIIIIINNNNNNNNNIIIIIIIIIIITTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTTT\\n\n')
# views
def _communicate_start():
    """Open a TCP connection to every robot (malicious one last) to signal
    the start of the protocol."""
    port = 31337
    for ip in app.agents_ips[::-1] + [app.malicious_ip]:
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn.connect((ip, port))
        # conn.sendall(b'Go!\n')
        conn.close()
@app.route('/start')
def start():
    """Mark the simulation as started and (best effort) signal every bot."""
    app.started = True
    try:
        _communicate_start()
    except Exception:
        # Deliberate best-effort: unreachable bots must not break the view.
        pass
    return redirect(url_for('index'))
@app.route('/reset')
def reset():
    """Reinitialise the whole simulation state, then show the index page."""
    _reset()
    return redirect(url_for('index'))
@app.route('/status')
def global_status():
    """Get the whole ring status."""
    return jsonify(**app.ring.dump())
@app.route('/get/<agent_ip>')
def get_status(agent_ip):
    """Get the list of agents in the current node."""
    agent = app.agents[agent_ip]
    # TODO (translated from Italian "aggiungere blocked"): add blocked info.
    return jsonify(agents=[app.agents[ip].state for ip in app.ring.get_node(agent.node).agents if ip != agent_ip],
                   blocked=app.ring.blocked(agent))
@app.route('/set/<agent_ip>', methods=['GET'])
def set_status(agent_ip):
    """Update an agent's state/direction from query args and, unless blocked
    or stopped, advance it one node; reply with its blocked status."""
    global log
    turned = request.args.get('turned') == '1'
    state = request.args.get('state')
    stopped = request.args.get('stopped') == '1'
    # logging
    sss = '\n\n[Request] {} - ip: {}, turned: {}, state: {}, stopped: {}\n'.format(time.time(), agent_ip, turned, state, stopped)
    log.write(sss)
    log.write('[Status pre]\n')
    log.write(str(app.ring.dump()))
    agent = app.agents[agent_ip]
    agent.state = state
    # A 'turned' report flips the walking direction.
    agent.cw = agent.cw if not turned else not agent.cw
    blocked = app.ring.blocked(agent)
    if not blocked and not stopped:
        # advance to the next node if not blocked
        node = app.ring.get_node(agent.node)
        next_node = app.ring.next(agent)
        agent.node = next_node.label
        node.agents.remove(agent_ip)
        next_node.add_agent(agent_ip)
    log.write('\n[Status post]\n')
    log.write(str(app.ring.dump()))
    return jsonify(blocked=blocked)
@app.route('/')
def index():
    """Render the main page with the current started flag."""
    return render_template('base.html', started=app.started)
def main():
    """Run the development server, reachable from any interface."""
    app.run(host='0.0.0.0', debug=config.debug)
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
# Register the Plugin_Video model with Django's admin site so it can be
# managed through the admin interface.
from django.contrib import admin
from ionyweb.plugin_app.plugin_video.models import Plugin_Video
admin.site.register(Plugin_Video)
|
rom functools import wraps
def requires_auth(endpoint_class):
    """ Enables Authorization logic for decorated functions.

    :param endpoint_class: the 'class' to which the decorated endpoint belongs
                           to. Can be 'resource' (resource endpoint), 'item'
                           (item endpoint) and 'home' for the API entry point.

    .. versionchanged:: 0.0.7
       Passing the 'resource' argument when invoking auth.authenticate()

    .. versionchanged:: 0.0.5
       Support for Cross-Origin Resource Sharing (CORS): 'OPTIONS' request
       method is now public by default. The actual method ('GET', etc.) will
       still be protected if so configured.

    .. versionadded:: 0.0.4
    """
    def fdec(f):
        @wraps(f)
        def decorated(*args, **kwargs):
            if args:
                # resource or item endpoint
                resource_name = args[0]
                resource = app.config['DOMAIN'][args[0]]
                if endpoint_class == 'resource':
                    public = resource['public_methods']
                    # Bug fix: copy before extending. 'roles' used to alias
                    # the list stored in the domain config, so the '+='
                    # below mutated the configured role list in place,
                    # growing it on every request.
                    roles = list(resource['allowed_roles'])
                    if request.method in ['GET', 'HEAD', 'OPTIONS']:
                        roles += resource['allowed_read_roles']
                    else:
                        roles += resource['allowed_write_roles']
                elif endpoint_class == 'item':
                    public = resource['public_item_methods']
                    # Same aliasing fix as above for item-level roles.
                    roles = list(resource['allowed_item_roles'])
                    if request.method in ['GET', 'HEAD', 'OPTIONS']:
                        roles += resource['allowed_item_read_roles']
                    else:
                        roles += resource['allowed_item_write_roles']
                if callable(resource['authentication']):
                    # Authentication given as a class: instantiate per request.
                    auth = resource['authentication']()
                else:
                    auth = resource['authentication']
            else:
                # home
                resource_name = resource = None
                public = app.config['PUBLIC_METHODS'] + ['OPTIONS']
                # Same aliasing fix for the global app-level role list.
                roles = list(app.config['ALLOWED_ROLES'])
                if request.method in ['GET', 'OPTIONS']:
                    roles += app.config['ALLOWED_READ_ROLES']
                else:
                    roles += app.config['ALLOWED_WRITE_ROLES']
                auth = app.auth
            if auth and request.method not in public:
                if not auth.authorized(roles, resource_name, request.method):
                    # Not authorized: challenge the client.
                    return auth.authenticate()
            return f(*args, **kwargs)
        return decorated
    return fdec
class BasicAuth(object):
    """ Implements Basic AUTH logic. Should be subclassed to implement custom
    authentication checking.

    .. versionchanged:: 0.4
       ensure all errors returns a parseable body #366.
       auth.request_auth_value replaced with getter and setter methods which
       rely on flask's 'g' object, for enhanced thread-safety.

    .. versionchanged:: 0.1.1
       auth.request_auth_value is now used to store the auth_field value.

    .. versionchanged:: 0.0.9
       Support for user_id property.

    .. versionchanged:: 0.0.7
       Support for 'resource' argument.

    .. versionadded:: 0.0.4
    """
    def set_request_auth_value(self, value):
        # Stored on flask.g so the value is request-local (thread-safe).
        g.auth_value = value
    def get_request_auth_value(self):
        # Returns None when no auth value was set for this request.
        return g.get("auth_value")
    def check_auth(self, username, password, allowed_roles, resource, method):
        """ This function is called to check if a username / password
        combination is valid. Must be overridden with custom logic.

        :param username: username provided with current request.
        :param password: password provided with current request
        :param allowed_roles: allowed user roles.
        :param resource: resource being requested.
        :param method: HTTP method being executed (POST, GET, etc.)
        """
        raise NotImplementedError
    def authenticate(self):
        """ Returns a standard 401 response that enables basic auth.
        Override if you want to change the response and/or the realm.
        """
        # NOTE(review): 'Basic realm:"..."' is not the RFC 7617 spelling
        # ('Basic realm="..."'); kept as-is since clients may depend on it.
        resp = Response(None, 401, {'WWW-Authenticate': 'Basic realm:"%s"' %
                                    __package__})
        abort(401, description='Please provide proper credentials',
              response=resp)
    def authorized(self, allowed_roles, resource, method):
        """ Validates that the current request is allowed to pass through.

        :param allowed_roles: allowed roles for the current request, can be a
                              string or a list of roles.
        :param resource: resource being requested.
        """
        auth = request.authorization
        return auth and self.check_auth(auth.username, auth.password,
                                        allowed_roles, resource, method)
class HMACAuth(BasicAuth):
    """ Hash Message Authentication Code (HMAC) authentication logic. Must be
    subclassed to implement custom authorization checking.

    .. versionchanged:: 0.4
       Ensure all errors returns a parseable body #366.

    .. versionchanged:: 0.0.9
       Replaced the now deprecated request.data with request.get_data().

    .. versionchanged:: 0.0.7
       Support for 'resource' argument.

    .. versionadded:: 0.0.5
    """
    def check_auth(self, userid, hmac_hash, headers, data, allowed_roles,
                   resource, method):
        """ This function is called to check if a token is valid. Must be
        overridden with custom logic.

        :param userid: user id included with the request.
        :param hmac_hash: hash included with the request.
        :param headers: request headers. Suitable for hash computing.
        :param data: request data. Suitable for hash computing.
        :param allowed_roles: allowed user roles.
        :param resource: resource being requested.
        :param method: HTTP method being executed (POST, GET, etc.)
        """
        raise NotImplementedError
    def authenticate(self):
        """ Returns a standard 401. Override if you want to change the
        response.
        """
        abort(401, description='Please provide proper credentials')
    def authorized(self, allowed_roles, resource, method):
        """ Validates that the current request is allowed to pass through.

        :param allowed_roles: allowed roles for the current request, can be a
                              string or a list of roles.
        :param resource: resource being requested.
        """
        auth = request.headers.get('Authorization')
        try:
            userid, hmac_hash = auth.split(':')
        # Bug fix: the original bare 'except:' also swallowed
        # KeyboardInterrupt/SystemExit. Catch only what this line can
        # raise: a missing header (auth is None -> AttributeError) or a
        # malformed value (wrong number of ':' parts -> ValueError).
        except (AttributeError, ValueError):
            auth = None
        return auth and self.check_auth(userid, hmac_hash, request.headers,
                                        request.get_data(), allowed_roles,
                                        resource, method)
class TokenAuth(BasicAuth):
""" Implements Token AUTH logic. Should be subclassed to implement custom
authentication checking.
.. versionchanged:: 0.4
Ensure all errors returns a parseable body #366.
.. versionchanged:: 0.0.7
Support for 'resource' argument.
.. versionadded:: 0.0.5
"""
    def check_auth(self, token, allowed_roles, resource, method):
        """ This function is called to check if a token is valid. Must be
        overridden with custom logic.

        :param token: decoded user name.
        :param allowed_roles: allowed user roles
        :param resource: resource being requested.
        :param method: HTTP method being executed (POST, GET, etc.)
        """
        # Abstract hook: the base class deliberately offers no default
        # token validation.
        raise NotImplementedError
def authenticate(self):
""" Returns a standard a 401 response that enables basic auth.
Override if you want to change the response and/or the realm.
"""
resp = Response(None, 401, {'WWW-Authenticate': 'Basic realm:"%s"' %
__package__})
abort(401, description='Please provide proper credentials',
response=resp)
def authorized(self, allowed_roles, resource |
# -*- coding: utf-8 -*-
#
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import nose
from nose.tools import *
import numpy as np
from sknano.structures import Graphene, PrimitiveCellGraphene, \
ConventionalCellGraphene, GraphenePrimitiveCell, GrapheneConventionalCell
def test1():
    """Graphene with both edge lengths builds a conventional-cell sheet."""
    sheet = Graphene(armchair_edge_length=5, zigzag_edge_length=5)
    assert_equals(sheet.zigzag_edge_length, 5)
    assert_equals(sheet.armchair_edge_length, 5)
    assert_true(isinstance(sheet, ConventionalCellGraphene))
    assert_true(isinstance(sheet.unit_cell, GrapheneConventionalCell))
    print(sheet.unit_cell)
def test2():
    """PrimitiveCellGraphene exposes edge length, cell vectors and area."""
    sheet = PrimitiveCellGraphene(edge_length=5)
    assert_equals(sheet.edge_length, 5)
    assert_true(isinstance(sheet, PrimitiveCellGraphene))
    assert_true(isinstance(sheet.unit_cell, GraphenePrimitiveCell))
    print(np.degrees(sheet.r1.angle(sheet.r2)))
    print(sheet.unit_cell)
    print(sheet.area)
    print(sheet)
def test3():
    """ConventionalCellGraphene built directly reports both edge lengths."""
    sheet = ConventionalCellGraphene(armchair_edge_length=5, zigzag_edge_length=5)
    assert_equals(sheet.zigzag_edge_length, 5)
    assert_equals(sheet.armchair_edge_length, 5)
    assert_true(isinstance(sheet, ConventionalCellGraphene))
    assert_true(isinstance(sheet.unit_cell, GrapheneConventionalCell))
    print(sheet.unit_cell)
    print(sheet.area)
    print(sheet)
def test4():
    """from_conventional_cell yields a ConventionalCellGraphene instance."""
    sheet = Graphene.from_conventional_cell(armchair_edge_length=5,
                                            zigzag_edge_length=5)
    assert_equals(sheet.zigzag_edge_length, 5)
    assert_equals(sheet.armchair_edge_length, 5)
    assert_true(isinstance(sheet.unit_cell, GrapheneConventionalCell))
    print(sheet.unit_cell)
    assert_true(isinstance(sheet, ConventionalCellGraphene))
def test5():
    """from_primitive_cell yields a PrimitiveCellGraphene instance."""
    sheet = Graphene.from_primitive_cell(edge_length=5)
    assert_true(isinstance(sheet, PrimitiveCellGraphene))
    assert_true(isinstance(sheet.unit_cell, GraphenePrimitiveCell))


if __name__ == '__main__':
    nose.runmodule()
|
# ============================================================
# modelparser.py
#
# (C) Tiago Almeida 2016
#
# Still in early development stages.
#
# This module uses PLY (http://www.dabeaz.com/ply/ply.html)
# and a set of grammar rules to parse a custom model
# definition language.
# ============================================================
import functools as ftools
import pprint
import ply.lex as lex
import ply.yacc as yacc
import sys
import exceptions
# ============================================================
# Constants
# ============================================================
# Field multiplicity markers produced by p_field_multiplicity: a field is
# either single-valued or (declared with '*') may hold any number of values.
MULT_SINGLE = 1
MULT_ANY = 2
# ============================================================
# Lexer rules
# ============================================================
# Type names that are keywords of the language rather than plain identifiers;
# t_ID below maps matching identifiers to these token types.
reserved = {
    'String' : 'STRING',
    'Date' : 'DATE',
}
# List of token names. This is always required
# NOTE(review): 'MODELNAME' has no t_MODELNAME rule and no grammar production
# uses it, so PLY will warn about an unused token — confirm it can be dropped.
tokens = (
    'MODELNAME',
    'NUMBER',
    'COMMA',
    'STAR',
    'LPAREN',
    'RPAREN',
    'COLON',
    'LBRACKET',
    'RBRACKET',
    'SEMICOLON',
    'EXCLAMATION',
    'ID'
) + tuple(reserved.values())
# Regular expression rules for simple tokens (PLY reads the t_* names).
# Note: LBRACKET/RBRACKET actually match curly braces.
t_COMMA = r'\,'
t_STAR = r'\*'
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_COLON = r'\:'
t_LBRACKET = r'\{'
t_RBRACKET = r'\}'
t_SEMICOLON = r'\;'
t_EXCLAMATION = r'\!'
# A regular expression rule with some action code
def t_NUMBER(t):
    r'\d+'
    # PLY uses the docstring above as the token regex; convert the lexeme
    # so grammar actions receive an int rather than a string.
    t.value = int(t.value)
    return t
# Identifier match
def t_ID(t):
    r'[a-zA-Z_][a-zA-Z_0-9]*'
    # Check for reserved words: 'String'/'Date' become STRING/DATE tokens,
    # everything else stays a plain ID.
    t.type = reserved.get(t.value,'ID')
    return t
# Define a rule so we can track line numbers
def t_newline(t):
    r'\n+'
    # Count every newline in the match so error positions stay accurate.
    t.lexer.lineno += len(t.value)
# A string containing ignored characters (spaces and tabs)
t_ignore = ' \t'
# Error handling rule: report the offending character and resynchronise
# by skipping past it.
def t_error(t):
    print("Illegal character '%s'" % t.value[0])
    t.lexer.skip(1)
# ============================================================
# Parser rules
# ============================================================
# ----------------
# BNF Grammar
# ----------------
# model  : ID { fields }      (the model name is a plain ID)
# fields : fields field ;
#        | field ;
# Module-level accumulators filled in by the grammar actions below;
# parse() resets ``models`` and p_modeldecl resets ``fields``.
models = []
fields = []
def p_file(p):
    """
    rules : models
    """
    # Top-level rule: a file is just a sequence of model declarations.
    p[0] = p[1]
def p_modelsdecl(p):
    """
    models : models model
           | model
    """
    # Accumulate into the module-level ``models`` list. Note p[0] is never
    # set here, so callers read the global rather than the parse result.
    if len(p) >= 3:
        models.append(p[2])
    else:
        models.append(p[1])
def p_modeldecl(p):
    'model : ID LBRACKET fields RBRACKET'
    # Package the fields gathered so far and reset the accumulator for the
    # next model declaration.
    global fields
    p[0] = { 'model': p[1],
             'fields': fields
    }
    fields = []
def p_fields_decl(p):
    """
    fields : fields field
           | field
    """
    # Accumulate field dicts into the module-level ``fields`` list.
    if len(p) >= 3:
        fields.append(p[2])
    else:
        fields.append(p[1])
def p_field_decl(p):
    """
    field : ID COLON multiplicity datatype notnull SEMICOLON
    """
    # return an object with the field data
    p[0] = {
        'name': p[1],
        'type': p[4],
        'mult': p[3],
        'null': p[5]
    }
def p_datatype(p):
    """
    datatype : STRING
             | DATE
             | ID
    """
    # A field's type is a reserved word (String/Date) or another model's
    # name (plain ID).
    p[0] = p[1]
def p_field_multiplicity(p):
    """
    multiplicity : STAR
                 | empty
    """
    # '*' marks a many-valued field; absence means single-valued.
    if p[1] == '*':
        p[0] = MULT_ANY
    else:
        p[0] = MULT_SINGLE
def p_field_notnull(p):
    """
    notnull : EXCLAMATION
            | empty
    """
    # '!' marks the field NOT NULL, so the 'null' flag becomes False.
    if p[1] == '!':
        p[0] = False
    else:
        p[0] = True
def p_empty(p):
    'empty :'
    # Explicit empty production used by the optional markers above.
    pass
def p_modeldecl_print_error(p):
    'model : ID LBRACKET error RBRACKET'
    # Error recovery: resynchronise at the closing brace of a bad body.
    print("Syntax error in model declaration. Bad body")
# Error rule for syntax errors
def p_error(p):
    # NOTE(review): syntax errors outside a model body are silently
    # ignored here — confirm that is intentional.
    pass
def validate_models_unique(models):
    """
    Given a list of models, validates there are no repetitions.

    :param models: list of model dicts as built by ``p_modeldecl``
                   (each carries a 'model' name and a 'fields' list).
    :raises exceptions.ModelNotUnique: on the first duplicated model name.
    """
    # A set gives O(1) membership checks; the original used a dict with
    # dummy True values and also left a debug print of every model name.
    seen = set()
    for m in models:
        name = m['model']
        if name in seen:
            raise exceptions.ModelNotUnique(name)
        seen.add(name)
def validate_fields_unique(models):
    """
    Given a list of models, for each one validates there are no
    repeated fields.
    """
    # TODO: not implemented yet — currently accepts any field list.
    pass
def parse(file_path, debug_lexer=False):
    """
    Parse a model-definition file and return the list of parsed models.

    :param str file_path: path of the file to parse.
    :param bool debug_lexer: when True, dump every token to stdout before
                             parsing (diagnostic aid).
    :returns: list of model dicts accumulated by the grammar actions.
    """
    global models
    # Reset the module-level accumulator so repeated calls start clean.
    # NOTE(review): the companion ``fields`` list is only reset inside
    # p_modeldecl; a parse error mid-model could leak fields into the
    # next call — confirm.
    models = []
    # Build the lexer
    lexer = lex.lex()
    with open(file_path) as f:
        data = f.read()
    if debug_lexer:
        lexer.input(data)
        while True:
            tok = lexer.token()
            if not tok:
                break  # No more input
            print(tok)
    parser = yacc.yacc()
    # The grammar actions append into the module-level ``models`` list,
    # so the parser's own return value is not needed (the original bound
    # it to an unused ``result`` variable).
    parser.parse(data)
    return models
def parse_files(files_lst):
    """
    Parse every file in *files_lst* and merge the resulting model lists.

    :param files_lst: iterable of model-definition file paths.
    :returns: flat list containing the models of all files.
    :raises exceptions.ModelNotUnique: when two files declare the same model.
    """
    # parse the files and join the sublists of models into one
    models_fragments = list(map(parse, files_lst))
    # Supplying [] as the initial value makes an empty *files_lst* yield []
    # instead of reduce() raising TypeError on an empty sequence.
    models = ftools.reduce(lambda l1, l2: l1 + l2, models_fragments, [])
    validate_models_unique(models)
    return models
def main():
    # CLI entry point: parse the single file named on the command line.
    parse(sys.argv[1])
if __name__ == '__main__':
    main()
from django.db import models
from django.contrib.auth.models import User
from helper_functions import my_strftime
# Create your models here.
#This only contains metadata about this thread (i.e. just the subject for now)
#It is used in a Many-to-Many relationship with User, with a through object that contains the has_been_read flag
class Thread(models.Model):
    """Metadata about a message thread (currently just the subject).

    Related to User through ThreadMembership, which carries the per-user
    has_been_read flag.
    """
    subject = models.CharField(max_length=64)

    def getThread(self):
        """Returns list of messages (oldest first) with corresponding info"""
        return [message.getDetail() for message in self.message_set.order_by('time_sent')]

    def getThreadInfo(self, user=None):
        """
        Returns dictionary object containing basic info about thread,
        such as most recent message/author, title, etc.
        """
        if user is None:  # was ``== None``; identity check is the idiom
            # Anonymous callers have no membership row to consult.
            has_been_read = False
        else:
            has_been_read = ThreadMembership.objects.get(user=user, thread=self).has_been_read
        # NOTE(review): raises IndexError when the thread has no messages —
        # confirm every Thread is always created with at least one Message.
        last_message = self.message_set.order_by('-time_sent')[0]
        return { 'subject' : self.subject, 'last_message' : last_message.getDetail(), 'id' : self.id,
                 'has_been_read' : has_been_read }
class Message(models.Model):
    # A single message inside a Thread; ordering is by time_sent.
    thread = models.ForeignKey(Thread)
    user = models.ForeignKey('userInfo.UserProfile') #the author of this message
    # Timestamp set once, automatically, when the row is created.
    time_sent = models.DateTimeField(auto_now_add=True)
    text = models.TextField()
    def getDetail(self):
        """Returns dictionary object containing the info of this object"""
        return { 'author' : self.user.getInfo(),
                 'timestamp' : my_strftime(self.time_sent),
                 'text' : self.text }
class ThreadMembership(models.Model):
    # Through-table linking a user to a thread with per-user metadata.
    user = models.ForeignKey('userInfo.UserProfile')
    thread = models.ForeignKey(Thread)
    #Meta data for user's relation to thread
    has_been_read = models.BooleanField(default=False)
|
se...
    def testPermissionPutWithQuotedEmail(self):
        """PUT with a %40-encoded email in the URL creates the permission
        for the decoded address."""
        ret = self._put('/users/bob%40bobsworld.com/permissions/admin', data=dict(options=json.dumps(dict(products=["a"]))))
        self.assertStatusCode(ret, 201)
        self.assertEqual(ret.data, json.dumps(dict(new_data_version=1)), "Data: %s" % ret.data)
        query = dbo.permissions.t.select()
        query = query.where(dbo.permissions.username == 'bob@bobsworld.com')
        query = query.where(dbo.permissions.permission == 'admin')
        self.assertEqual(query.execute().fetchone(), ('admin', 'bob@bobsworld.com', {"products": ["a"]}, 1))
    def testPermissionsPostWithHttpRemoteUser(self):
        """POST authenticated via HTTP REMOTE_USER updates an existing
        permission and bumps its data version."""
        ret = self._httpRemoteUserPost('/users/bob/permissions/release_read_only', username="bob", data=dict(options=json.dumps(dict(products=["a", "b"])),
                                                                                                             data_version=1))
        self.assertEqual(ret.status_code, 200, ret.data)
        self.assertEqual(json.loads(ret.data), dict(new_data_version=2))
        r = dbo.permissions.t.select().where(dbo.permissions.username == 'bob').where(dbo.permissions.permission == "release_read_only").execute().fetchall()
        self.assertEqual(len(r), 1)
        self.assertEqual(r[0], ('release_read_only', 'bob', {"products": ["a", "b"]}, 2))
def testPermissionsPost(self):
ret = self._post('/users/bob/permissions/release_read_only', data=dict(options=json.dumps(dict(products=["a", "b"])), data_version=1))
self.assertEqual(ret.status_code, 200, ret.data)
self.assertEqual(json.loads(ret.data), dict(new_data_version=2))
r = dbo.permissions.t.select().where(dbo.permissions.username == 'bob').where(dbo.permissions.permission == "release_read_only").ex | ecute().fetchall()
self.assertEqual(len(r), 1)
self.assertEqual(r[0], ('release_read_only', 'bob', {"products": ["a", "b"]}, 2))
    def testPermissionsPostMissing(self):
        """POST to a permission the user does not hold yet returns 404."""
        ret = self._post("/users/bill/permissions/rule", data=dict(options="", data_version=1))
        self.assertStatusCode(ret, 404)
    def testPermissionsPostBadInput(self):
        """POST without a request body is rejected with 400."""
        ret = self._post("/users/bill/permissions/admin")
        self.assertStatusCode(ret, 400)
    def testPermissionsPostWithoutPermission(self):
        """POST by a user not allowed to grant permissions gets 403."""
        ret = self._post("/users/bob/permissions/rule", username="shane", data=dict(data_version=1, options=json.dumps(dict(actions=["create"]))))
        self.assertStatusCode(ret, 403)
    def testPermissionPutWithOption(self):
        """PUT creating a permission with an options blob stores it as JSON."""
        ret = self._put('/users/bob/permissions/release_locale', data=dict(options=json.dumps(dict(products=['a']))))
        self.assertStatusCode(ret, 201)
        self.assertEqual(ret.data, json.dumps(dict(new_data_version=1)), "Data: %s" % ret.data)
        query = dbo.permissions.t.select()
        query = query.where(dbo.permissions.username == 'bob')
        query = query.where(dbo.permissions.permission == 'release_locale')
        self.assertEqual(query.execute().fetchone(), ('release_locale', 'bob', dict(products=['a']), 1))
    def testPermissionPutThatRequiresSignoff(self):
        """PUT on a permission that needs signoff is rejected with 400."""
        ret = self._put("/users/nancy/permissions/admin")
        self.assertStatusCode(ret, 400)
        self.assertIn("This change requires signoff", ret.data)
def testPermissionModify(self):
ret = self._put('/users/bob/permissions/rule',
data=dict(options=json.dumps(dict(products=['a', 'b'])), data_version=1))
self.assertStatusCode(ret, 200)
self.assertEqual(json.loads(ret.data), dict(new_data_version=2))
query = dbo.permissions.t.select()
query = query.where(dbo.permissions.username == 'bob')
query = query.where(dbo.permissions.permission == 'rule')
self.assertEqual(query.execute().fetchone(), ('rule', 'bob', dict(products=['a', 'b']), 2))
    def testPermissionModifyWithoutDataVersion(self):
        """Modifying an existing permission without data_version gives 400."""
        ret = self._put("/users/bob/permissions/release",
                        data=dict(options=json.dumps(dict(products=["different"]))))
        self.assertStatusCode(ret, 400)
    def testPermissionPutBadPermission(self):
        """PUT with an unknown permission name gives 400."""
        ret = self._put('/users/bob/permissions/fake')
        self.assertStatusCode(ret, 400)
    def testPermissionPutBadOption(self):
        """PUT with an option key the permission does not support gives 400."""
        ret = self._put('/users/bob/permissions/admin', data=dict(options=json.dumps(dict(foo=2))))
        self.assertStatusCode(ret, 400)
    # Discovered in https://bugzilla.mozilla.org/show_bug.cgi?id=1237264
    def testPermissionPutBadJSON(self):
        """PUT with a syntactically invalid options JSON string gives 400."""
        ret = self._put("/users/ashanti/permissions/rule", data=dict(options='{"products":'))
        self.assertStatusCode(ret, 400)
    def testPermissionPutWithoutPermission(self):
        """PUT by a user not allowed to grant this permission gives 403."""
        ret = self._put('/users/bob/permissions/admin', username="joseph")
        self.assertStatusCode(ret, 403)
    def testPermissionDelete(self):
        """DELETE removes the permission row entirely."""
        ret = self._delete('/users/bob/permissions/release_read_only', qs=dict(data_version=1))
        self.assertStatusCode(ret, 200)
        query = dbo.permissions.t.select()
        query = query.where(dbo.permissions.username == 'bob')
        query = query.where(dbo.permissions.permission == 'release_read_only')
        self.assertEqual(query.execute().fetchone(), None)
    def testPermissionDeleteMissing(self):
        """DELETE on a permission the user does not hold gives 404."""
        ret = self._delete("/users/bill/permissions/release", qs={"data_version": 1})
        self.assertStatusCode(ret, 404)
    def testPermissionDeleteBadInput(self):
        """DELETE without the required data_version gives 400."""
        ret = self._delete("/users/bill/permissions/admin")
        self.assertStatusCode(ret, 400)
    def testPermissionDeleteWithoutPermission(self):
        """DELETE by a user lacking rights gives 403."""
        ret = self._delete("/users/bob/permissions/permission", qs=dict(data_version=1), username="anna")
        self.assertStatusCode(ret, 403)
    def testPermissionDeleteRequiresSignoff(self):
        """DELETE on a permission that needs signoff is rejected with 400."""
        ret = self._delete("/users/bob/permissions/release", qs=dict(data_version=1))
        self.assertStatusCode(ret, 400)
        self.assertIn("This change requires signoff", ret.data)
class TestPermissionsScheduledChanges(ViewTest):
maxDiff = 10000
def setUp(self):
super(TestPermissionsScheduledChanges, self).setUp()
dbo.permissions.scheduled_changes.t.insert().execute(
sc_id=1, scheduled_by="bill", change_type="insert", data_version=1, base_permission="rule", base_username="janet",
base_options={"products": ["foo"]},
)
dbo.permissions.scheduled_changes.history.t.insert().execute(change_id=1, changed_by="bill", timestamp=20, sc_id=1)
dbo.permissions.scheduled_changes.history.t.insert().execute(
change_id=2, changed_by="bill", timestamp=21, sc_id=1, scheduled_by="bill", change_type="insert", data_version=1,
base_permission="rule", base_username="janet", base_options={"products": ["foo"]},
)
dbo.permissions.scheduled_changes.signoffs.t.insert().execute(sc_id=1, username="bill", role="releng")
dbo.permissions.scheduled_changes.signoffs.history.t.insert().execute(change_id=1, changed_by="bill", timestamp=30, sc_id=1, username="bill")
dbo.permissions.scheduled_changes.signoffs.history.t.insert().execute(change_id=2, changed_by="bill", timestamp=31, sc_id=1,
username="bill", role="releng")
dbo.permissions.scheduled_changes.conditions.t.insert().execute(sc_id=1, when=10000000, data_version=1)
dbo.permissions.scheduled_changes.conditions.history.t.insert().execute(change_id=1, changed_by="bill", timestamp=20, sc_id=1)
dbo.permissions.scheduled_changes.conditions.history.t.insert().execute(
change_id=2, changed_by="bill", timestamp=21, sc_id=1, when=10000000, data_version=1
)
dbo.permissions.scheduled_changes.t.insert().execute(
sc_id=2, scheduled_by="bill", change_type="update", data_version=1, base_permission="release_locale", base_username="ashanti",
base_options=None, base_data_version=1,
)
dbo.permissions.scheduled_changes.history.t.insert().execute(change_id=3, changed_by="bill", timestamp=40, sc_id=2)
dbo.permissions.scheduled_changes.history.t.ins |
"""
The file preprocesses the files/train.txt and files/test.txt files.
I requires the dependency based embeddings by Levy et al.. Download them from his website and change
the embeddingsPath variable in the script to point to the unzipped deps.words file.
"""
from __future__ import print_function
import numpy as np
import gzip
import os
import sys
if (sys.version_info > (3, 0)):
import pickle as pkl
else:
#Python 2.7 imports
import cPickle as pkl
from io import open
#We download German word embeddings from here https://www.ukp.tu-darmstadt.de/research/ukp-in-challenges/germeval-2014/
embeddingsPath = 'embeddings/2014_tudarmstadt_german_50mincount.vocab.gz'
#Train, Dev, and Test files
folder = 'data/'
files = [folder+'NER-de-train.tsv', folder+'NER-de-dev.tsv', folder+'NER-de-test.tsv']
#At which column position is the token and the tag, starting at 0
tokenPosition=1
tagPosition=2
#Size of the context window
#NOTE(review): window_size is passed to createMatrices but never used there —
#confirm whether windowing is applied later in the pipeline.
window_size = 3
def createMatrices(sentences, windowsize, word2Idx, label2Idx, case2Idx):
    """Map tokenised sentences to parallel index lists.

    :param sentences: list of sentences, each a list of [word, label] pairs.
    :param windowsize: accepted for API compatibility but unused here —
        NOTE(review): confirm whether context windowing was meant to
        happen in this function.
    :param word2Idx: word -> index map containing an 'UNKNOWN_TOKEN' entry.
    :param label2Idx: label -> index map.
    :param case2Idx: casing-class -> index map consumed by getCasing.
    :returns: list of [wordIndices, caseIndices, labelIndices] per sentence.
    """
    # The original also computed paddingIdx, wordCount and unknownWordCount
    # but never used them; that dead code is removed.
    unknownIdx = word2Idx['UNKNOWN_TOKEN']
    dataset = []
    for sentence in sentences:
        wordIndices = []
        caseIndices = []
        labelIndices = []
        for word, label in sentence:
            # Exact-case match first, then lower-cased, else UNKNOWN.
            if word in word2Idx:
                wordIdx = word2Idx[word]
            elif word.lower() in word2Idx:
                wordIdx = word2Idx[word.lower()]
            else:
                wordIdx = unknownIdx
            wordIndices.append(wordIdx)
            caseIndices.append(getCasing(word, case2Idx))
            labelIndices.append(label2Idx[label])
        dataset.append([wordIndices, caseIndices, labelIndices])
    return dataset
def readFile(filepath, tokenPosition, tagPosition):
    """Read a CoNLL-style TSV file into a list of sentences.

    Sentences are separated by blank lines; lines starting with '#' are
    treated as comments. Each returned sentence is a list of
    [token, tag] pairs taken from the given column positions.

    :param filepath: path of the .tsv file.
    :param tokenPosition: 0-based column index of the token.
    :param tagPosition: 0-based column index of the tag.
    :returns: list of sentences.
    """
    sentences = []
    sentence = []
    # ``with`` guarantees the handle is closed; the original iterated an
    # anonymous open() and leaked the file object.
    with open(filepath) as inFile:
        for line in inFile:
            line = line.strip()
            if len(line) == 0 or line[0] == '#':
                # Blank line or comment terminates the current sentence.
                if len(sentence) > 0:
                    sentences.append(sentence)
                    sentence = []
                continue
            splits = line.split('\t')
            sentence.append([splits[tokenPosition], splits[tagPosition]])
    # Flush a trailing sentence when the file does not end with a blank line.
    if len(sentence) > 0:
        sentences.append(sentence)
    print(filepath, len(sentences), "sentences")
    return sentences
def getCasing(word, caseLookup):
    """Return the casing-class index of *word* from *caseLookup*.

    Classes, checked in priority order: numeric (all digits),
    mainly_numeric (>50% digits), allLower, allUpper, initialUpper,
    contains_digit, other.

    :param word: the token to classify (may be empty).
    :param caseLookup: dict mapping casing-class name -> index.
    """
    casing = 'other'
    numDigits = sum(1 for char in word if char.isdigit())
    # Guard the empty string, which previously raised ZeroDivisionError.
    digitFraction = numDigits / float(len(word)) if word else 0.0
    if word.isdigit():  # Is a digit
        casing = 'numeric'
    elif digitFraction > 0.5:
        casing = 'mainly_numeric'
    elif word.islower():  # All lower case
        casing = 'allLower'
    elif word.isupper():  # All upper case
        casing = 'allUpper'
    elif word and word[0].isupper():  # initial char upper, rest mixed/lower
        casing = 'initialUpper'
    elif numDigits > 0:
        casing = 'contains_digit'
    return caseLookup[casing]
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #
# Start of the preprocessing
# ::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: #
# Output locations for the pickled dataset and embedding matrices.
outputFilePath = 'pkl/data.pkl.gz'
embeddingsPklPath = 'pkl/embeddings.pkl.gz'
trainSentences = readFile(files[0], tokenPosition, tagPosition)
devSentences = readFile(files[1], tokenPosition, tagPosition)
testSentences = readFile(files[2], tokenPosition, tagPosition)
#Mapping of the labels to integers
labelSet = set()
words = {}
# Collect every label and every lower-cased token across all three splits;
# ``words`` is later used to keep only the embeddings actually needed.
for dataset in [trainSentences, devSentences, testSentences]:
    for sentence in dataset:
        for token, label in sentence:
            labelSet.add(label)
            words[token.lower()] = True
# :: Create a mapping for the labels ::
label2Idx = {}
for label in labelSet:
    label2Idx[label] = len(label2Idx)
# :: Hard coded case lookup ::
case2Idx = {'numeric': 0, 'allLower':1, 'allUpper':2, 'initialUpper':3, 'other':4, 'mainly_numeric':5, 'contains_digit': 6, 'PADDING_TOKEN':7}
# One-hot vector per casing class.
caseEmbeddings = np.identity(len(case2Idx), dtype='float32')
# :: Read in word embeddings ::
word2Idx = {}
wordEmbeddings = []
# :: Downloads the embeddings from the TU-Darmstadt.de webserver ::
if not os.path.isfile(embeddingsPath):
    basename = os.path.basename(embeddingsPath)
    if basename.startswith('2014_tudarmstadt_german_'):
        print("Start downloading word embeddings for German using wget ...")
        # NOTE(review): shells out to wget and does not check the exit
        # code; a missing wget binary fails silently — confirm.
        os.system("wget https://public.ukp.informatik.tu-darmstadt.de/reimers/2014_german_embeddings/"+basename+" -P embeddings/")
    else:
        print(embeddingsPath, "does not exist. Please provide pre-trained embeddings")
        exit()
# :: Load the pre-trained embeddings file ::
# Open in binary mode in both branches so every line is bytes and the
# .decode("utf-8") below works uniformly. The original opened plain files
# in text mode (via io.open with an encoding), where lines are already str
# and .decode() raises AttributeError.
fEmbeddings = gzip.open(embeddingsPath, "r") if embeddingsPath.endswith('.gz') else open(embeddingsPath, 'rb')
for line in fEmbeddings:
    split = line.decode("utf-8").strip().split(" ")
    word = split[0]
    if len(word2Idx) == 0:  # Add padding+unknown before the first real word
        word2Idx["PADDING_TOKEN"] = len(word2Idx)
        vector = np.zeros(len(split)-1)  # Zero vector for the 'PADDING' word
        wordEmbeddings.append(vector)
        word2Idx["UNKNOWN_TOKEN"] = len(word2Idx)
        vector = np.random.uniform(-0.25, 0.25, len(split)-1)
        wordEmbeddings.append(vector)
    # Keep only embeddings for tokens that occur in the corpus.
    if split[0].lower() in words:
        vector = np.array([float(num) for num in split[1:]])
        wordEmbeddings.append(vector)
        word2Idx[split[0]] = len(word2Idx)
fEmbeddings.close()  # the original never closed the handle
wordEmbeddings = np.array(wordEmbeddings)
print("Embeddings shape: ", wordEmbeddings.shape)
print("Len words: ", len(words))
embeddings = {'wordEmbeddings': wordEmbeddings, 'word2Idx': word2Idx,
              'caseEmbeddings': caseEmbeddings, 'case2Idx': case2Idx,
              'label2Idx': label2Idx}
f = gzip.open(embeddingsPklPath, 'wb')
pkl.dump(embeddings, f, -1)
f.close()
# :: Create matrices ::
# Convert each split to index lists and pickle all three into one file;
# consumers must read them back in the same order: train, dev, test.
train_set = createMatrices(trainSentences, window_size, word2Idx, label2Idx, case2Idx)
dev_set = createMatrices(devSentences, window_size, word2Idx, label2Idx, case2Idx)
test_set = createMatrices(testSentences, window_size, word2Idx, label2Idx, case2Idx)
f = gzip.open(outputFilePath, 'wb')
pkl.dump(train_set, f)
pkl.dump(dev_set, f)
pkl.dump(test_set, f)
f.close()
print("Data stored in pkl folder")
|
import pytest
from ..context import dnsimple, fixture_path
from ..request_helper import RequestHelper, request
from dnsimple.client import Client
class TestClient(RequestHelper, object):
    """Unit tests for dnsimple Client construction and domain transfers.

    Uses RequestHelper's stub_request and the ``request`` fixture; no
    network traffic is performed.
    """
    def test_constructor_raises_errors_when_improperly_configured(self):
        # Email alone (no token or password) is not a usable credential set.
        with pytest.raises(dnsimple.credentials.InvalidCredentialsException) as ex:
            Client(email = 'user@host.com')
        assert 'Invalid credentials supplied' in str(ex.value)
    def test_constructor_raises_errors_when_no_credentials_found(self):
        # A credentials file that does not exist must also fail loudly.
        with pytest.raises(dnsimple.credentials.InvalidCredentialsException):
            Client(credentials_search_paths = fixture_path('credentials'), credentials_filename = 'missing')
    def test_constructor_configures_credentials_for_token_authentication(self):
        subject = Client(email = 'user@host.com', user_token = 'toke')
        assert isinstance(subject.request, dnsimple.connection.Request)
        credentials = subject.request.credentials
        assert credentials.email == 'user@host.com'
        assert credentials.user_token == 'toke'
    def test_constructor_configures_credentials_for_password_authentication(self):
        subject = Client(email = 'user@host.com', password = 'password')
        credentials = subject.request.credentials
        assert credentials.email == 'user@host.com'
        assert credentials.password == 'password'
    def test_constructor_configures_credentials_for_domain_token_authentication(self):
        subject = Client(domain_token = 'token')
        assert subject.request.credentials.domain_token == 'token'
    def test_constructor_configures_credentials_from_configuration_file(self):
        # Credentials may also come from an on-disk fixture file.
        subject = Client(credentials_search_paths = [fixture_path('credentials')], credentials_filename = 'basic')
        credentials = subject.request.credentials
        assert credentials.email == 'user@host.com'
        assert credentials.user_token == 'user_token'
        assert credentials.password == 'password'
    def test_constructor_defaults_sandbox_to_false(self):
        subject = Client(email = 'user@host.com', password = 'password')
        assert subject.request.sandbox is False

    def test_constructor_enables_sandbox(self):
        subject = Client(sandbox = True, email = 'user@host.com', password = 'password')
        assert subject.request.sandbox is True
    def test_transfer_creates_domain_transfer(self, mocker, request):
        # A successful POST to 'domain_transfers' reports True.
        method = self.stub_request(mocker, request, method_name = 'post', success = True, data = {})
        subject = Client(email = 'user@host.com', password = 'password')
        contact = dnsimple.models.Contact(request, {'id': 1})
        subject.request = request
        result = subject.transfer('foo.com', contact)
        method.assert_called_once_with('domain_transfers', {'domain': {'name': 'foo.com', 'registrant_id': 1}})
        assert result == True
    def test_transfer_returns_false_when_transfer_fails(self, mocker, request):
        method = self.stub_request(mocker, request, method_name = 'post', success = False)
        subject = Client(email = 'user@host.com', password = 'password')
        contact = dnsimple.models.Contact(request, {'id': 1})
        subject.request = request
        result = subject.transfer('foo.com', contact)
        assert result == False
|
from storitell.tastypie.resources import ModelResource
from storitell.stories.models import Story
from storitell.stories.extra_methods import moderate_comment
from storitell.ta | stypie.validation import Validation
# Stories can be read through a REST-ful interface. It'd be nice
# to be able to POST as well, but that requires validation I haven't
# had time to code yet. Want to add it? Be my guest.
class StoryResource(ModelResource):
    """Read-only REST resource exposing Story objects through tastypie."""
    class Meta:
        queryset = Story.objects.all()
        resource_name = 'story'
        # Only these model fields are serialised in responses.
        fields = ['maintext','pub_date','upvotes']
        # GET only: writes would need validation that does not exist yet
        # (see the module comment above).
        allowed_methods = ['get']
|
# 10.0 is the maximum version
('|10.0', '5.1', True),
('|10.0', '10.0', True),
('|10.0', '10.1', False),
# 6.1 is the minimum version
('6.1|', '5.1', False),
('6.1|', '6.0', False),
| ('6.1|', '6.1', True),
('6.1|', '6.2', True),
('6.1|', '10.0', True),
('6.2|', '5.1', False),
('6.2|', '6.0', False),
('6.2|', '6.1', False),
('6.2|', '6.2', True),
# must be 6.2 or 6.3
('6.2|6.3', '6.0', False),
('6.2|6.3', '6.1', False) | ,
('6.2|6.3', '6.2', True),
('6.2|6.3', '6.3', True),
('6.2|6.3', '10.0', False),
# 10.0 is the minimum
('10.0|', '5.1', False),
('10.0|', '10.0', True))
for (req, mock, expected_return) in tests:
mock = parse_windows_build(mock)
actual_return = detectos(req, mock)
self.assertEqual(expected_return, actual_return,
'detectos(%s, %s)==%s instead of %s' % (req, mock,
actual_return, expected_return))
    @common.skipUnlessWindows
    def test_detect_file(self):
        """Test detect_file function"""
        # Pairs of (pathname, expected detect_file() result); pathnames may
        # contain environment variables and '*' globs.
        tests = [('%windir%\\system32\\kernel32.dll', True),
                 ('%windir%\\system32', True),
                 ('%ProgramFiles%\\Internet Explorer', True),
                 ('%ProgramFiles%\\Internet Explorer\\', True),
                 ('%windir%\\doesnotexist', False),
                 ('%windir%\\system*', True),
                 ('%windir%\\*ystem32', True),
                 ('%windir%\\*ystem3*', True)]
        # On 64-bit Windows, Winapp2.ini expands the %ProgramFiles% environment
        # variable to also %ProgramW6432%, so test unique entries in
        # %ProgramW6432%.
        import struct
        # NOTE(review): this aborts unless the interpreter is 32-bit —
        # confirm the suite is only meant to run under 32-bit Python.
        if 8 * struct.calcsize('P') != 32:
            raise NotImplementedError('expecting 32-bit Python')
        if os.getenv('ProgramW6432'):
            dir_64 = os.listdir(os.getenv('ProgramFiles'))
            dir_32 = os.listdir(os.getenv('ProgramW6432'))
            dir_32_unique = set(dir_32) - set(dir_64)
            if dir_32 and not dir_32_unique:
                raise RuntimeError(
                    'Test expects objects in %ProgramW6432% not in %ProgramFiles%')
            for pathname in dir_32_unique:
                tests.append(('%%ProgramFiles%%\\%s' % pathname, True))
        else:
            logger.info(
                'skipping %ProgramW6432% tests because WoW64 not detected')
        for (pathname, expected_return) in tests:
            actual_return = detect_file(pathname)
            msg = 'detect_file(%s) returned %s' % (pathname, actual_return)
            self.assertEqual(expected_return, actual_return, msg)
    def setup_fake(self, f1_filename=None):
        """Create a temp directory tree and registry keys for cleaner tests.

        :param f1_filename: name for the first file (default deleteme.log).
        :returns: tuple (dirname, fname1, fname2, fbak) of created paths.
        """
        # Registry key the fake cleaner is expected to delete.
        subkey = 'Software\\BleachBit\\DeleteThisKey\\AndThisKey'
        # put ampersand in directory name to test
        # https://github.com/bleachbit/bleachbit/issues/308
        dirname = tempfile.mkdtemp(prefix='bleachbit-test-winapp&')
        fname1 = os.path.join(dirname, f1_filename or 'deleteme.log')
        open(fname1, 'w').close()
        dirname2 = os.path.join(dirname, 'sub')
        os.mkdir(dirname2)
        fname2 = os.path.join(dirname2, 'deleteme.log')
        open(fname2, 'w').close()
        fbak = os.path.join(dirname, 'deleteme.bak')
        open(fbak, 'w').close()
        self.assertExists(fname1)
        self.assertExists(fname2)
        self.assertExists(fbak)
        create_sub_key(subkey)
        self.assertTrue(detect_registry_key(KEYFULL))
        self.assertTrue(detect_registry_key('HKCU\\%s' % subkey))
        return dirname, fname1, fname2, fbak
def ini2cleaner(self, body, do_next=True):
"""Write a minimal Winapp2.ini"""
ini = open(self.ini_fn, 'w')
ini.write('[someapp]\n')
ini.write('LangSecRef=3021\n')
ini.write(body)
ini.write('\n')
ini.close()
self.assertExists(self.ini_fn)
if do_next:
return next(Winapp(self.ini_fn).get_cleaners())
else:
return Winapp(self.ini_fn).get_cleaners()
@common.skipUnlessWindows
def test_fake(self):
"""Test with fake file"""
# reuse this path to store a winapp2.ini file in
(ini_h, self.ini_fn) = tempfile.mkstemp(
suffix='.ini', prefix='winapp2')
os.close(ini_h)
# a set of tests
# this map explains what each position in the test tuple means
# 0=line to write directly to winapp2.ini
# 1=filename1 to place in fake environment (default=deleteme.log)
# 2=auto-hide before cleaning
# 3=dirname exists after cleaning
# 4=filename1 (.\deleteme.log) exists after cleaning
# 5=sub\deleteme.log exists after cleaning
# 6=.\deleteme.bak exists after cleaning
tests = [
# single file
('FileKey1=%s|deleteme.log', None,
False, True, False, True, True),
# single file, case matching should be insensitive
('FileKey1=%s|dEleteme.LOG', None,
False, True, False, True, True),
# special characters for XML
('FileKey1=%s|special_chars_&-\'.txt', 'special_chars_&-\'.txt',
False, True, False, True, True),
# *.log
('FileKey1=%s|*.LOG', None, False, True, False, True, True),
# semicolon separates different file types
('FileKey1=%s|*.log;*.bak', None,
False, True, False, True, False),
# *.*
('FileKey1=%s|*.*', None, False, True, False, True, False),
# recurse *.*
('FileKey1=%s|*.*|RECURSE', None, False,
True, False, False, False),
# recurse *.log
('FileKey1=%s|*.log|RECURSE', None, False,
True, False, False, True),
# remove self *.*, this removes the directory
('FileKey1=%s|*.*|REMOVESELF', None,
False, False, False, False, False),
]
# Add positive detection, where the detection believes the application is present,
# to all the tests, which are also positive.
new_tests = []
for test in tests:
for detect in (
"\nDetectFile=%%APPDATA%%\\Microsoft",
"\nSpecialDetect=DET_WINDOWS",
"\nDetectFile1=%%APPDATA%%\\Microsoft\nDetectFile2=%%APPDATA%%\\does_not_exist",
"\nDetectFile1=%%APPDATA%%\\does_not_exist\nDetectFile2=%%APPDATA%%\\Microsoft",
"\nDetect=HKCU\\Software\\Microsoft",
# Below checks that a space is OK in the registry key
"\nDetect=HKCU\\Software\\Microsoft\\Command Processor",
# Below checks Detect# where one of two keys exist.
"\nDetect1=HKCU\\Software\\Microsoft\nDetect2=HKCU\\Software\\does_not_exist",
"\nDetect1=HKCU\\Software\\does_not_exist\nDetect2=HKCU\\Software\\Microsoft",
# Below checks Detect with DetectFile where one exists
"\nDetect=HKCU\\Software\\Microsoft\nDetectFile=%%APPDATA%%\\does_not_exist",
"\nDetect=HKCU\\Software\\does_not_exist\nDetectFile=%%APPDATA%%\\Microsoft"):
new_ini = test[0] + detect
new_test = [new_ini, ] + [x for x in test[1:]]
new_tests.append(new_test)
positive_tests = tests + new_tests
# execute positive tests
for test in positive_tests:
print('positive test: ', test)
self.assertEqual(len(test), 7)
(dirname, fname1, fname2, fbak) = self.setup_fake(test[1])
cleaner = self.ini2cleaner(test[0] % dirname)
self.assertEqual(test[2], cleaner.auto_hide())
self.r |
param object self: tthe object being initialised
@param object manager: the manager object that
@param str name: unique name for this object
@param dict configuration: parameters from the configuration file
@param dict callbacks: dictionary specifying functions to be run
on state machine transitions
"""
if config is None:
config = {}
if callbacks is None:
callbacks = {}
default_callbacks = {
'onactivate': self.on_activate,
'ondeactivate': self.on_deactivate
}
default_callbacks.update(callbacks)
# State machine definition
        # the abbreviations for the event list are the following:
# name: event name,
# src: source state,
# dst: destination state
_baseStateList = {
'initial': 'deactivated',
'events': [
{'name': 'activate', 'src': 'deactivated', 'dst': 'idle'},
{'name': 'deactivate', 'src': 'idle', 'dst': 'deactivated'},
{'name': 'deactivate', 'src': 'running', 'dst': 'deactivated'},
{'name': 'run', 'src': 'idle', 'dst': 'running'},
{'name': 'stop', 'src': 'running', 'dst': 'idle'},
{'name': 'lock', 'src': 'idle', 'dst': 'locked'},
{'name': 'lock', 'src': 'running', 'dst': 'locked'},
{'name': 'block', 'src': 'idle', 'dst': 'blocked'},
{'name': 'block', 'src': 'running', 'dst': 'blocked'},
{'name': 'locktoblock', 'src': 'locked', 'dst': 'blocked'},
{'name': 'unlock', 'src': 'locked', 'dst': 'idle'},
{'name': 'unblock', 'src': 'blocked', 'dst': 'idle'},
{'name': 'runlock', 'src': 'locked', 'dst': 'running'},
{'name': 'runblock', 'src': 'blocked', 'dst': 'running'}
],
'callbacks': default_callbacks
}
# Initialise state machine:
if qtpy.PYQT4 or qtpy.PYSIDE:
QtCore.QObject.__init__(self)
Fysom.__init__(self, _baseStateList)
else:
super().__init__(cfg=_baseStateList, **kwargs)
# add connection base
self.connector = OrderedDict()
self.connector['in'] = OrderedDict()
for con in self._in:
self.connector['in'][con] = OrderedDict()
self.connector['in'][con]['class'] = self._in[con]
self.connector['in'][con]['object'] = None
self.connector['out'] = OrderedDict()
for con in self._out:
self.connector['out'][con] = OrderedDict()
self.connector['out'][con]['class'] = self._out[con]
self._manager = manager
self._name = name
self._configuration = config
self._statusVariables = OrderedDict()
# self.sigStateChanged.connect(lambda x: print(x.event, x.fsm._name))
    def __getattr__(self, name):
        """
        Attribute getter.

        We'll reimplement it here because otherwise only __getattr__ of the
        first base class (QObject) is called and the second base class is
        never looked up.
        Here we look up the first base class first and if the attribute is
        not found, we'll look into the second base class.

        @param str name: name of the attribute that normal lookup failed on
        @return object: attribute value from whichever base class resolves it
        @raise AttributeError: if neither base class provides the attribute
        """
        try:
            return QtCore.QObject.__getattr__(self, name)
        except AttributeError:
            # Not found on the QObject side; fall through to Fysom below.
            pass
        return Fysom.__getattr__(self, name)
@property
def log(self):
"""
Returns a logger object
"""
return logging.getLogger("{0}.{1}".format(
self.__module__, self.__class__.__name__))
@QtCore.Slot(result=bool)
def _wrap_activation(self):
self.log.debug('Activation in thread {0}'.format(QtCore.QThread.currentThreadId()))
try:
self.activate()
except:
self.log.exception('Error during activation')
return False
return True
@QtCore.Slot(result=bool)
def _wrap_deactivation(self):
self.log.debug('Deactivation in thread {0}'.format(QtCore.QThread.currentThreadId()))
try:
self.deactivate()
except:
self.log.exception('Error during activation:')
return False
return True
def on_activate(self, e):
""" Method called when module is activated. If not overridden
this method returns an error.
@param object e: Fysom state change descriptor
"""
self.log.error('Please implement and specify the activation method '
'for {0}.'.format(self.__class__.__name__))
def on_deactivate(self, e):
""" Method called when module is deactivated. If not overridden
this method returns an error.
@param object e: Fysom state change descriptor
"""
self.log.error('Please implement and specify the deactivation '
'method {0}.'.format(self.__class__.__name__))
    # Do not replace these in subclasses
    def onchangestate(self, e):
        """ Fysom callback for state transition.

        Re-emits every Fysom state change on sigStateChanged so other
        components can react to module state changes.

        @param object e: Fysom state transition description
        """
        self.sigStateChanged.emit(e)
    def getStatusVariables(self):
        """ Return a dict of variable names and their content representing
        the module state for saving.

        @return dict: variable names and contents.
        """
        # Returned object is the live dict, not a copy — callers share it.
        return self._statusVariables
def setStatusVariables(self, variableDict):
""" Give a module a dict of variable names and their content
representing the mod | ule state.
@param OrderedDict dict: variable names and contents.
"""
if not isinstance(variableDict, (dict, OrderedDict)):
self.log.error('Did not pass a dict o | r OrderedDict to '
'setStatusVariables in {0}.'.format(
self.__class__.__name__))
return
self._statusVariables = variableDict
    def getState(self):
        """Return the state of the state machine implemented in this class.

        @return str: state of state machine

        Valid return values are: 'deactivated', 'idle', 'running', 'locked',
        'blocked'
        """
        # `current` is maintained by the Fysom base class (see __init__).
        return self.current
    def getConfiguration(self):
        """ Return the configuration dictionary for this module.

        @return dict: configuration dictionary
        """
        return self._configuration
    def getConfigDirectory(self):
        """ Return the configuration directory for the manager this module
        belongs to.

        @return str: path of configuration directory
        """
        # Delegates to the manager instance stored in __init__.
        return self._manager.configDir
    @staticmethod
    def identify():
        """ Return module id.

        @return dict: id dictionary with modclass and modtype keys.
        """
        # NOTE(review): `moduleclass`, `_class`, `moduletype` and `_modtype`
        # are not defined anywhere in this method's scope; unless they exist
        # at module level, calling this raises NameError — TODO confirm.
        return {moduleclass: _class, moduletype: _modtype}
def get_main_dir(self):
""" Returns the absolut path to the directory of the main software.
@return string: path to the main tree of the software
"""
mainpath = os.path.abspath(
os.path.join(os.path.dirname(__file__), ".."))
return mainpath
def get_home_dir(self):
""" Returns the path to the home directory, which should definitely
exist.
@return string: absolute path to the home directory
"""
return os.path.abspath(os.path.expanduser('~'))
def get_in_connector(self, connector_name):
""" Return module connected to the given named connector.
@param str connector_name: name of the connector
@return obj: module that is connected to the named connector
"""
obj = self.connector['in'][connector_name]['object']
if obj is None:
|
if self.emittance_aim >= self.bucket_area:
self.tb1.append("Chosen emittance too large for this bucket. Aborting!")
raise RuntimeError("Chosen emittance too large for this bucket. Aborting!")
self.tb1.append("Calculating 4-sigma bunch length for an emittance of "
+np.str(self.emittance_aim)+" eVs")
# Make a guess, iterate to get closer
self.tau = self.dt_max/2.
while (np.fabs((self.emittance - self.emittance_aim)
/self.emittance_aim) > 0.001):
self.tau *= np.sqrt(self.emittance_aim/self.emittance)
self.tb1.append(" Bunch length is: "+np.str(self.tau*1.e9)+" ns")
self.tb1.append(" Corresponding matched rms relative momentum offset: "+
np.str(self.delta_b)+"")
self.tb1.append(" Emittance contour in phase: "+
np.str(self.phi_b)+" rad")
    def setupUi(self, mainWindow):
        """Build all widgets of the bunch parameter calculator window.

        Creates labels, line edits, combo boxes, radio buttons and the
        submit button at fixed pixel positions on a fixed-size window,
        then delegates text setup to retranslateUi() and wires actions
        via addactions().

        @param mainWindow: the Qt widget/window being populated
        """
        mainWindow.setObjectName("mainWindow")
        mainWindow.resize(586, 611)
        mainWindow.setWindowOpacity(1.0)
        # Fix the window at its current size; layout below is absolute.
        mainWindow.setFixedSize(mainWindow.size())
        # Label "Machine/Optics"
        self.lbMachine = QtWidgets.QLabel(mainWindow)
        self.lbMachine.setGeometry(QtCore.QRect(20, 20, 120, 17))
        self.lbMachine.setMinimumSize(QtCore.QSize(70, 0))
        self.lbMachine.setMaximumSize(QtCore.QSize(16777215, 17))
        self.lbMachine.setObjectName("lbMachine")
        # Label "Energy"
        self.lbEnergy = QtWidgets.QLabel(mainWindow)
        self.lbEnergy.setGeometry(QtCore.QRect(20, 80, 70, 17))
        self.lbEnergy.setObjectName("lbEnergy")
        # Custom energy box (hidden until a custom energy is selected)
        self.leCustom = QtWidgets.QLineEdit(mainWindow)
        self.leCustom.setEnabled(True)
        self.leCustom.setGeometry(QtCore.QRect(145, 100, 70, 25))
        self.leCustom.setStyleSheet("background-color: rgb(255, 255, 255);\n"
                                    "border-color: rgb(0, 0, 0);")
        self.leCustom.hide()
        self.leCustom.setText("")
        self.leCustom.setObjectName("leCustom")
        # Custom energy label (unit)
        self.lbEV1 = QtWidgets.QLabel(mainWindow)
        self.lbEV1.setEnabled(True)
        self.lbEV1.setGeometry(QtCore.QRect(220, 100, 30, 25))
        self.lbEV1.setObjectName("lbEV1")
        self.lbEV1.hide()
        # Label "Gamma Transition"
        self.rbGammaT = QtWidgets.QLabel(mainWindow)
        self.rbGammaT.setGeometry(QtCore.QRect(260, 80, 120, 17))
        self.rbGammaT.setObjectName("rbGammaT")
        # Custom gamma_t box (hidden until a custom gamma_t is selected)
        self.reCustom = QtWidgets.QLineEdit(mainWindow)
        self.reCustom.setEnabled(True)
        self.reCustom.setGeometry(QtCore.QRect(385, 100, 70, 25))
        self.reCustom.setStyleSheet("background-color: rgb(255, 255, 255);\n"
                                    "border-color: rgb(0, 0, 0);")
        self.reCustom.hide()
        self.reCustom.setText("")
        self.reCustom.setObjectName("reCustom")
        # Label "Voltage" with units
        self.lbVoltage = QtWidgets.QLabel(mainWindow)
        self.lbVoltage.setGeometry(QtCore.QRect(20, 160, 70, 25))
        self.lbVoltage.setObjectName("lbVoltage")
        self.lbEV2 = QtWidgets.QLabel(mainWindow)
        self.lbEV2.setGeometry(QtCore.QRect(150, 160, 31, 25))
        self.lbEV2.setObjectName("lbEV2")
        self.leVoltage = QtWidgets.QLineEdit(mainWindow)
        self.leVoltage.setGeometry(QtCore.QRect(80, 155, 70, 25))
        self.leVoltage.setStyleSheet("background-color: rgb(255, 255, 255);\n"
                                     "border-color: rgb(0, 0, 0);")
        self.leVoltage.setText("")
        self.leVoltage.setObjectName("leVoltage")
        # Label "Optional"
        self.lbOptional = QtWidgets.QLabel(mainWindow)
        self.lbOptional.setGeometry(QtCore.QRect(20, 230, 70, 17))
        self.lbOptional.setObjectName("lbOptional")
        # Label "Emittance" with units
        self.leEmittance = QtWidgets.QLineEdit(mainWindow)
        self.leEmittance.setGeometry(QtCore.QRect(130, 270, 70, 25))
        self.leEmittance.setStyleSheet("background-color: rgb(255, 255, 255);\n"
                                       "border-color: rgb(0, 0, 0);")
        self.leEmittance.setText("")
        self.leEmittance.setObjectName("leEmittance")
        self.lbEVS1 = QtWidgets.QLabel(mainWindow)
        self.lbEVS1.setGeometry(QtCore.QRect(200, 275, 41, 25))
        self.lbEVS1.setObjectName("lbEVS1")
        self.lbEVS2 = QtWidgets.QLabel(mainWindow)
        self.lbEVS2.setGeometry(QtCore.QRect(330, 275, 41, 25))
        self.lbEVS2.setObjectName("lbEVS2")
        # Label "Bunch Length" with units
        self.leBunchLength = QtWidgets.QLineEdit(mainWindow)
        self.leBunchLength.setGeometry(QtCore.QRect(260, 270, 70, 25))
        self.leBunchLength.setStyleSheet("background-color: rgb(255, 255, 255);\n"
                                         "border-color: rgb(0, 0, 0);")
        self.leBunchLength.setText("")
        self.leBunchLength.setObjectName("leBunchLength")
        # "Submit" button
        self.pbSubmit = QtWidgets.QPushButton(mainWindow)
        self.pbSubmit.setGeometry(QtCore.QRect(230, 320, 101, 27))
        # NOTE(review): object name "pbSumbit" is misspelled (the attribute
        # is pbSubmit); if nothing looks the widget up by this name via
        # findChild/connectSlotsByName, it should probably be "pbSubmit".
        self.pbSubmit.setObjectName("pbSumbit")
        self.tb1 = QtWidgets.QTextBrowser(mainWindow)
        self.tb1.setGeometry(QtCore.QRect(10, 350, 561, 241))
        self.tb1.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
        self.tb1.setObjectName("tb1")
        # Drop-down menus Machine/Optics, Energy, Gamma Transition
        self.cbMachine = QtWidgets.QComboBox(mainWindow)
        self.cbMachine.setGeometry(QtCore.QRect(20, 40, 115, 25))
        self.cbMachine.setEditable(False)
        self.cbMachine.setObjectName("cbMachine")
        # One entry per machine/optics; presumably gamma_ts is a
        # module-level table — TODO confirm.
        for i in range(len(gamma_ts)):
            self.cbMachine.addItem("")
        self.cbEnergy = QtWidgets.QComboBox(mainWindow)
        self.cbEnergy.setGeometry(QtCore.QRect(20, 100, 115, 25))
        self.cbEnergy.setObjectName("cbEnergy")
        self.cbEnergy.addItem("")
        self.cbEnergy.addItem("")
        self.cbEnergy.addItem("")
        self.cbGammaT = QtWidgets.QComboBox(mainWindow)
        self.cbGammaT.setGeometry(QtCore.QRect(260, 100, 115, 25))
        self.cbGammaT.setObjectName("cbGammaT")
        self.cbGammaT.addItem("")
        self.cbGammaT.addItem("")
        # Radio button Bunch Length
        self.rbBunchLength = QtWidgets.QRadioButton(mainWindow)
        self.rbBunchLength.setGeometry(QtCore.QRect(260, 250, 140, 22))
        self.rbBunchLength.setObjectName("rbBunchLength")
        # Radio button Emittance
        self.rbEmittance = QtWidgets.QRadioButton(mainWindow)
        self.rbEmittance.setGeometry(QtCore.QRect(130, 250, 100, 22))
        self.rbEmittance.setObjectName("rbEmittance")
        # Radio button No option (default selection)
        self.rbNoOption = QtWidgets.QRadioButton(mainWindow)
        self.rbNoOption.setGeometry(QtCore.QRect(20, 250, 100, 22))
        self.rbNoOption.setObjectName('rbNoOption')
        self.rbNoOption.setChecked(True)
        self.retranslateUi(mainWindow)
        QtCore.QMetaObject.connectSlotsByName(mainWindow)
        self.addactions(mainWindow)
def retranslateUi(self, mainWindow):
_translate = QtCore.QCoreApplication.translate
# Label texts
mainWindow.setWindowTitle(_translate("mainWindow", "Bunch Parameter Calculator"))
self.lbMachine.setText(_translate("mainWindow", "Machine, Optics"))
self.lbEnergy.setText(_translate("mainWindow", "Energy"))
self.lbEV1.setText(_translate("mainWindow", "[eV]"))
self.rbGammaT.setText(_translate("mainWindow", "Transition Gamma"))
self.lbVoltage.setText(_translate("mainWindow", "Voltage"))
self.lbEV2.setText(_translate("mainWindow", "[V]"))
self.lbOptional.setText(_translate("mainWindow", "Optional"))
self.rbEmittance.setText(_translate("mainWindow", "Emittance"))
self.lbEVS1.setText(_translate("mainWindow", "[eVs]"))
self.lbEVS2.setText(_translate("mainWindow", "[s] |
= None
self.sent_bytes = None
self.request = None
self.user_agent = None
self.ssl_cipher = None
self.ssl_protocol = None
self.destination_group_arn = None
self.trace_identifier = None
self.domain_name = None
self.chosen_cert_arn = None
self.matched_rule_priority = None
self.actions_executed = None
self.redirect_url = None
self.error_reason = None
self.destination_list = None
self.destination_status_code_list = None
self.classification = None
self.classification_reason = None
class AWSELBParser(text_parser.PyparsingSingleLineTextParser):
  """Parses an AWS ELB access log file."""
  NAME = 'aws_elb_access'
  DATA_FORMAT = 'AWS ELB Access log file'
  MAX_LINE_LENGTH = 3000
  _ENCODING = 'utf-8'
  # '"-"' marks an absent field in ELB access logs.
  BLANK = pyparsing.Literal('"-"')
  _WORD = pyparsing.Word(pyparsing.printables) | BLANK
  _QUOTE_INTEGER = (
      pyparsing.OneOrMore('"') + text_parser.PyparsingConstants.INTEGER | BLANK)
  _INTEGER = text_parser.PyparsingConstants.INTEGER | BLANK
  _FLOAT = pyparsing.Word(pyparsing.nums + '.')
  # Port numbers are parsed as integers; a blank field stays '"-"'.
  _PORT = pyparsing.Word(pyparsing.nums, max=6).setParseAction(
      text_parser.ConvertTokenToInteger) | BLANK
  # "ip:port" pairs; grouped so ParseRecord can read the named sub-tokens.
  _CLIENT_IP_ADDRESS_PORT = pyparsing.Group(
      text_parser.PyparsingConstants.IP_ADDRESS('source_ip_address') +
      pyparsing.Suppress(':') + _PORT('source_port') | BLANK)
  _DESTINATION_IP_ADDRESS_PORT = pyparsing.Group(
      text_parser.PyparsingConstants.IP_ADDRESS('destination_ip_address') +
      pyparsing.Suppress(':') + _PORT('destination_port') | BLANK)
  # Matches e.g. 2020-01-02T03:04:05.123456Z (microsecond precision).
  _DATE_TIME_ISOFORMAT_STRING = pyparsing.Combine(
      pyparsing.Word(pyparsing.nums, exact=4) + pyparsing.Literal('-') +
      pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal('-') +
      pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal('T') +
      pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal(':') +
      pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal(':') +
      pyparsing.Word(pyparsing.nums, exact=2) + pyparsing.Literal('.') +
      pyparsing.Word(pyparsing.nums, exact=6) + pyparsing.Literal('Z'))
  # A log line is defined as in the AWS ELB documentation
  _LOG_LINE = (
      _WORD.setResultsName('request_type') +
      _DATE_TIME_ISOFORMAT_STRING.setResultsName('time') +
      _WORD.setResultsName('resource_identifier') +
      _CLIENT_IP_ADDRESS_PORT.setResultsName('source_ip_port') +
      _DESTINATION_IP_ADDRESS_PORT.setResultsName('destination_ip_port') +
      _FLOAT.setResultsName('request_processing_time') +
      _FLOAT.setResultsName('destination_processing_time') +
      _FLOAT.setResultsName('response_processing_time') +
      _INTEGER.setResultsName('elb_status_code') +
      _INTEGER.setResultsName('destination_status_code') +
      _INTEGER.setResultsName('received_bytes') +
      _INTEGER.setResultsName('sent_bytes') +
      pyparsing.quotedString.setResultsName('request')
      .setParseAction(pyparsing.removeQuotes) +
      pyparsing.quotedString.setResultsName('user_agent')
      .setParseAction(pyparsing.removeQuotes) +
      _WORD.setResultsName('ssl_cipher') +
      _WORD.setResultsName('ssl_protocol') +
      _WORD.setResultsName('destination_group_arn') +
      _WORD.setResultsName('trace_identifier') +
      pyparsing.quotedString.setResultsName(
          'domain_name').setParseAction(pyparsing.removeQuotes) +
      pyparsing.quotedString.setResultsName(
          'chosen_cert_arn').setParseAction(pyparsing.removeQuotes) +
      _INTEGER.setResultsName('matched_rule_priority') +
      _DATE_TIME_ISOFORMAT_STRING.setResultsName('request_creation_time') +
      pyparsing.quotedString.setResultsName(
          'actions_executed').setParseAction(pyparsing.removeQuotes) +
      pyparsing.quotedString.setResultsName(
          'redirect_url').setParseAction(pyparsing.removeQuotes) +
      pyparsing.quotedString.setResultsName(
          'error_reason').setParseAction(pyparsing.removeQuotes) +
      pyparsing.quotedString.setResultsName(
          'destination_list').setParseAction(pyparsing.removeQuotes) +
      pyparsing.quotedString.setResultsName(
          'destination_status_code_list').setParseAction(
              pyparsing.removeQuotes) +
      pyparsing.quotedString.setResultsName(
          'classification').setParseAction(pyparsing.removeQuotes) +
      pyparsing.quotedString.setResultsName(
          'classification_reason').setParseAction(pyparsing.removeQuotes)
  )
  # Key must match the check in ParseRecord.
  LINE_STRUCTURES = [('elb_accesslog', _LOG_LINE)]
def _GetValueFromGroup(self, structure, name, key_name):
"""Retrieves a value from a Pyparsing.Group structure.
Args:
structure (pyparsing.ParseResults): tokens from a parsed log line.
name (str): name of the token.
key_name (str): key name to retrieve the value of.
Returns:
object: value for the specified key.
"""
structure_value = self._GetValueFromStructure(structure, name)
return structure_value.get(key_name)
def _GetDateTime(self, parser_mediator, time_structure):
"""Retrur | ns a dfdatetime object from a timestamp.
Args:
parser_medi | ator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
time_structure (str): a timestamp string of the event.
Returns:
TimeElements: Time elements contain separate values for year, month,
day of month, hours, minutes and seconds.
"""
date_time = None
try:
date_time = dfdatetime_time_elements.TimeElements()
date_time.CopyFromStringISO8601(time_structure)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'invalid date time value: {0!s}'.format(time_structure))
return date_time
def ParseRecord(self, parser_mediator, key, structure):
"""Parses a log record structure and produces events.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfVFS.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): structure parsed from the log file.
Raises:
ParseError: when the structure type is unsupported.
"""
if key != 'elb_accesslog':
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
time_response_sent = structure.get('time')
time_request_received = structure.get('request_creation_time')
date_time_response_sent = self._GetDateTime(
parser_mediator, time_response_sent)
date_time_request_received = self._GetDateTime(
parser_mediator, time_request_received)
if date_time_request_received is None or date_time_response_sent is None:
return
event_data = AWSELBEventData()
event_data.request_type = self._GetValueFromStructure(
structure, 'request_type')
event_data.resource_identifier = self._GetValueFromStructure(
structure, 'resource_identifier')
event_data.source_ip_address = self._GetValueFromGroup(
structure, 'source_ip_port', 'source_ip_address')
event_data.source_port = self._GetValueFromGroup(
structure, 'source_ip_port', 'source_port')
event_data.destination_ip_address = self._GetValueFromGroup(
structure, 'destination_ip_port', 'destination_ip_address')
event_data.destination_port = self._GetValueFromGroup(
structure, 'destination_ip_port', 'destination_port')
event_data.request_processing_time = self._GetValueFromStructure(
structure, 'request_processing_time')
event_data.destination_processing_time = self._GetValueFromStructure(
structure, 'destination_processing_time')
event_data.response_processing_time = self._GetValueFromStructure(
structure, 'response_processing_time')
event_data.elb_status_code = self._GetValueFromStructure(
structure, 'elb_status_code')
event_data.destination_status_code = self._GetValueFromStructure(
structure, 'destination_status_code')
event_data.received_bytes = self._GetValueFromStructure(
structure, 'received_bytes')
|
def extractStealtranslationHomeBlog(item):
    """Parser for 'stealtranslation.home.blog'.

    Placeholder extractor: no release information is recognized for this
    site, so every item maps to None.
    """
    return None
import utils
import os
| import shutil
import sys
def go( boost_root ):
    """Vendor the subset of Boost used by this source tree into
    src/third_party/boost using bcp.

    Runs `bcp --scan` over all source files, writes bcp's stdout to
    bcp-out.txt and the command line to notes.txt inside the output
    directory, and prints bcp's stderr.

    @param boost_root: path to a Boost source checkout
    """
    OUTPUT = "src/third_party/boost"
    # Start from a clean output directory.
    if os.path.exists( OUTPUT ):
        shutil.rmtree( OUTPUT )
    cmd = [ "bcp" , "--scan" , "--boost=%s" % boost_root ]
    src = utils.getAllSourceFiles()
    cmd += src
    cmd.append( OUTPUT )
    if not os.path.exists( OUTPUT ):
        os.makedirs( OUTPUT )
    res = utils.execsys( cmd )
    # with-statements guarantee the log files are closed even if a
    # write fails (the old code leaked the handles on error).
    with open( OUTPUT + "/bcp-out.txt" , 'w' ) as out:
        out.write( res[0] )
    with open( OUTPUT + "/notes.txt" , 'w' ) as out:
        out.write( "command: " + " ".join( cmd ) )
    print( res[1] )
if __name__ == "__main__":
    # Expect exactly one argument: the Boost source root to vendor from.
    if len(sys.argv) == 1:
        print( "usage: python %s <boost root directory>" % sys.argv[0] )
        sys.exit(1)
    go( sys.argv[1] )
|
def save_model_as(X, columns, model, save_model, flatten):
    '''Model Saver

    WHAT: Saves a trained model so it can be loaded later
    for predictions by predictor().

    Writes three files:
      save_model + ".json"  model architecture (model.to_json())
      save_model + ".h5"    model weights (model.save_weights())
      save_model + ".x"     space-separated input column names followed
                            by the flatten flag, read back by predictor()

    X may be: an int column index, a str column label, a two-int list
    [start, end) range, a list of more than two int indexes, or a list
    of str labels.
    '''
    model_json = model.to_json()
    # with-blocks close the files even if a write raises.
    with open(save_model + ".json", "w") as json_file:
        json_file.write(model_json)
    model.save_weights(save_model + ".h5")
    print("Model" + " " + save_model + " " + "have been saved.")

    temp = ""
    if isinstance(X, list):
        # NOTE: a one-element list of ints matches none of these branches
        # and writes no column names (kept for backward compatibility).
        if len(X) == 2 and isinstance(X[0], int):
            # a range of columns (two ints): [start, end)
            for i in range(X[0], X[1]):
                try:
                    temp += columns[i] + " "
                except IndexError:
                    # out-of-range indexes are silently skipped (was a
                    # bare except; only IndexError is expected here)
                    pass
        elif len(X) > 2 and isinstance(X[0], int):
            # multiple column indexes
            for i in X:
                temp += columns[i] + " "
        elif X and isinstance(X[0], str):
            # multiple column labels (empty list no longer crashes on X[0])
            for label in X:
                temp += label + " "
        # drop the trailing space left by the loops above
        temp = temp[:-1]
    elif isinstance(X, int):
        # an integer as column name (int)
        temp = columns[X]
    elif isinstance(X, str):
        # a single column label which contains string values
        temp = X
    temp += " " + str(flatten)
    with open(save_model + ".x", "w+") as f:
        f.write(temp)
|
########
def install_and_import(package):
    """Import `package`, pip-installing it first when the import fails.

    Python 2 only (print statement below). If pip itself is missing it is
    bootstrapped by running get_pip.py. On success the imported module is
    bound into globals() under its package name, so callers can use the
    package directly after this returns.
    """
    import importlib
    try:
        importlib.import_module(package)
    except ImportError:
        try:
            import pip
        except ImportError:
            # No pip available: bootstrap it via get-pip, then retry below.
            print "no pip"
            os.system('python get_pip.py')
        finally:
            import pip
            pip.main(['install', package])
    finally:
        # Re-import (cached if already loaded) and expose at module scope.
        globals()[package] = importlib.import_module(package)
#os is one of the modules that I know comes with 2.7, no questions asked.
import os
#these other ones I a am not so sure of. Thus the install function.
install_and_import("requests")
install_and_import("subprocess")
install_and_import("json")
install_and_import("sys")
install_and_import("time")
install_and_import("shutil")
install_and_import("urlparse")
install_and_import("itertools")
from commands import *
from files_processing import *
from constants import moses_dir_fn
from Ui_mosesDialog import MosesDialog
UI_INFO = """
<ui>
<menubar name='MenuBar'>
<menu action='VisualsMenu'>
<menu action='Visuals'>
<menuitem action='metro'/>
<menuitem action='paper'/>
<separator />
<menuitem action='lights_on_option'/>
</menu>
</menu>
</menubar>
</ui>
"""
class MTTTCore():
def __init__(self):
# Recognize OS
if os.name == 'posix': # Linux
self.is_linux, self.is_windows = True, False
elif os.name == 'nt': # Windows
self.is_linux, self.is_windows = False, True
else:
print "Unknown OS"
exit(1)
# Check Moses Config file.
self.moses_dir = ""
try:
f = open(moses_dir_fn, 'r')
self.moses_dir = f.read()
f.close()
except IOError, OSError:
# File does not exist.
self.moses_dir = self.get_moses_dir()
f = open(moses_dir_fn, 'w')
f.write(self.moses_dir)
f.close()
finally:
# File content is wrong
if not self.is_moses_dir_valid(self.moses_dir):
moses_dir = self.get_moses_dir()
f = open(moses_dir_fn, 'w')
f.write(self.moses_dir)
f.close()
self.saved_absolute_path = os.path.abspath("saved")
self.saved_relative_filepath = "./saved"
if not os.path.exists(self.saved_absolute_path):
os.makedirs(self.saved_absolute_path)
# Init
self.source_lang = None
self.t | arget_lang = None
self.output_text= None
self.cwd = os.getcwd()
def is_moses_dir_valid(self, directory):
is_valid = True
| if directory == "":
is_valid = False # Empty string
elif not os.path.exists(directory):
is_valid = False # Directory does not exist
else:
# Check if dir exists but does not contain moses installation
is_valid = self._check_moses_installation(directory)
return is_valid
def _check_moses_installation(self, directory):
# TODO: TRY catch OSError when permission denied!!
file_content = [f for f in os.listdir(directory)]
moses_files = ["/scripts/tokenizer/tokenizer.perl",
"/scripts/recaser/truecase.perl",
"/scripts/training/clean-corpus-n.perl",
"/bin/lmplz",
"/bin/build_binary",
"/scripts/training/train-model.perl",
"/bin/moses"
]
if self.is_windows:
moses_files = [f.replace("/", "\\")
for f in moses_files]
moses_files = [f + ".exe"
for f in moses_files
if "/bin" in f]
is_valid = True
for mfile in moses_files:
is_valid = is_valid and os.path.isfile(directory + mfile)
return is_valid
    def get_moses_dir(self):
        """
        Gets Moses directory.

        Opens a MosesDialog so the user can locate the installation; the
        chosen path is cached on self.moses_dir.

        @return: the detected Moses directory path
        """
        moses = MosesDialog()
        self.moses_dir = moses.detect()
        return self.moses_dir
def _prepare_corpus(self, output_text, source_lang, target_lang, st_train, tt_train, lm_text):
self.output_text = str(output_text)
self.source_lang = str(source_lang)
self.target_lang = str(target_lang)
self.lm_text = str(lm_text)
self.tt_train = str(tt_train)
self.st_train = str(st_train)
output_directory = adapt_path_for_cygwin(self.is_windows, self.output_text)
return_text = ""
if output_directory is not None:
# Change directory to the output_directory.
try:
os.chdir(self.output_text)
except:
# Output directory does not exist.
os.mkdir(self.output_text)
os.chdir(self.output_text)
cmds = []
# 1) Tokenization
# a) Target text
target_tok = generate_input_tok_fn(self.target_lang,
output_directory)
cmds.append(get_tokenize_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
self.target_lang,
adapt_path_for_cygwin(self.is_windows,self.tt_train),
target_tok))
# b) Source text
source_tok = generate_input_tok_fn(self.source_lang,
output_directory)
cmds.append(get_tokenize_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
self.source_lang,
adapt_path_for_cygwin(self.is_windows,self.st_train),
source_tok))
# c) Language model
lm_tok = generate_lm_tok_fn(output_directory)
cmds.append(get_tokenize_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
self.source_lang,
adapt_path_for_cygwin(self.is_windows,self.lm_text),
lm_tok))
# 2) Truecaser training
# a) Target text
cmds.append(get_truecaser_train_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
target_tok))
# b) Source text
cmds.append(get_truecaser_train_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
source_tok))
# c) Language model
cmds.append(get_truecaser_train_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
lm_tok))
# 3) Truecaser
input_true = output_directory + "/input.true"
# a) Target text
target_true = generate_input_true_fn(self.target_lang,
output_directory)
cmds.append(get_truecaser_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
target_tok,
target_true))
# b) Source text
source_true = generate_input_true_fn(self.source_lang,
output_directory)
cmds.append(get_truecaser_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
source_tok,
source_true))
# c) Language model
self.lm_true = lm_true = generate_lm_true_fn(output_directory)
cmds.append(get_truecaser_command(adapt_path_for_cygwin(self.is_windows, self.moses_dir),
target_tok, lm_true))
# 4) Cleaner
# a) Target text
self.input_clean = inpu |
'''
Proxy for drivers.
Copyright (c) 2009, 2013 Peter Parente
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
'''
import sys
import traceback
import weakref
class DriverProxy(object):
'''
Proxy to a driver implementation.
@ivar _module: Module containing the driver implementation
@type _module: module
@ivar _engine: Reference to the engine that owns the driver
@type _engine: L{engine.Engine}
@ivar _queue: Queue of commands outstanding for the driver
@type _queue: list
@ivar _busy: True when the driver is busy processing a command, False when
not
@type _busy: bool
@ivar _name: Name associated with the current utterance
@type _name: str
@ivar _debug: Debugging output enabled or not
@type _debug: bool
@ivar _iterator: Driver iterator to invoke when in an external run loop
@type _iterator: iterator
'''
def __init__(self, engine, driverName, debug):
'''
Constructor.
@param engine: Reference to the engine that owns the driver
@type engine: L{engine.Engine}
@param driverName: Name of the driver module to use under drivers/ or
None to select the default for the platform
@type driverName: str
@param debug: Debugging output enabled or not
@type debug: bool
'''
if driverName is None:
# pick default driver for common platforms
if sys.platform == 'darwin':
driverName = 'nsss'
elif sys.platform == 'win32':
driverName = 'sapi5'
else:
driverName = 'espeak'
# import driver module
name = 'drivers.%s' % driverName
self._module = __import__(name, globals(), locals(), [driverName])
# build driver instance
self._driver = self._module.buildDriver(weakref.proxy(self))
# initialize refs
self._engine = engine
self._queue = []
self._busy = True
self._name = None
self._iterator = None
self._debug = debug
def __del__(self):
try:
self._driver.destroy()
except (AttributeError, TypeError):
pass
def _push(self, mtd, args, name=None):
'''
Adds a command to the queue.
@param mtd: Method to invoke to process the command
@type mtd: method
@param args: Arguments to apply when invoking the method
@type args: tuple
@param name: Name associated with the command
@type name: str
'''
self._queue.append((mtd, args, name))
self._pump()
def _pump(self):
'''
Attempts to process the next command in the queue if one exists and the
driver is not currently busy.
'''
while (not self._busy) and len(self._queue):
cmd = self._queue.pop(0)
self._name = cmd[2]
try:
cmd[0](*cmd[1])
except Exception, e:
self.notify('error', exception=e)
if self._debug: traceback.print_exc()
def notify(self, topic, **kwargs):
'''
Sends a notification to the engine from the driver.
@param topic: Notification topic
@type topic: str
@param kwargs: Arbitrary keyword arguments
@type kwargs: dict
'''
kwargs['name'] = self._name
self._engine._notify(topic, **kwargs)
def setBusy(self, busy):
'''
Called by the driver to indicate it is busy.
@param busy: True when busy, false when idle
@type busy: bool
'''
self._busy = busy
if not self._busy:
self._pump()
def isBusy(self):
'''
@return: True if the driver is busy, false if not
@rtype: bool
'''
return self._busy
def say(self, text, name):
'''
Called by the engine to push a say command onto the queue.
@param text: Text to speak
@type text: unicode
@param name: Name to associate with the utterance
@type name: str
'''
self._push(self._driver.say, (text,), name)
def stop(self):
'''
Called by the engine to stop the current utterance and clear the queue
of commands.
'''
# clear queue up to first end loop command
while(True):
try:
mtd, args, name = self._queue[0]
except IndexError:
break
if(mtd == self._engine.endLoop): break
self._queue.pop(0)
self._driver.stop()
def getProperty(self, name):
'''
Called by the engine to get a driver property value.
@param name: Name of the property
@type name: str
@return: Property value
@rtype: object
'''
return self._driver.getProperty(name)
    def setProperty(self, name, value):
        '''
        Called by the engine to set a driver property value.
        @param name: Name of the property
        @type name: str
        @param value: Property value
        @type value: object
        '''
        # NOTE(review): _push is called without the name argument here --
        # presumably it defaults; confirm against _push's signature.
        self._push(self._driver.setProperty, (name, value))
    def runAndWait(self):
        '''
        Called by the engine to start an event loop, process all commands in
        the queue at the start of the loop, and then exit the loop.
        '''
        # Append an endLoop marker so the loop stops once the current queue
        # contents have been processed.
        self._push(self._engine.endLoop, tuple())
        self._driver.startLoop()
    def startLoop(self, useDriverLoop):
        '''
        Called by the engine to start an event loop.
        @param useDriverLoop: True to use the driver's own event loop; False
            to drive it externally through iterate()
        @type useDriverLoop: bool
        '''
        if useDriverLoop:
            self._driver.startLoop()
        else:
            # Hold the driver's generator; iterate() will step it.
            self._iterator = self._driver.iterate()
    def endLoop(self, useDriverLoop):
        '''
        Called by the engine to stop an event loop.

        Discards all pending commands and stops the driver; setBusy(True)
        prevents _pump from dispatching anything further.
        '''
        self._queue = []
        self._driver.stop()
        if useDriverLoop:
            self._driver.endLoop()
        else:
            self._iterator = None
        self.setBusy(True)
    def iterate(self):
        '''
        Called by the engine to iterate driver commands and notifications from
        within an external event loop.
        '''
        try:
            # Python 2 iterator protocol; would be next(self._iterator) on 3.
            self._iterator.next()
        except StopIteration:
            # Driver has no more work this tick; nothing to do.
            pass
exception.InstanceInvalidState:
# Note(yufang521247): instance which has never been active
# is not allowed to be soft_deleted. Thus we have to call
# delete() to clean up the instance.
self.compute_api.delete(context, instance)
else:
self.compute_api.delete(context, instance)
    @wsgi.serializers(xml=ServerTemplate)
    def update(self, req, id, body):
        """Update server then pass on to version-specific controller."""
        # Reject requests without a well-formed 'server' body.
        if not self.is_valid_body(body, 'server'):
            raise exc.HTTPUnprocessableEntity()
        ctxt = req.environ['nova.context']
        update_dict = {}
        if 'name' in body['server']:
            name = body['server']['name']
            self._validate_server_name(name)
            update_dict['display_name'] = name.strip()
        if 'accessIPv4' in body['server']:
            access_ipv4 = body['server']['accessIPv4']
            if access_ipv4:
                self._validate_access_ipv4(access_ipv4)
            # An empty string clears the address (stored as None).
            update_dict['access_ip_v4'] = (
                access_ipv4 and access_ipv4.strip() or None)
        if 'accessIPv6' in body['server']:
            access_ipv6 = body['server']['accessIPv6']
            if access_ipv6:
                self._validate_access_ipv6(access_ipv6)
            update_dict['access_ip_v6'] = (
                access_ipv6 and access_ipv6.strip() or None)
        if 'auto_disk_config' in body['server']:
            auto_disk_config = strutils.bool_from_string(
                body['server']['auto_disk_config'])
            update_dict['auto_disk_config'] = auto_disk_config
        # hostId and personality are immutable through this API.
        if 'hostId' in body['server']:
            msg = _("HostId cannot be updated.")
            raise exc.HTTPBadRequest(explanation=msg)
        if 'personality' in body['server']:
            msg = _("Personality cannot be updated.")
            raise exc.HTTPBadRequest(explanation=msg)
        try:
            instance = self.compute_api.get(ctxt, id,
                                            want_objects=True)
            req.cache_db_instance(instance)
            policy.enforce(ctxt, 'compute:update', instance)
            instance.update(update_dict)
            instance.save()
        except exception.NotFound:
            msg = _("Instance could not be found")
            raise exc.HTTPNotFound(explanation=msg)
        return self._view_builder.show(req, instance)
    @wsgi.response(202)
    @wsgi.serializers(xml=FullServerTemplate)
    @wsgi.deserializers(xml=ActionDeserializer)
    @wsgi.action('confirmResize')
    def _action_confirm_resize(self, req, id, body):
        """Confirm a pending resize, committing the instance to its new flavor."""
        context = req.environ['nova.context']
        instance = self._get_server(context, req, id)
        try:
            self.compute_api.confirm_resize(context, instance)
        except exception.MigrationNotFound:
            msg = _("Instance has not been resized.")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'confirmResize')
        # NOTE(review): returns HTTPNoContent (204) although the method is
        # decorated with @wsgi.response(202) -- confirm which wins for callers;
        # the sibling revertResize returns an explicit 202 Response.
        return exc.HTTPNoContent()
    @wsgi.response(202)
    @wsgi.serializers(xml=FullServerTemplate)
    @wsgi.deserializers(xml=ActionDeserializer)
    @wsgi.action('revertResize')
    def _action_revert_resize(self, req, id, body):
        """Revert a pending resize, restoring the instance's previous flavor."""
        context = req.environ['nova.context']
        instance = self._get_server(context, req, id)
        try:
            self.compute_api.revert_resize(context, instance)
        except exception.MigrationNotFound:
            msg = _("Instance has not been resized.")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.FlavorNotFound:
            msg = _("Flavor used by the instance could not be found.")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'revertResize')
        return webob.Response(status_int=202)
@wsgi.response(202)
@wsgi.serializers(xml=FullServerTemplate)
@wsgi.deserializers(xml=ActionDeserializer)
@wsgi.action('reboot')
def _action_reboot(self, req, id, body):
if 'reboot' in body and 'type' in body['reboot']:
if not isinstance(body['reboot']['type'], six.string_types):
msg = _("Argument 'type' for reboot must be a string")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
valid_reboot_types = ['HARD', 'SOFT']
reboot_type = body['reboot']['type'].upper()
if not valid_reboot_types.count(reboot_type):
msg = _("Argument 'type' for reboot is not HARD or SOFT")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
else:
msg = _("Missing argument 'type' for reboot")
LOG.error(msg)
raise exc.HTTPBadRequest(explanation=msg)
context = req.environ['nova.context']
instance = self._get_server(context, req, id)
try:
self.compute_api.reboot(context, instance, reboot_type)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'reboot')
return webob.Response(status_int=202)
    def _resize(self, req, instance_id, flavor_id, **kwargs):
        """Begin the resize process with given instance/flavor.

        Maps compute API failures to HTTP errors: quota -> 413 with
        Retry-After, bad flavor/image -> 400, locked/invalid state -> 409,
        unauthorized image access -> 401.
        """
        context = req.environ["nova.context"]
        instance = self._get_server(context, req, instance_id)
        try:
            self.compute_api.resize(context, instance, flavor_id, **kwargs)
        except exception.QuotaError as error:
            raise exc.HTTPRequestEntityTooLarge(
                explanation=error.format_message(),
                headers={'Retry-After': 0})
        except exception.FlavorNotFound:
            msg = _("Unable to locate requested flavor.")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.CannotResizeToSameFlavor:
            msg = _("Resize requires a flavor change.")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'resize')
        except exception.ImageNotAuthorized:
            msg = _("You are not authorized to access the image "
                    "the instance was started with.")
            raise exc.HTTPUnauthorized(explanation=msg)
        except exception.ImageNotFound:
            msg = _("Image that the instance was started "
                    "with could not be found.")
            raise exc.HTTPBadRequest(explanation=msg)
        except exception.Invalid:
            msg = _("Invalid instance image.")
            raise exc.HTTPBadRequest(explanation=msg)
        return webob.Response(status_int=202)
    @wsgi.response(204)
    def delete(self, req, id):
        """Destroys a server."""
        try:
            # _delete handles reservation/soft-delete policy (defined above).
            self._delete(req.environ['nova.context'], req, id)
        except exception.NotFound:
            msg = _("Instance could not be found")
            raise exc.HTTPNotFound(explanation=msg)
        except exception.InstanceIsLocked as e:
            raise exc.HTTPConflict(explanation=e.format_message())
        except exception.InstanceInvalidState as state_error:
            common.raise_http_conflict_for_instance_invalid_state(state_error,
                    'delete')
def _image_ref_from_req_data |
"""
This config file extends the test environment configuration
so that we can run the lettuce acceptance tests.
"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
from .test import *
from .sauce import *
# You need to start the server in debug mode,
# otherwise the browser will not render the pages correctly
DEBUG = True
SITE_NAME = 'localhost:{}'.format(LETTUCE_SERVER_PORT)
# Output Django logs to a file
import logging
logging.basicConfig(filename=TEST_ROOT / "log" / "lms_acceptance.log", level=logging.ERROR)
# set root logger level
logging.getLogger().setLevel(logging.ERROR)
import os
from random import choice
def seed():
    """Return an identifier used to namespace test databases.

    The parent process id is shared by every process spawned from the same
    test run, so concurrent runs on one machine get distinct database names
    (see the acceptance_xmodule / acceptance_xcontent settings below).
    """
    return os.getppid()
# Silence noisy logs
LOG_OVERRIDES = [
    ('track.middleware', logging.CRITICAL),
    ('codejail.safe_exec', logging.ERROR),
    ('edx.courseware', logging.ERROR),
    ('audit', logging.ERROR),
    ('instructor_task.api_helper', logging.ERROR),
]
for log_name, log_level in LOG_OVERRIDES:
    logging.getLogger(log_name).setLevel(log_level)
# Mongo modulestore/contentstore databases are namespaced per test run
# via seed() so concurrent runs do not collide.
update_module_store_settings(
    MODULESTORE,
    doc_store_settings={
        'db': 'acceptance_xmodule',
        'collection': 'acceptance_modulestore_%s' % seed(),
    },
    module_store_options={
        'fs_root': TEST_ROOT / "data",
    },
    default_store=os.environ.get('DEFAULT_STORE', 'draft'),
)
CONTENTSTORE = {
    'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
    'DOC_STORE_CONFIG': {
        'host': 'localhost',
        'db': 'acceptance_xcontent_%s' % seed(),
    }
}
# Set this up so that 'paver lms --settings=acceptance' and running the
# harvest command both use the same (test) database
# which they can flush without messing up your dev db
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': TEST_ROOT / "db" / "test_edx.db",
        'TEST_NAME': TEST_ROOT / "db" / "test_edx.db",
        'OPTIONS': {
            'timeout': 30,
        },
        'ATOMIC_REQUESTS': True,
    }
}
TRACKING_BACKENDS.update({
    'mongo': {
        'ENGINE': 'track.backends.mongodb.MongoBackend'
    }
})
EVENT_TRACKING_BACKENDS['tracking_logs']['OPTIONS']['backends'].update({
    'mongo': {
        'ENGINE': 'eventtracking.backends.mongodb.MongoBackend',
        'OPTIONS': {
            'database': 'track'
        }
    }
})
BULK_EMAIL_DEFAULT_FROM_EMAIL = "test@test.org"
# Forums are disabled in test.py to speed up unit tests, but we do not have
# per-test control for lettuce acceptance tests.
# If you are writing an acceptance test that needs the discussion service enabled,
# do not write it in lettuce, but instead write it using bok-choy.
# DO NOT CHANGE THIS SETTING HERE.
FEATURES['ENABLE_DISCUSSION_SERVICE'] = False
# Use the auto_auth workflow for creating users and logging them in
FEATURES['AUTOMATIC_AUTH_FOR_TESTING'] = True
# Enable third-party authentication
FEATURES['ENABLE_THIRD_PARTY_AUTH'] = True
# Dummy OAuth credentials; the acceptance tests stub the providers.
THIRD_PARTY_AUTH = {
    "Google": {
        "SOCIAL_AUTH_GOOGLE_OAUTH2_KEY": "test",
        "SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET": "test"
    },
    "Facebook": {
        "SOCIAL_AUTH_FACEBOOK_KEY": "test",
        "SOCIAL_AUTH_FACEBOOK_SECRET": "test"
    }
}
# Enable fake payment processing page
FEATURES['ENABLE_PAYMENT_FAKE'] = True
# Enable email on the instructor dash
FEATURES['ENABLE_INSTRUCTOR_EMAIL'] = True
FEATURES['REQUIRE_COURSE_EMAIL_AUTH'] = False
FEATURES['ENABLE_SPECIAL_EXAMS'] = True
# Don't actually send any requests to Software Secure for student identity
# verification.
FEATURES['AUTOMATIC_VERIFY_STUDENT_IDENTITY_FOR_TESTING'] = True
# HACK
# Setting this flag to false causes imports to not load correctly in the lettuce python files
# We do not yet understand why this occurs. Setting this to true is a stopgap measure
USE_I18N = True
FEATURES['ENABLE_FEEDBACK_SUBMISSION'] = False
# Include the lettuce app for acceptance testing, including the 'harvest' django-admin command
INSTALLED_APPS += ('lettuce.django',)
LETTUCE_APPS = ('courseware', 'instructor')
# Lettuce appears to have a bug that causes it to search
# `instructor_task` when we specify the `instructor` app.
# This causes some pretty cryptic errors as lettuce tries
# to parse files in `instructor_task` as features.
# As a quick workaround, explicitly exclude the `instructor_task` app.
LETTUCE_AVOID_APPS = ('instructor_task',)
LETTUCE_BROWSER = os.environ.get('LETTUCE_BROWSER', 'chrome')
# Where to run: local, saucelabs, or grid
LETTUCE_SELENIUM_CLIENT = os.environ.get('LETTUCE_SELENIUM_CLIENT', 'local')
SELENIUM_GRID = {
    'URL': 'http://127.0.0.1:4444/wd/hub',
    'BROWSER': LETTUCE_BROWSER,
}
#####################################################################
# See if the developer has any local overrides.
try:
    from .private import * # pylint: disable=import-error
except ImportError:
    pass
# Because an override for where to run will affect which ports to use,
# set these up after the local overrides.
# Configure XQueue interface to use our stub XQueue server
XQUEUE_INTERFACE = {
    "url": "http://127.0.0.1:{0:d}".format(XQUEUE_PORT),
    "django_auth": {
        "username": "lms",
        "password": "***REMOVED***"
    },
    "basic_auth": ('anant', 'agarwal'),
}
# Point the URL used to test YouTube availability to our stub YouTube server
YOUTUBE['API'] = "http://127.0.0.1:{0}/get_youtube_api/".format(YOUTUBE_PORT)
YOUTUBE['METADATA_URL'] = "http://127.0.0.1:{0}/test_youtube/".format(YOUTUBE_PORT)
YOUTUBE['TEXT_API']['url'] = "127.0.0.1:{0}/test_transcripts_youtube/".format(YOUTUBE_PORT)
if FEATURES.get('ENABLE_COURSEWARE_SEARCH') or \
   FEATURES.get('ENABLE_DASHBOARD_SEARCH') or \
   FEATURES.get('ENABLE_COURSE_DISCOVERY'):
    # Use MockSearchEngine as the search engine for test scenario
    SEARCH_ENGINE = "search.tests.mock_search_engine.MockSearchEngine"
# Generate a random UUID so that different runs of acceptance tests don't break each other
import uuid
SECRET_KEY = uuid.uuid4().hex
ANONYMOUS_ID_SECRET_KEY = SECRET_KEY
USERNAME_CIPHER_SECRET_KEY = SECRET_KEY
############################### PIPELINE #######################################
PIPELINE_ENABLED = False
# We want to make sure that any new migrations are run
# see https://groups.google.com/forum/#!msg/django-developers/PWPj3etj3-U/kCl6pMsQYYoJ
MIGRATION_MODULES = {}
|
# $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY | OF SUCH DAMAGE.
"""
Implements controller for GridFTP option pages.
"""
from datafinder.gui.admin.datastore_configuration_wizard.gridftp impo | rt performance_option_controller
from datafinder.gui.admin.datastore_configuration_wizard.gridftp import security_option_controller
__version__ = "$Revision-Id:$"
|
# Helpers for pytest tests
import subprocess
import json
import os
def find_cppcheck_binary():
    """Return the path of the first cppcheck binary found.

    Probes a fixed list of build locations (in-tree, CMake build dir,
    Windows bin dir) and raises RuntimeError when none of them exists.
    """
    candidates = (
        "./cppcheck",
        "./build/bin/cppcheck",
        r".\bin\cppcheck.exe",
    )
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate
    raise RuntimeError("Could not find cppcheck binary")
def dump_create(fpath, *argv):
    """Run ``cppcheck --dump`` on *fpath*, producing ``fpath.dump``.

    Any extra arguments are passed through to cppcheck.  Raises OSError
    when cppcheck exits with a non-zero status.
    """
    binary = find_cppcheck_binary()
    command = [binary, "--dump", "-DDUMMY", "--quiet", fpath]
    command.extend(argv)
    proc = subprocess.Popen(command)
    proc.communicate()
    if proc.returncode != 0:
        raise OSError("cppcheck returns error code: %d" % proc.returncode)
    # Flush filesystem buffers so the .dump file is visible to readers.
    subprocess.Popen(["sync"])
def dump_remove(fpath):
    """Delete the ``fpath.dump`` file created by dump_create, if present.

    Uses os.remove instead of spawning ``rm -f``: it is portable to
    Windows, synchronous (the old Popen never waited, so the file could
    still exist when the caller continued), and leaves no unreaped child.
    """
    try:
        os.remove(fpath + ".dump")
    except OSError:
        # Mirror "rm -f" semantics: a missing file is not an error.
        pass
def convert_json_output(raw_json_strings):
    """Convert raw stdout/stderr cppcheck JSON output to a python dict.

    Returns a mapping from errorId to the list of parsed message dicts
    carrying that id.  Lines that are not valid JSON are ignored.
    """
    by_error_id = {}
    for raw_line in raw_json_strings:
        try:
            message = json.loads(raw_line)
        except ValueError:
            # Not a JSON diagnostic line; skip it.
            continue
        by_error_id.setdefault(message['errorId'], []).append(message)
    return by_error_id
|
from django.contrib import admin
from .models import User
from application.models import (Contact, Personal, Wife, Occupation, Children,
Hod, Committee, UserCommittee, Legal)
# Register your models here.
# Inline editors so all of a member's related records can be edited
# directly on the User admin page.
class ContactInline(admin.StackedInline):
    model = Contact
class PersonalInline(admin.StackedInline):
    model = Personal
class WifeInline(admin.StackedInline):
    model = Wife
class OccupationInline(admin.StackedInline):
    model = Occupation
class HodInline(admin.StackedInline):
    model = Hod
class ChildrenInline(admin.StackedInline):
    model = Children
class UserCommitteeInline(admin.StackedInline):
    model = UserCommittee
class UserAdmin(admin.ModelAdmin):
    # Order matters: this is the display order of the inline sections.
    inlines = [
        ContactInline,
        PersonalInline,
        WifeInline,
        OccupationInline,
        HodInline,
        ChildrenInline,
        UserCommitteeInline
    ]
class LegalAdmin(admin.ModelAdmin):
    model = Legal
admin.site.register(User, UserAdmin)
admin.site.register(Legal, LegalAdmin)
admin.site.site_header = 'Hebrew Order of David Administration'
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2009 Gary Burton
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
String mappings for constants
"""
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from ..lib import Person, Citation, FamilyRelType
from ..const import GRAMPS_LOCALE as glocale
_ = glocale.translation.sgettext
def _T_(value): # enable deferred translations (see Python docs 22.1.3.4)
    """Return value unchanged; marks strings for deferred translation."""
    return value
# _T_ is a gramps-defined keyword -- see po/update_po.py and po/genpot.sh
#-------------------------------------------------------------------------
#
# Integer to String mappings for constants
#
#--------------------------- | ----------------------------------------------
# Map Person gender constants to translated display strings.
gender = {
    Person.MALE : _("male"),
    Person.FEMALE : _("female"),
    Person.UNKNOWN : _("gender|unknown"),
}
def format_gender( type):
    """Return the translated gender string for *type*.

    NOTE(review): *type* is indexed (type[0]), so callers appear to pass a
    sequence whose first item is the gender constant -- confirm at call sites.
    """
    return gender.get(type[0], _("Invalid"))
# Citation confidence levels; _T_ defers translation until display time.
conf_strings = {
    Citation.CONF_VERY_HIGH : _T_("Very High"),
    Citation.CONF_HIGH : _T_("High"),
    Citation.CONF_NORMAL : _T_("Normal"),
    Citation.CONF_LOW : _T_("Low"),
    Citation.CONF_VERY_LOW : _T_("Very Low"),
}
# note that a list /very/ similar to this is in EditCitation._setup_fields
# but that has the glocale's translated values since it is used in the UI
family_rel_descriptions = {
    FamilyRelType.MARRIED : _("A legal or common-law relationship "
                              "between a husband and wife"),
    FamilyRelType.UNMARRIED : _("No legal or common-law relationship "
                                "between man and woman"),
    FamilyRelType.CIVIL_UNION : _("An established relationship between "
                                  "members of the same sex"),
    FamilyRelType.UNKNOWN : _("Unknown relationship between a man "
                              "and woman"),
    FamilyRelType.CUSTOM : _("An unspecified relationship between "
                             "a man and woman"),
    }
# Shown when warning the user about unsaved/abandonable changes.
data_recover_msg = _('The data can only be recovered by Undo operation '
            'or by quitting with abandoning changes.')
|
self.turnText.hide()
self.clockNode.stop()
self.clockNode.hide()
if winDirection == 0:
blinkList = self.findHorizontal(x, y, playerNum)
elif winDirection == 1:
blinkList = self.findVertical(x, y, playerNum)
elif winDirect | ion == 2:
blinkList = self.findDiagonal(x, y, playerNum)
if blinkList != []:
print blinkList
val0 = x * 7 + y
x = blinkList[0][0]
y = blinkList[0][1]
val1 = x * 7 + y
x = blinkList[1][0]
y = blinkList[1][1]
val2 = x * 7 + y
x = blinkList[2][0]
y = blinkList[2][1]
val3 = x * 7 + y
self.winningSeque | nce = Sequence()
downBlinkerParallel = Parallel(LerpColorInterval(self.locatorList[val0], 0.3, Vec4(0.5, 0.5, 0.5, 0.5), Vec4(1, 1, 1, 1)), LerpColorInterval(self.locatorList[val1], 0.3, Vec4(0.5, 0.5, 0.5, 0.5), Vec4(1, 1, 1, 1)), LerpColorInterval(self.locatorList[val2], 0.3, Vec4(0.5, 0.5, 0.5, 0.5), Vec4(1, 1, 1, 1)), LerpColorInterval(self.locatorList[val3], 0.3, Vec4(0.5, 0.5, 0.5, 0.5), Vec4(1, 1, 1, 1)))
upBlinkerParallel = Parallel(LerpColorInterval(self.locatorList[val0], 0.3, Vec4(1, 1, 1, 1), Vec4(0.5, 0.5, 0.5, 0.5)), LerpColorInterval(self.locatorList[val1], 0.3, Vec4(1, 1, 1, 1), Vec4(0.5, 0.5, 0.5, 0.5)), LerpColorInterval(self.locatorList[val2], 0.3, Vec4(1, 1, 1, 1), Vec4(0.5, 0.5, 0.5, 0.5)), LerpColorInterval(self.locatorList[val3], 0.3, Vec4(1, 1, 1, 1), Vec4(0.5, 0.5, 0.5, 0.5)))
self.winningSequence.append(downBlinkerParallel)
self.winningSequence.append(upBlinkerParallel)
self.winningSequence.loop()
    def tie(self):
        """Handle a tied game: stop the clock and flash the board locators."""
        self.tieSequence = Sequence(autoFinish=1)
        self.clockNode.stop()
        self.clockNode.hide()
        self.isMyTurn = False
        self.moveSequence.finish()
        if self.turnText:
            self.turnText.hide()
        # Pulse each locator dark -> light -> dark once.
        # NOTE(review): xrange(41) touches 41 locators although a 6x7 board
        # has 42 cells -- confirm whether the last cell is skipped on purpose.
        for x in xrange(41):
            self.tieSequence.append(Parallel(LerpColorInterval(self.locatorList[x], 0.15, Vec4(0.5, 0.5, 0.5, 0.5), Vec4(1, 1, 1, 1)), LerpColorInterval(self.locatorList[x], 0.15, Vec4(1, 1, 1, 1), Vec4(0.5, 0.5, 0.5, 0.5))))
        whisper = WhisperPopup('This Find Four game has resulted in a Tie!', OTPGlobals.getInterfaceFont(), WhisperPopup.WTNormal)
        whisper.manage(base.marginManager)
        self.tieSequence.start()
    def hideChildren(self, nodeList):
        # Intentional no-op; kept for interface compatibility with callers.
        pass
    def animatePeice(self, tableState, moveCol, movePos, turn):
        """Animate a piece dropping into column moveCol, landing at row movePos.

        tableState is the authoritative 6x7 board and is copied into
        self.board before the drop animation plays; turn (0 or 1) selects
        which player's piece model is shown.  (The method-name typo is
        preserved because remote callers invoke it by name.)
        """
        messenger.send('wakeup')
        for x in xrange(6):
            for y in xrange(7):
                self.board[x][y] = tableState[x][y]
        pos = self.startingPositions[moveCol].getPos()
        if turn == 0:
            peice = self.startingPositions[moveCol].getChild(1).getChildren()[2]
            peice.show()
        elif turn == 1:
            peice = self.startingPositions[moveCol].getChild(1).getChildren()[3]
            peice.show()
        self.moveSequence = Sequence()
        startPos = self.startingPositions[moveCol].getPos()
        # Board locators are laid out row-major with 7 columns per row.
        arrayLoc = movePos * 7 + moveCol
        self.moveSequence.append(LerpPosInterval(self.startingPositions[moveCol], 1.5, self.locatorList[arrayLoc].getPos(self), startPos))
        self.moveSequence.append(Func(peice.hide))
        self.moveSequence.append(Func(self.startingPositions[moveCol].setPos, startPos))
        self.moveSequence.append(Func(self.updateGameState))
        self.moveSequence.start()
    def announceWin(self, avId):
        # Server told us avId won; transition the local FSM to game over.
        self.fsm.request('gameOver')
def doRandomMove(self):
if self.isMyTurn:
if self.moveCol != None:
self.d_requestMove(self.moveCol)
self.moveCol = None
self.isMyTurn = False
taskMgr.remove('playerTurnTask')
else:
hasfound = False
while hasfound == False:
x = random.randint(0, 6)
if self.board[0][x] == 0:
self.d_requestMove(x)
self.moveCol = None
self.isMyTurn = False
taskMgr.remove('playerTurnTask')
hasfound = True
return
    def doNothing(self):
        # Intentional no-op callback (e.g. for disabled button states).
        pass
    def checkHorizontal(self, rVal, cVal, playerNum):
        """Return True if the piece at (rVal, cVal) is part of a horizontal four.

        NOTE(review): only columns 2, 3 and 4 are handled (any other column
        returns False), and each branch scans three cells in a fixed
        direction only -- confirm this matches the server's win validation.
        """
        if cVal == 3:
            # Centre column: a run of four must extend fully left or right.
            for x in xrange(1, 4):
                if self.board[rVal][cVal - x] != playerNum:
                    break
                if self.board[rVal][cVal - x] == playerNum and x == 3:
                    return True
            for x in xrange(1, 4):
                if self.board[rVal][cVal + x] != playerNum:
                    break
                if self.board[rVal][cVal + x] == playerNum and x == 3:
                    return True
            return False
        elif cVal == 2:
            for x in xrange(1, 4):
                if self.board[rVal][cVal + x] != playerNum:
                    break
                if self.board[rVal][cVal + x] == playerNum and x == 3:
                    return True
            return False
        elif cVal == 4:
            for x in xrange(1, 4):
                if self.board[rVal][cVal - x] != playerNum:
                    break
                if self.board[rVal][cVal - x] == playerNum and x == 3:
                    return True
            return False
        else:
            return False
    def checkVertical(self, rVal, cVal, playerNum):
        """Return True if the piece at (rVal, cVal) is part of a vertical four.

        NOTE(review): only rows 2 and 3 are handled (anything else returns
        False) -- confirm this matches the server's win validation.
        """
        if rVal == 2:
            for x in xrange(1, 4):
                if self.board[rVal + x][cVal] != playerNum:
                    break
                if self.board[rVal + x][cVal] == playerNum and x == 3:
                    return True
            return False
        elif rVal == 3:
            for x in xrange(1, 4):
                if self.board[rVal - x][cVal] != playerNum:
                    break
                if self.board[rVal - x][cVal] == playerNum and x == 3:
                    return True
            return False
        else:
            return False
def checkDiagonal(self, rVal, cVal, playerNum):
if cVal <= 2:
if rVal == 2:
for x in xrange(1, 4):
if self.board[rVal + x][cVal + x] != playerNum:
break
if self.board[rVal + x][cVal + x] == playerNum and x == 3:
return True
return False
if rVal == 3:
for x in xrange(1, 4):
if self.board[rVal - x][cVal + x] != playerNum:
break
if self.board[rVal - x][cVal + x] == playerNum and x == 3:
return True
return False
elif cVal >= 4:
if rVal == 2:
for x in xrange(1, 4):
if self.board[rVal + x][cVal - x] != playerNum:
break
if self.board[rVal + x][cVal - x] == playerNum and x == 3:
return True
return False
if rVal == 3:
for x in xrange(1, 4):
if self.board[rVal - x][cVal - x] != playerNum:
break
if self.board[rVal - x][cVal - x] == playerNum and x == 3:
return True
return False
else:
if rVal == 3 or rVal == 4 or rVal == 5:
for x in xrange(1, 4):
if self.board[rVal - x][cVal - x] != playerNum:
break
if self.board[rVal - x][cVal - x] == playerNum and x == 3:
return True
for x in xrange(1, 4):
if self.board[rVal - x][cVal - x] != playerNum:
break
if self.board[rVal - x][cVal - x] == player |
# For some reason, probably because we were trying to serialize the default
# object, we put the "filter" field into the metadata. But the filter doesn't
# make sense for data other than location, so it doesn't seem like it should be
# in the metadata. Putting it into the metadata also means that it is not
# accessible as part of the data frame (although maybe we should put all
# metadata into the data frame).
# So this simple script moves the filter from the metadata into the data for
# location entries and removes it for all other entries
import logging
import emission.core.get_database as edb
def get_curr_key(entry):
    """Return the timeseries key stored in the entry's metadata."""
    metadata = entry["metadata"]
    return metadata["key"]
def is_location_entry(entry):
    """Return True if entry holds raw or filtered location data.

    Only these two keys carry the "filter" field in their data payload.
    """
    # Tuple membership replaces the chained "== ... or == ..." comparison.
    return get_curr_key(entry) in ("background/location",
                                   "background/filtered_location")
def move_all_filters_to_data():
    """One-off migration over the timeseries DB.

    Moves metadata.filter into data for location entries, deletes it from
    metadata for every entry that has it, and backfills data.filter =
    "time" for location entries that predate the filter field.
    """
    tsdb = edb.get_timeseries_db()
    for entry in tsdb.find():
        if "filter" in entry["metadata"]:
            curr_filter = entry["metadata"]["filter"]
            if is_location_entry(entry):
                entry["data"]["filter"] = curr_filter
                logging.debug("for entry %s, found key %s, moved filter %s into data" %
                    (entry["_id"], get_curr_key(entry), curr_filter))
            # For all cases, including the location one, we want to delete the filter from metadata
            del entry["metadata"]["filter"]
            tsdb.save(entry)
            logging.debug("for entry %s, for key %s, deleted filter %s from metadata" %
                (entry["_id"], get_curr_key(entry), curr_filter))
        else:
            pass
            # logging.warning("No filter found for entry %s, skipping" % entry)
        if "filter" not in entry["data"] and is_location_entry(entry):
            # This must be an entry from before the time that we started sending
            # entries to the server. At that time, we only sent time entries,
            # so set it to time in this case
            entry["data"]["filter"] = "time"
            logging.debug("No entry found in either data or metadata, for key %s setting to 'time'" % entry["metadata"]["key"])
            tsdb.save(entry)
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
class document_page_create_menu(osv.osv_memory):
    """ Create Menu """
    _name = "document.page.create.menu"
    _description = "Wizard Create Menu"
    _columns = {
        'menu_name': fields.char('Menu Name', size=256, required=True),
        'menu_parent_id': fields.many2one('ir.ui.menu', 'Parent Menu', required=True),
    }
    def default_get(self, cr, uid, fields, context=None):
        """Default the menu name to the active document page's name."""
        if context is None:
            context = {}
        res = super(document_page_create_menu,self).default_get(cr, uid, fields, context=context)
        page_id = context.get('active_id')
        obj_page = self.pool.get('document.page')
        page = obj_page.browse(cr, uid, page_id, context=context)
        res['menu_name'] = page.name
        return res
    def document_page_menu_create(self, cr, uid, ids, context=None):
        """Create a window action and menu entry pointing at the active page.

        NOTE(review): SUPERUSER_ID is used below and must be in scope from
        the openerp package at the top of this file.
        """
        if context is None:
            context = {}
        obj_page = self.pool.get('document.page')
        obj_view = self.pool.get('ir.ui.view')
        obj_menu = self.pool.get('ir.ui.menu')
        obj_action = self.pool.get('ir.actions.act_window')
        page_id = context.get('active_id', False)
        page = obj_page.browse(cr, uid, page_id, context=context)
        datas = self.browse(cr, uid, ids, context=context)
        data = False
        # The wizard is invoked on a single record; use the first one.
        if datas:
            data = datas[0]
        if not data:
            return {}
        value = {
            'name': 'Document Page',
            'view_type': 'form',
            'view_mode': 'form,tree',
            'res_model': 'document.page',
            'view_id': False,
            'type': 'ir.actions.act_window',
            'target': 'inlineview',
        }
        # Restrict the action to children of the current page.
        value['domain'] = "[('parent_id','=',%d)]" % (page.id)
        value['res_id'] = page.id
        action_id = obj_action.create(cr, SUPERUSER_ID, value)
        # only the super user is allowed to create menu due to security rules on ir.values
        menu_id = obj_menu.create(cr, SUPERUSER_ID, {
            'name': data.menu_name,
            'parent_id':data.menu_parent_id.id,
            'icon': 'STOCK_DIALOG_QUESTION',
            'action': 'ir.actions.act_window,'+ str(action_id),
        }, context)
        obj_page.write(cr, uid, [page_id], {'menu_id':menu_id})
        # Reload the client so the new menu entry becomes visible.
        return {
            'type': 'ir.actions.client',
            'tag': 'reload',
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
# Role identifiers stored in Role.role; PEOPLE_ROLE maps them to labels
# for Django's "choices" machinery.
DIRECTOR = 0
ACTOR = 1
PRODUCER = 2
SCREENPLAY = 3
PHOTOGRAPHY = 4
WRITER = 5
PEOPLE_ROLE = (
    (DIRECTOR, 'Director'),
    (ACTOR, 'Actor'),
    (PRODUCER, 'Producer'),
    (SCREENPLAY, 'Screenplay'),
    (PHOTOGRAPHY, 'Director of Photography'),
    (WRITER, 'Writer')
)
@python_2_unicode_compatible
class People(models.Model):
    """A person credited on movies; linked via Role (see PEOPLE_ROLE)."""
    name = models.CharField(max_length=200)
    tmdb_id = models.IntegerField(blank=True)
    # Path/URL of the TMDB profile image, when available.
    profile = models.CharField(max_length=200, null=True)
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Genre(models.Model):
    """A movie genre, mirrored from TMDB."""
    name = models.CharField(max_length=200)
    tmdb_id = models.IntegerField(blank=True)
    def __str__(self):
        return self.name
@python_2_unicode_compatible
class Movie(models.Model):
    """A movie file on disk plus its TMDB metadata."""
    title = models.CharField(max_length=200, blank=True)
    titlehash = models.CharField(max_length=200)
    filename = models.CharField(max_length=200, blank=True)
    filepath = models.CharField(max_length=255, blank=True)
    poster = models.CharField(max_length=200, null=True)
    year = models.IntegerField(null=True)
    tmdb_id = models.IntegerField(null=True)
    # NOTE(review): 'clean' shadows django's Model.clean() validation hook;
    # renaming would change the schema, so it is only flagged here.
    clean = models.IntegerField(null=True, default=0)
    # NOTE(review): integer default (0) on TextFields -- confirm intent.
    possible = models.TextField(null=True, default=0)
    search = models.TextField(null=True, default=0)
    genres = models.ManyToManyField(Genre)
    def __str__(self):
        return self.title
    def custom_meth(self):
        return "Voila"
@python_2_unicode_compatible
class Role(models.Model):
    """Links a person to a movie with a given role (see PEOPLE_ROLE)."""
    role = models.IntegerField(default=0, choices=PEOPLE_ROLE)
    people = models.ForeignKey(People, on_delete=models.CASCADE)
    movie = models.ForeignKey(Movie, on_delete=models.CASCADE)
    tmdb_id = models.CharField(max_length=200)
    def __str__(self):
        # role is an IntegerField: str() is required -- "ROLE" + self.role
        # raised TypeError (cannot concatenate str and int).
        return "ROLE" + str(self.role)
|
from django.app | s import AppConfig
class GeoPositionConfig(AppConfig):
    """Django application configuration for the ``geoposition`` app."""
    name = 'geoposition'
    verbose_name = "GeoPosition"
|
import numpy as np
from . import errors
from .container import DiskImageContainer
from .segments import SegmentData
class DCMContainer(DiskImageContainer):
    """Container for DCM ("Disk Communicator") compressed Atari disk images.

    Currently only validates the DCM header; decoding is not implemented.
    """
    # density flag (bits 5-6 of the archive flags) -> presumably
    # (sector count, sector size) — TODO confirm against the DCM spec.
    valid_densities = {
        0: (720, 128),
        1: (720, 256),
        2: (1040, 128),
    }
    def get_next(self):
        """Return the next raw byte and advance the read index.

        Raises errors.InvalidContainer if the data ends prematurely.
        """
        try:
            data = self.raw[self.index]
        except IndexError:
            raise errors.InvalidContainer("Incomplete DCM file")
        else:
            self.index += 1
            return data
    def unpack_bytes(self, data):
        """Validate the DCM header of ``data``; raises on any mismatch."""
        self.index = 0
        self.count = len(data)
        self.raw = data
        archive_type = self.get_next()
        # 0xf9 marks a multi-file archive, 0xfa a single-file archive
        # (per the error messages below).
        if archive_type == 0xf9 or archive_type == 0xfa:
            archive_flags = self.get_next()
            # Low 5 bits hold the pass number; pass one must come first.
            if archive_flags & 0x1f != 1:
                if archive_type == 0xf9:
                    raise errors.InvalidContainer("DCM multi-file archive combined in the wrong order")
                else:
                    raise errors.InvalidContainer("Expected pass one of DCM archive first")
            density_flag = (archive_flags >> 5) & 3
            if density_flag not in self.valid_densities:
                raise errors.InvalidContainer(f"Unsupported density flag {density_flag} in DCM")
        else:
            raise errors.InvalidContainer("Not a DCM file")
        # DCM decoding goes here. Currently, instead of decoding it raises the
        # UnsupportedContainer exception, which signals to the caller that the
        # container has been successfully identified but can't be parsed.
        #
        # When decoding is supported, return the decoded byte array instead of
        # this exception.
        raise errors.UnsupportedContainer("DCM archives are not yet supported")
|
import pytest
from cli_config.tag import tag
from utility.nix_error import NixError
def test_tag_show_no_tag(capsys):
    """``tag show`` without a tag argument must exit(2) via argparse."""
    with pytest.raises(SystemExit) as _excinfo:
        tag.tag("nixconfig", ["show"])
    _out, _err = capsys.readouterr()
    # argparse exits with status 2 on usage errors.
    assert "2" in str(_excinfo.value), "Exception doesn't contain expected string"
    assert len(_out) == 0, "StdOut should be empty, contains: {}".format(_out)
    assert "the following arguments are required: tag" in _err, "StdErr doesn't contain expected string"
def test_tag_show_invalid_tag(capsys):
    """``tag show`` with an unknown tag must raise NixError and print nothing."""
    with pytest.raises(NixError) as _excinfo:
        tag.tag("nixconfig", ["show", "badtag"])
    _out, _err = capsys.readouterr()
    assert "Unknown tag: badtag" in str(_excinfo.value)
    assert len(_out) == 0, "StdOut should be empty, contains: {}".format(_out)
    # BUG FIX: ``len(_err) is 0`` compared identity, not value; use ``==``.
    assert len(_err) == 0, "StdErr should be empty, contains: {}".format(_err)
def test_tag_show_good_tag(capsys):
    """``tag show tag1`` lists that tag's scripts and only those."""
    tag.tag("nixconfig", ["show", "tag1"])
    _out, _err = capsys.readouterr()
    assert "script1" in _out, "'script1' should be in output"
    assert "script2" in _out, "'script2' should be in output"
    # BUG FIX: message said 'script2'; this assertion is about script3.
    assert "script3" not in _out, "'script3' should not be in output"
    # BUG FIX: ``len(_err) is 0`` compared identity, not value; use ``==``.
    assert len(_err) == 0, "StdErr should be empty, contains: {}".format(_err)
|
"""Support for an interface resource in Skytap."""
import json
from skytap.framework.ApiClient import ApiClient # noqa
from skytap.models.PublishedServices import PublishedServices # noqa
from skytap.models.SkytapResource import SkytapResource # noqa
class Interface(SkytapResource):
    """One Skytap (network) Interface."""
    def __getattr__(self, key):
        """Get attributes.
        Interfaces aren't fully returned when the API call is made -
        Published Services aren't returned. Often this doesn't matter,
        so we don't automatically pull this information. However, if you ask
        for the services, this function will go and get the requested
        information on demand. This allows saving of API calls (we don't
        request this unless you're accessing Published Services), but also
        you can treat the object as if the services are there all along. We'll
        get the info when you ask for it, and you can move along like it was
        there from the start.
        If you're doing anything other than asking for services, then this
        passes the call upstream to do the default stuff.
        """
        if key == 'services':
            # One extra API round-trip, done lazily on first access.
            api = ApiClient()
            services_json = json.loads(api.rest(self.url))
            # Assigning to self.services caches the result: later accesses
            # find the attribute directly and skip __getattr__ entirely.
            self.services = PublishedServices(services_json["services"],
                                              self.url)
            return self.services
        return super(Interface, self).__getattr__(key)
|
# coding=utf-8
# Copyright 2017 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from pants.backend.jvm.tasks.nailgun_task import NailgunTask
from pants.base.exceptions import TaskError
from pants.base.workunit import WorkUnitLabel
from pants.contrib.codeanalysis.tasks.indexable_java_targets import IndexableJavaTargets
class IndexJava(NailgunTask):
  """Runs the Kythe Java indexer over .kindex files, producing entries files."""

  cache_target_dirs = True

  _KYTHE_JAVA_INDEXER_MAIN = 'com.google.devtools.kythe.analyzers.java.JavaIndexer'

  @classmethod
  def subsystem_dependencies(cls):
    return super(IndexJava, cls).subsystem_dependencies() + (IndexableJavaTargets,)

  @classmethod
  def implementation_version(cls):
    # Bump this version to invalidate all past artifacts generated by this task.
    return super(IndexJava, cls).implementation_version() + [('IndexJava', 8), ]

  @classmethod
  def product_types(cls):
    return ['kythe_entries_files']

  @classmethod
  def prepare(cls, options, round_manager):
    super(IndexJava, cls).prepare(options, round_manager)
    round_manager.require_data('kindex_files')

  @classmethod
  def register_options(cls, register):
    super(IndexJava, cls).register_options(register)
    cls.register_jvm_tool(register,
                          'kythe-java-indexer',
                          main=cls._KYTHE_JAVA_INDEXER_MAIN)

  @staticmethod
  def _entries_file(vt):
    """Path of the entries output file inside a versioned target's results dir."""
    return os.path.join(vt.results_dir, 'index.entries')

  def execute(self):
    indexable_targets = IndexableJavaTargets.global_instance().get(self.context)
    with self.invalidated(indexable_targets, invalidate_dependents=True) as invalidation_check:
      if invalidation_check.invalid_vts:
        indexer_cp = self.tool_classpath('kythe-java-indexer')
        # Kythe jars embed a copy of Java 9's com.sun.tools.javac and javax.tools, for use on JDK8.
        # We must put these jars on the bootclasspath, ahead of any others, to ensure that we load
        # the Java 9 versions, and not the runtime's versions.
        jvm_options = ['-Xbootclasspath/p:{}'.format(':'.join(indexer_cp))]
        jvm_options.extend(self.get_options().jvm_options)
        for vt in invalidation_check.invalid_vts:
          self._index(vt, indexer_cp, jvm_options)
      # Register an entries file for every target, cached or freshly built.
      for vt in invalidation_check.all_vts:
        entries = self._entries_file(vt)
        self.context.products.get_data('kythe_entries_files', dict)[vt.target] = entries

  def _index(self, vt, indexer_cp, jvm_options):
    """Run the Kythe indexer for one versioned target; raises TaskError on failure."""
    self.context.log.info('Kythe indexing {}'.format(vt.target.address.spec))
    kindex_file = self.context.products.get_data('kindex_files').get(vt.target)
    if not kindex_file:
      raise TaskError('No .kindex file found for {}'.format(vt.target.address.spec))
    args = [kindex_file, '--emit_jvm', 'semantic', '--out', self._entries_file(vt)]
    result = self.runjava(classpath=indexer_cp, main=self._KYTHE_JAVA_INDEXER_MAIN,
                          jvm_options=jvm_options,
                          args=args, workunit_name='kythe-index',
                          workunit_labels=[WorkUnitLabel.COMPILER])
    if result != 0:
      # BUG FIX: this previously referenced self._KYTHE_INDEXER_MAIN, an
      # attribute that does not exist, so the failure path itself raised
      # AttributeError and masked the real error.
      raise TaskError('java {main} ... exited non-zero ({result})'.format(
          main=self._KYTHE_JAVA_INDEXER_MAIN, result=result))
|
#some useful operations in module arithmetic
def power2(base, mod):
    """Return the square of ``base`` reduced modulo ``mod``."""
    return base * base % mod
def prod(num1, num2, mod):
    """Return the product of ``num1`` and ``num2`` reduced modulo ``mod``."""
    product = num1 * num2
    return product % mod
def power(base, exp, mod):
    """Modular exponentiation: return (base ** exp) % mod.

    Square-and-multiply over the bits of ``exp``, most significant first.

    BUG FIXES vs. the previous version:
    * exp == 0 returned ``base`` instead of 1 (reduced mod ``mod``);
    * exp == 1 returned ``base`` unreduced (e.g. power(10, 1, 7) gave 10);
    * a negative exponent looped forever — now raises ValueError.
    """
    if exp < 0:
        raise ValueError("exponent must be non-negative")
    if exp == 0:
        return 1 % mod
    # Start from the top bit; exp.bit_length() - 1 is its index.
    result = base % mod
    for i in range(exp.bit_length() - 2, -1, -1):
        result = result * result % mod  # square for every bit
        if exp >> i & 1:
            result = result * base % mod  # multiply when the bit is set
    return result
def euclid(a, b):
    """Return gcd(a, b) by the Euclidean algorithm.

    Iterative to avoid recursion depth on long chains; unlike the previous
    recursive version it also handles b == 0 (gcd(a, 0) == a) instead of
    raising ZeroDivisionError.
    """
    while b != 0:
        a, b = b, a % b
    return a
def extEuclid(a, b):
    """Extended Euclidean algorithm.

    Return ``(x, y, g)`` with ``g == gcd(a, b)`` and ``a * x + b * y == g``.

    BUG FIX: the previous version computed the first remainder before the
    loop, so whenever ``b`` divided ``a`` the loop never ran and it returned
    ``(1, 0, a)`` — e.g. extEuclid(10, 5) gave (1, 0, 10) instead of
    (0, 1, 5). It also raised ZeroDivisionError for b == 0; now
    extEuclid(a, 0) == (1, 0, a).
    """
    # Invariants: a * old_x + b * old_y == old_r and a * x + b * y == r.
    old_r, r = a, b
    old_x, x = 1, 0
    old_y, y = 0, 1
    while r != 0:
        q = old_r // r
        old_r, r = r, old_r - q * r
        old_x, x = x, old_x - q * x
        old_y, y = y, old_y - q * y
    return (old_x, old_y, old_r)
|
# -*- coding: iso-8859-1 -*-
"""
MoinMoin - cli show script
@copyright: 2006 MoinMoin:ThomasWaldmann
@license: GNU GPL, see COPYING for de | tails.
"""
from MoinMoin.script import MoinScript
from MoinMoin.wsgiapp import run
class PluginScript(MoinScript):
    """\
    Purpose:
    ========
    Just run a CLI request and show the output.
    Detailed Instructions:
    ======================
    General syntax: moin [options] cli show
    [options] usually should be:
    --config-dir=/path/to/my/cfg/ --wiki-url=http://wiki.example.org/
    """
    # NOTE: the class docstring above doubles as the script's help text.
    def __init__(self, argv, def_values):
        MoinScript.__init__(self, argv, def_values)
    def mainloop(self):
        # Build the CLI request object, then hand it to the WSGI runner.
        self.init_request()
        run(self.request)
|
import unittest2
from models.event import Event
from models.match import Match
from models.team import Team
class TestKeyNameValidators(unittest2.TestCase):
    """Exercises Team/Event/Match key-name validation with good and bad keys."""
    def setUp(self):
        # Team keys: "frc" + digits.
        self.valid_team_key = "frc177"
        self.valid_team_key2 = "frc1"
        self.invalid_team_key = "bcr077"
        self.invalid_team_key2 = "frc 011"
        self.invalid_team_key3 = "frc711\\"
        # Event keys: 4-digit year + lowercase event code.
        self.valid_event_key = "2010ct"
        self.valid_event_key2 = "2014onto2"
        self.invalid_event_key = "210c1"
        self.invalid_event_key2 = "frc2010ct"
        self.invalid_event_key3 = "2010 ct"
        # Match keys: <event key>_<comp level><set>m<match>.
        self.valid_match_key = "2010ct_sf1m2"
        self.invalid_match_key = "0010c1_0m2"
        self.invalid_match_key2 = "2010c1_1f1m1"
        self.invalid_match_key3 = "2010c1_ef10m1"
    def test_valid_team_key(self):
        self.assertEqual(Team.validate_key_name(self.valid_team_key), True)
        self.assertEqual(Team.validate_key_name(self.valid_team_key2), True)
    def test_invalid_team_key(self):
        self.assertEqual(Team.validate_key_name(self.invalid_team_key), False)
        self.assertEqual(Team.validate_key_name(self.invalid_team_key2), False)
        self.assertEqual(Team.validate_key_name(self.invalid_team_key3), False)
    def test_valid_event_key(self):
        self.assertEqual(Event.validate_key_name(self.valid_event_key), True)
        self.assertEqual(Event.validate_key_name(self.valid_event_key2), True)
    def test_invalid_event_key(self):
        self.assertEqual(Event.validate_key_name(self.invalid_event_key), False)
        self.assertEqual(Event.validate_key_name(self.invalid_event_key2), False)
        self.assertEqual(Event.validate_key_name(self.invalid_event_key3), False)
    def test_valid_match_key(self):
        self.assertEqual(Match.validate_key_name(self.valid_match_key), True)
    def test_invalid_match_key(self):
        self.assertEqual(Match.validate_key_name(self.invalid_match_key), False)
        self.assertEqual(Match.validate_key_name(self.invalid_match_key2), False)
        self.assertEqual(Match.validate_key_name(self.invalid_match_key3), False)
if __name__ == '__main__':
    # BUG FIX: this module imports unittest2, not unittest, so
    # ``unittest.main()`` raised NameError when run as a script.
    unittest2.main()
|
import Logger
import os
# The following five lines of code MUST ABSOLUTELY appear in this order. DO NOT MOVE OR CHANGE THE FOLLOWING FOUR LINES OF CODE.
# Logger.initPins() Should never be called by the user. It should only be called when this script is automatically run.
Logger.init() # Initialize the Logger Python module.
Logger.initPins() # Sets pins to initial state. This function should only be called once, when called automatically when powered on.
Logger.setRomBusy() # Tell the AVR datalogger that the EEPROM chip is in use
Logger.setPowerGood() # Tell the AVR datalogger that the Raspberry Pi is powered on
dataTuple = Logger.loadData() # Read the data from the EEPROM chip
Logger.setRomFree() # Tell the AVR datalogger that the EEPROM chip is no longer in use.
# Process the contents of dataTuple here. The format is as follows:
# Index        dataTuple
# ---------------------------------------------------------
# 0            Number of Records
# 1            Year logging started
# 2            Month logging started
# 3            Day logging started
# 4            Hour logging started
# 5            Minute logging started
# 6            Second logging started
# 7            Data Byte
# 8            Data Byte
# 9            Data Byte
# 10           Data Byte
# ...          ...
# A full buffer presumably means the logger (not a user) powered us on —
# TODO confirm this assumption against the AVR firmware.
if (dataTuple[0] == Logger.bufferMax()): # This means that the Pi was turned on by the Datalogger, not a user, so it should turn itself off.
    Logger.setPowerOff() # Tell the AVR datalogger that the Raspberry Pi is shutting down.
    os.system("sudo poweroff") # Shut down the Raspberry Pi
|
from neo.io.basefromrawio import BaseFromRaw
from neo.rawio.plexonrawi | o import PlexonRawIO
class PlexonIO(PlexonRawIO, BaseFromRaw):
    """
    Class for reading the old data format from Plexon
    acquisition system (.plx)
    Note that Plexon now use a new format PL2 which is NOT
    supported by this IO.
    Compatible with versions 100 to 106.
    Other versions have not been tested.
    """
    _prefered_signal_group_mode = 'group-by-same-units'
    def __init__(self, filename):
        # Initialize the raw reader first, then the BaseFromRaw layer on top.
        PlexonRawIO.__init__(self, filename=filename)
        BaseFromRaw.__init__(self, filename)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 9 2 | 1:30:31 2019
@author: Rignak
"""
import os
from os.path import join, split
# Concatenate every per-folder readme.md under '..' into one top-level
# README.md, wrapping each section after its first heading in a <details>
# spoiler block and rewriting relative image paths.
lines = []
for root, folders, filenames in os.walk('..'):
    for filename in filenames:
        # NOTE(review): comparison is case-sensitive; 'README.md' files are
        # skipped — confirm that is intended.
        if filename == 'readme.md':
            lines += ['']
            with open(join(root, filename), 'r', encoding='utf-8') as file:
                spoiler = False
                # Section title = name of the containing folder.
                lines.append(f"\n# {split(root)[-1]}")
                for line in file.readlines():
                    # Open the spoiler block at the first heading.
                    if line.startswith('#') and not spoiler:
                        lines.append(f"\n<details>\n<summary> {split(root)[-1]} </summary>\n\n")
                        spoiler = True
                    # Image lines: prefix the relative path with the folder
                    # (root[3:] strips the leading '../').
                    if line.startswith('!'):
                        path = line.split('(')[-1][:-1]
                        line = line.replace(path, root[3:] + '/' + path)
                    lines.append(line)
                lines.append("\n</details>\n")
with open(join('..', 'README.md'), 'w', encoding='utf-8') as file:
    for line in lines:
        file.write(line)
|
c | lass Student(object):
"""For student records"""
def __init__(self, name=None):
# This special method is called a "constructor"
self.name = name
def print_name(self):
| print self.name
jenny = Student('Jenny')
jenny.print_name() # prints 'Jenny'
### Exercise Time ###
bill = Student()
bill.print_name() # prints 'None' — name defaulted to None
|
# -*- coding: UTF-8 -*-
# Copyright 2009-2018 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
"""Defines the :class:`DisableDeleteHandler` class.
See :doc:`/dev/delete`.
"""
# import logging ; logger = logging.getLogger(__name__)
from django.conf import settings
from django.db import models
from .utils import full_model_name as fmn
class DisableDeleteHandler(object):
    """A helper object used to find out whether a known object can be
    deleted or not.
    Lino installs an instance of this on each model in an attribute
    `_lino_ddh` during kernel startup.
    .. attribute:: fklist
        A list of tuples `(model, fk)`, one item for each FK field in
        the application which points to this model.
    .. attribute:: model
        The owning model (i.e. ``m._lino_ddh.model is m`` is True for
        every model)
    """
    def __init__(self, model):
        self.model = model
        self.fklist = []
    def add_fk(self, model, fk):
        # called from kernel during startup. fk_model is None for
        # fields defined on a parent model.
        for m, fld in self.fklist:
            if model is m and fld.name == fk.name:
                # avoid duplicate entries caused by MTI children
                return
        self.fklist.append((model, fk))
        # Keep the list deterministically ordered by "module.Model.field".
        def f(a):
            return fmn(a[0]) + '.' + a[1].name
        self.fklist.sort(key=f)
    def __str__(self):
        s = ','.join([m.__name__ + '.' + fk.name for m, fk in self.fklist])
        return "<DisableDeleteHandler(%s, %s)>" % (self.model, s)
    def disable_delete_on_object(self, obj, ignore_models=set()):
        """Return a veto message which explains why this object cannot be
        deleted. Return `None` if there is no veto.
        If `ignore_model` (a set of model class objects) is specified,
        do not check for vetos on ForeignKey fields defined on one of
        these models.
        """
        # NOTE(review): mutable default ``ignore_models=set()`` is safe only
        # as long as callers never mutate it — confirm.
        #logger.info("20101104 called %s.disable_delete(%s)", obj, self)
        # print "20150831 disable_delete", obj, self
        for m, fk in self.fklist:
            if m in ignore_models:
                # print "20150831 skipping", m, fk
                continue
            # if m.__name__.endswith("Partner") and fk.name == 'partner':
            #     print 20150831, m, fk
            # Cascaded-delete fields never veto.
            if fk.name in m.allow_cascaded_delete:
                continue
            # SET_NULL references survive the delete, so they never veto.
            if fk.null and fk.remote_field.on_delete == models.SET_NULL:
                continue
            n = m.objects.filter(**{fk.name: obj}).count()
            if n:
                return obj.delete_veto_message(m, n)
        kernel = settings.SITE.kernel
        # print "20141208 generic related objects for %s:" % obj
        for gfk, fk_field, qs in kernel.get_generic_related(obj):
            if gfk.name in qs.model.allow_cascaded_delete:
                continue
            if fk_field.null:  # a nullable GFK is no reason to veto
                continue
            n = qs.count()
            # print "20141208 - %s %s %s" % (
            #     gfk.model, gfk.name, qs.query)
            if n:
                return obj.delete_veto_message(qs.model, n)
        return None
|
import importlib
import json
from .base import MongoEnginericsAdapter
class ApistarWSGIAdapter(MongoEnginericsAdapter):
    """Exposes MongoEnginerics controllers as an apistar WSGI application."""
    def __init__(self, *args, **kwargs):
        # Import lazily so apistar is only required when this adapter is used.
        self.engine = importlib.import_module('apistar')
        self._wsgi = importlib.import_module('apistar.frameworks.wsgi')
        super(ApistarWSGIAdapter, self).__init__(*args, **kwargs)
    def attach(self, ctrl):
        """Build the CRUD route table for one controller, mounted at /<ctrl.name>."""
        # The annotations below are apistar's injection markers (query params
        # and raw request body), hence the closures instead of bound methods.
        def find(query: self.engine.http.QueryParams):
            return ctrl.find(query)
        def update(item_id, updates: self.engine.http.Body):
            return ctrl.update(item_id, json.loads(updates))
        def create(body: self.engine.http.Body):
            return ctrl.create(json.loads(body))
        def find_one(item_id):
            return ctrl.find_one(item_id)
        def delete(item_id):
            return ctrl.delete(item_id)
        return self.engine.Include('/{}'.format(ctrl.name), [
            self.engine.Route('/', 'GET', find),
            self.engine.Route('/', 'POST', create),
            self.engine.Route('/{item_id}', 'GET', find_one),
            self.engine.Route('/{item_id}', 'PUT', update),
            self.engine.Route('/{item_id}', 'DELETE', delete),
        ])
    def get_app(self):
        """Instantiate every registered controller and return the WSGI app."""
        routes = [self.attach(ctrl()) for ctrl in self._controllers]
        return self._wsgi.WSGIApp(routes=routes)
|
import re
import requests
import bs4
from ralybot import hook
from ralybot.util import web
# different forks of cloudflare-scrape have different package layouts.
# BUG FIX: the original used two ``except ImportError`` clauses on one
# ``try`` — the second clause was unreachable, so if the fallback
# ``import cfscrape`` also failed the ImportError propagated instead of
# setting cfscrape to None. Nest the fallback in its own try block.
try:
    from cfscrape import cfscrape
except ImportError:
    try:
        import cfscrape
    except ImportError:
        cfscrape = None
class SteamError(Exception):
    """Raised when steamdb.info data cannot be fetched or parsed."""
    pass
def percentage(part, whole):
    """Return ``part`` as a percentage of ``whole`` (``whole`` must be non-zero)."""
    return 100 * float(part) / float(whole)
CALC_URL = "https://steamdb.info/calculator/"
# Matches strings like "12 (34.5%)": group 1 = count, group 2 = percent.
PLAYED_RE = re.compile(r"(.*)\((.*)%\)")
def get_data(user, currency="us"):
    """
    Takes a user's Steam Community ID and returns a dict containing info about the games the user owns.
    :type user: str
    :type currency: str
    :return: dict
    :raises SteamError: on network failure or if the page can't be parsed.
    """
    data = {}
    # form the request
    params = {'player': user, 'currency': currency}
    # get the page; prefer the Cloudflare-aware scraper when available
    try:
        if cfscrape:
            scraper = cfscrape.create_scraper()
            request = scraper.get(CALC_URL, params=params)
        else:
            headers = {
                'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, '
                              'like Gecko) Chrome/41.0.2228.0 Safari/537.36',
                'Referer': 'https://steamdb.info/'
            }
            request = requests.get(CALC_URL, params=params, headers=headers)
        request.raise_for_status()
    except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
        raise SteamError("Could not get user info: {}".format(e))
    # parse that page!
    # NOTE(review): no parser is passed to BeautifulSoup, so the "best
    # available" parser is picked (and a warning is emitted on newer bs4).
    soup = bs4.BeautifulSoup(request.content)
    # get all the data we need; any missing element surfaces as AttributeError
    try:
        data["name"] = soup.find("h1", {"class": "header-title"}).find("a").text
        data["url"] = request.url
        data["status"] = soup.find('td', text='Status').find_next('td').text
        data["value"] = soup.find("h1", {"class": "calculator-price"}).text
        data["value_sales"] = soup.find("h1", {"class": "calculator-price-lowest"}).text
        data["count"] = int(soup.find("div",
                                      {"class": "pull-right price-container"}).find("p").find("span", {"class":
                                                                                                       "number"}).text)
        played = soup.find('td', text='Games not played').find_next('td').text
        played = PLAYED_RE.search(played).groups()
        data["count_unplayed"] = int(played[0])
        data["count_played"] = data["count"] - data["count_unplayed"]
        data["percent_unplayed"] = round(percentage(data["count_unplayed"], data["count"]), 1)
        data["percent_played"] = round(percentage(data["count_played"], data["count"]), 1)
    except AttributeError:
        raise SteamError("Could not read info, does this user exist?")
    return data
@hook.command
def steamcalc(text):
    """steamcalc <username> - Gets value of steam account. Uses steamcommunity.com/id/<nickname>."""
    user = text.strip().lower()
    try:
        data = get_data(user)
    except SteamError as e:
        # Report the failure back to the channel instead of raising.
        return "{}".format(e)
    data["short_url"] = web.try_shorten(data["url"])
    # \x02 toggles IRC bold formatting.
    return "\x02{name}\x02 has \x02{count}\x02 games with a total value of \x02{value}\x02" \
           " (\x02{value_sales}\x02 during sales). \x02{count_unplayed}\x02 games" \
           " (\x02{percent_unplayed}%\x02) have never been played - {short_url}".format(**data)
|
from selenium import webdriver
fr | om fixture.session import SessionHelper
from fixture.group import GroupHelper
from fixture.contact import ContactHelper
class Application:
    """Test fixture wrapping a Selenium WebDriver plus page/session helpers."""
    def __init__(self, browser, base_url):
        # Map the requested browser name onto the matching WebDriver.
        if browser == "firefox":
            self.wd = webdriver.Firefox()
        elif browser == "chrome":
            self.wd = webdriver.Chrome()
        elif browser == "ie":
            self.wd = webdriver.Ie()
        else:
            # BUG FIX: typo "Unrecognezed" -> "Unrecognized".
            raise ValueError("Unrecognized browser %s" % browser)
        self.session = SessionHelper(self)
        self.group = GroupHelper(self)
        self.contact = ContactHelper(self)
        self.base_url = base_url
    def is_valid(self):
        """Return True while the underlying browser session is still alive."""
        try:
            self.wd.current_url
            return True
        except Exception:
            # BUG FIX: the bare ``except`` also swallowed KeyboardInterrupt /
            # SystemExit; catching Exception keeps the best-effort behavior.
            return False
    def open_home_page(self):
        # Navigate only if we are not already on the address book page.
        wd = self.wd
        if not (wd.current_url.endswith("/addressbook/")):
            wd.get(self.base_url)
    def destroy(self):
        """Shut down the browser."""
        self.wd.quit()
|
#!/usr/bin/env python
# encoding: utf-8
"""
nim-game.py
Created by Shuailong on 2015-12-21.
https://leetcode.com/problems/nim-game/.
"""
class Solution1(object):
    """Naive bottom-up DP over positions (kept for reference; too slow)."""
    def canWinNim(self, n):
        """
        :type n: int
        :rtype: bool
        """
        '''Too time consuming'''
        # a, b, c hold the win/lose status of positions pos-3, pos-2, pos-1.
        a = b = c = True
        outcome = True
        pos = 4
        while pos <= n:
            # pos is winning iff at least one reachable position is losing.
            outcome = not (a and b and c)
            a, b, c = b, c, outcome
            pos += 1
        return outcome
class Solution(object):
    """Constant-time solution: first player loses exactly when n % 4 == 0."""
    def canWinNim(self, n):
        """
        :type n: int
        :rtype: bool
        """
        '''Find the law and rewrite'''
        # Multiples of four are losing positions; everything else wins.
        return n % 4 != 0
def main():
    """Quick manual check of Solution on a losing position (n = 4)."""
    solution = Solution()
    n = 4
    # IMPROVEMENT: print() form works on both Python 2 and Python 3;
    # the previous ``print solution.canWinNim(n)`` was Python 2 only.
    print(solution.canWinNim(n))
if __name__ == '__main__':
    main()
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Cloud TPU profiler package."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from setuptools import setup
_VERSION = '1.7.0'
CONSOLE_SCRIPTS = [
'capture_tpu_profile=cloud_tpu_profiler.main:run_main',
]
setup(
name='cloud_tpu_profiler',
version=_VERSION.replace('-', ''),
description='Trace and profile | Cloud TPU performance',
long_description='Tools for capture TPU profile',
url='https://www.tensorflow.org/tfrc/',
author='Google Inc.',
author_email='opensource@google.com',
packages=['cloud_tpu_profiler'],
package_data={
'cloud_tpu_profiler': ['data/*'],
},
entry_points={
'console_scripts': CONSOLE | _SCRIPTS,
},
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
license='Apache 2.0',
keywords='tensorflow performance tpu',
)
|
"""Softmax | ."""
# Example scores for the printed demo below.
scores = [3.0, 1.0, 0.2]
import numpy as np
def softmax(x):
    """Compute softmax values for each set of scores in x.

    ``x`` may be a list, a 1-D vector, or a 2-D array where each column is
    one set of scores (the softmax is taken down axis 0, matching the
    original ``sum(np.exp(x))`` behavior).
    """
    # IMPROVEMENT: subtracting the per-column max is a mathematical no-op
    # but prevents np.exp overflow for large scores. asarray keeps plain
    # Python lists working as before.
    x = np.asarray(x)
    e = np.exp(x - x.max(axis=0))
    return e / e.sum(axis=0)
print(softmax(scores))
# Plot softmax curves
import matplotlib.pyplot as plt
x = np.arange(-2.0, 6.0, 0.1)
# Three score rows: a rising line, a constant 1, and a constant 0.2;
# softmax is taken column-wise across the three.
scores = np.vstack([x, np.ones_like(x), 0.2 * np.ones_like(x)])
plt.plot(x, softmax(scores).T, linewidth=2)
plt.show()
|
#!/usr/bin/env python
import os
import sys
if __nam | e__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "bitcamp.settings")
from django.core.management import execute_from_co | mmand_line
execute_from_command_line(sys.argv)
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
from openerp.exceptions import except_orm
from openerp.tools.translate import _
class StockReservation(models.Model):
    """ Allow to reserve products.
    The fields mandatory for the creation of a reservation are:
    * product_id
    * product_uom_qty
    * product_uom
    * name
    The following fields are required but have default values that you may
    want to override:
    * company_id
    * location_id
    * dest_location_id
    Optionally, you may be interested to define:
    * date_validity (once passed, the reservation will be released)
    * note
    """
    _name = 'stock.reservation'
    _description = 'Stock Reservation'
    # A reservation delegates most fields to an underlying stock.move.
    _inherits = {'stock.move': 'move_id'}
    move_id = fields.Many2one(
        'stock.move',
        'Reservation Move',
        required=True,
        readonly=True,
        ondelete='cascade',
        select=1)
    date_validity = fields.Date('Validity Date')
    @api.model
    def default_get(self, fields_list):
        """
        Ensure default value of computed field `product_qty` is not set
        as it would raise an error
        """
        res = super(StockReservation, self).default_get(fields_list)
        if 'product_qty' in res:
            del res['product_qty']
        return res
    @api.model
    def get_location_from_ref(self, ref):
        """ Get a location from a xmlid if allowed
        :param ref: tuple (module, xmlid)
        """
        data_obj = self.env['ir.model.data']
        try:
            location = data_obj.xmlid_to_object(ref, raise_if_not_found=True)
            # Only return it if the current user may read it.
            location.check_access_rule('read')
            location_id = location.id
        except (except_orm, ValueError):
            location_id = False
        return location_id
    @api.model
    def _default_picking_type_id(self):
        """ Search for an internal picking type
        """
        type_obj = self.env['stock.picking.type']
        types = type_obj.search([('code', '=', 'internal')], limit=1)
        if types:
            return types[0].id
        return False
    @api.model
    def _default_location_id(self):
        # Delegate to stock.move's default source location for the
        # internal picking type.
        move_obj = self.env['stock.move']
        picking_type_id = self._default_picking_type_id()
        return (move_obj
                .with_context(default_picking_type_id=picking_type_id)
                ._default_location_source())
    @api.model
    def _default_location_dest_id(self):
        # Destination defaults to the dedicated reservation location.
        ref = 'stock_reserve.stock_location_reservation'
        return self.get_location_from_ref(ref)
    _defaults = {
        'picking_type_id': _default_picking_type_id,
        'location_id': _default_location_id,
        'location_dest_id': _default_location_dest_id,
        'product_uom_qty': 1.0,
    }
    @api.multi
    def reserve(self):
        """ Confirm a reservation
        The reservation is done using the default UOM of the product.
        A date until which the product is reserved can be specified.
        """
        self.date_expected = fields.Datetime.now()
        self.move_id.action_confirm()
        self.move_id.picking_id.action_assign()
        return True
    @api.multi
    def release(self):
        """
        Release moves from reservation
        """
        self.mapped('move_id').action_cancel()
        return True
    @api.model
    def release_validity_exceeded(self, ids=None):
        """ Release all the reservation having an exceeded validity date """
        # NOTE(review): ``fields.date.today()`` (lowercase) — confirm this
        # resolves on this Odoo version; ``fields.Date.today()`` is typical.
        domain = [('date_validity', '<', fields.date.today()),
                  ('state', '=', 'assigned')]
        if ids:
            domain.append(('id', 'in', ids))
        reserv_ids = self.search(domain)
        reserv_ids.release()
        return True
    @api.multi
    def unlink(self):
        """ Release the reservation before the unlink """
        self.release()
        return super(StockReservation, self).unlink()
    @api.onchange('product_id')
    def _onchange_product_id(self):
        """ set product_uom and name from product onchange """
        # save value before reading of self.move_id as this last one erase
        # product_id value
        product = self.product_id
        # WARNING this gettattr erase self.product_id
        move = self.move_id
        result = move.onchange_product_id(
            prod_id=product.id, loc_id=False, loc_dest_id=False,
            partner_id=False)
        if result.get('value'):
            vals = result['value']
            # only keep the existing fields on the view
            self.name = vals.get('name')
            self.product_uom = vals.get('product_uom')
        # repeat assignation of product_id so we don't loose it
        self.product_id = product.id
    @api.onchange('product_uom_qty')
    def _onchange_quantity(self):
        """ On change of product quantity avoid negative quantities """
        if not self.product_id or self.product_uom_qty <= 0.0:
            self.product_uom_qty = 0.0
    @api.multi
    def open_move(self):
        """Open the underlying stock.move directly in its form view."""
        assert len(self.ids) == 1, "1 ID expected, got %r" % self.ids
        reserv = self.move_id
        IrModelData = self.env['ir.model.data']
        ref_form2 = 'stock.action_move_form2'
        action = IrModelData.xmlid_to_object(ref_form2)
        action_dict = action.read()[0]
        action_dict['name'] = _('Reservation Move')
        # open directly in the form view
        ref_form = 'stock.view_move_form'
        view_id = IrModelData.xmlid_to_res_id(ref_form)
        action_dict.update(
            views=[(view_id, 'form')],
            res_id=reserv.id,
        )
        return action_dict
|
import time
import warnings
from collections import deque

import numpy as np

from .utils.generic_utils import Progbar
class CallbackList(object):
    """Dispatches training events to a list of Callback objects and warns
    when callback overhead dominates the batch time."""
    def __init__(self, callbacks=[], queue_length=10):
        # Copy the (shared, mutable-default) list so later appends
        # don't leak across instances.
        self.callbacks = [c for c in callbacks]
        self.queue_length = queue_length
    def append(self, callback):
        self.callbacks.append(callback)
    def _set_params(self, params):
        for callback in self.callbacks:
            callback._set_params(params)
    def _set_model(self, model):
        for callback in self.callbacks:
            callback._set_model(model)
    def on_epoch_begin(self, epoch, logs={}):
        for callback in self.callbacks:
            callback.on_epoch_begin(epoch, logs)
        # Reset the timing stats used to detect slow callbacks.
        self._delta_t_batch = 0.
        self._delta_ts_batch_begin = deque([], maxlen=self.queue_length)
        self._delta_ts_batch_end = deque([], maxlen=self.queue_length)
    def on_epoch_end(self, epoch, logs={}):
        for callback in self.callbacks:
            callback.on_epoch_end(epoch, logs)
    def on_batch_begin(self, batch, logs={}):
        t_before_callbacks = time.time()
        for callback in self.callbacks:
            callback.on_batch_begin(batch, logs)
        self._delta_ts_batch_begin.append(time.time() - t_before_callbacks)
        delta_t_median = np.median(self._delta_ts_batch_begin)
        # Warn when the median callback time approaches the batch time itself.
        if self._delta_t_batch > 0. and delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1:
            warnings.warn('Method on_batch_begin() is slow compared '
                          'to the batch update (%f). Check your callbacks.' % delta_t_median)
        self._t_enter_batch = time.time()
    def on_batch_end(self, batch, logs={}):
        # Time spent in the batch itself (since on_batch_begin returned).
        self._delta_t_batch = time.time() - self._t_enter_batch
        t_before_callbacks = time.time()
        for callback in self.callbacks:
            callback.on_batch_end(batch, logs)
        self._delta_ts_batch_end.append(time.time() - t_before_callbacks)
        delta_t_median = np.median(self._delta_ts_batch_end)
        if self._delta_t_batch > 0. and delta_t_median > 0.95 * self._delta_t_batch and delta_t_median > 0.1:
            warnings.warn('Method on_batch_end() is slow compared '
                          'to the batch update (%f). Check your callbacks.' % delta_t_median)
    def on_train_begin(self, logs={}):
        for callback in self.callbacks:
            callback.on_train_begin(logs)
    def on_train_end(self, logs={}):
        for callback in self.callbacks:
            callback.on_train_end(logs)
class Callback(object):
    """Base class for training callbacks; every hook is a no-op.

    Subclasses override any subset of the ``on_*`` hooks.  ``params``
    and ``model`` are injected by the training loop via ``_set_params``
    / ``_set_model`` before the hooks fire.
    """

    def __init__(self):
        pass

    def _set_params(self, params):
        # Training parameters supplied by the trainer (verbosity, sizes, ...).
        self.params = params

    def _set_model(self, model):
        # The model being trained; lets a callback act on it.
        self.model = model

    # Fix: the hooks used the shared mutable default ``logs={}``; a None
    # default with the same observable behavior avoids the pitfall.
    def on_epoch_begin(self, epoch, logs=None):
        pass

    def on_epoch_end(self, epoch, logs=None):
        pass

    def on_batch_begin(self, batch, logs=None):
        pass

    def on_batch_end(self, batch, logs=None):
        pass

    def on_train_begin(self, logs=None):
        pass

    def on_train_end(self, logs=None):
        pass
class BaseLogger(Callback):
    """Callback that reports progress to stdout through a ``Progbar``.

    Expects ``self.params`` to provide 'verbose', 'nb_sample' and
    'metrics' (set by the trainer via ``_set_params``).
    """

    def on_train_begin(self, logs=None):
        self.verbose = self.params['verbose']

    def on_epoch_begin(self, epoch, logs=None):
        if self.verbose:
            print('Epoch %d' % epoch)
            self.progbar = Progbar(target=self.params['nb_sample'],
                                   verbose=self.verbose)
        self.seen = 0
        self.totals = {}
        # Fix: initialize here so on_epoch_end() cannot hit an unset
        # attribute when an epoch contains no batches (previously this
        # was only assigned inside on_batch_begin()).
        self.log_values = []

    def on_batch_begin(self, batch, logs=None):
        if self.seen < self.params['nb_sample']:
            self.log_values = []

    def on_batch_end(self, batch, logs=None):
        logs = {} if logs is None else logs
        batch_size = logs.get('size', 0)
        self.seen += batch_size
        # Accumulate batch-size-weighted sums for per-epoch averaging.
        for k, v in logs.items():
            if k in self.totals:
                self.totals[k] += v * batch_size
            else:
                self.totals[k] = v * batch_size
        for k in self.params['metrics']:
            if k in logs:
                self.log_values.append((k, logs[k]))
        # skip progbar update for the last batch; will be handled by on_epoch_end
        if self.verbose and self.seen < self.params['nb_sample']:
            self.progbar.update(self.seen, self.log_values)

    def on_epoch_end(self, epoch, logs=None):
        logs = {} if logs is None else logs
        for k in self.params['metrics']:
            if k in self.totals:
                # Weighted average of the training metric over the epoch.
                self.log_values.append((k, self.totals[k] / self.seen))
            if k in logs:
                # Metrics delivered directly at epoch end (e.g. validation).
                self.log_values.append((k, logs[k]))
        if self.verbose:
            self.progbar.update(self.seen, self.log_values)
class History(Callback):
    """Records per-epoch (weighted-average) metrics into ``self.history``.

    After training, ``history`` maps metric name -> list of one value
    per epoch, and ``epoch`` lists the epoch indices.
    """

    def on_train_begin(self, logs=None):
        self.epoch = []
        self.history = {}

    def on_epoch_begin(self, epoch, logs=None):
        self.seen = 0
        self.totals = {}

    def on_batch_end(self, batch, logs=None):
        # Fix: replaced the shared mutable default ``logs={}``.
        logs = {} if logs is None else logs
        batch_size = logs.get('size', 0)
        self.seen += batch_size
        # Batch-size-weighted sums; averaged in on_epoch_end().
        for k, v in logs.items():
            if k in self.totals:
                self.totals[k] += v * batch_size
            else:
                self.totals[k] = v * batch_size

    def on_epoch_end(self, epoch, logs=None):
        logs = {} if logs is None else logs
        self.epoch.append(epoch)
        for k, v in self.totals.items():
            self.history.setdefault(k, []).append(v / self.seen)
        for k, v in logs.items():
            self.history.setdefault(k, []).append(v)
class ModelCheckpoint(Callback):
    """Saves model weights after every epoch.

    :param filepath: where the weights are written.
    :param monitor: metric name looked up in ``logs`` (lower is better).
    :param verbose: if > 0, print a message on every decision.
    :param save_best_only: only overwrite when ``monitor`` improved.
    """

    def __init__(self, filepath, monitor='val_loss', verbose=0, save_best_only=False):
        # Fix: the original called super(Callback, self).__init__(), which
        # starts the MRO search *after* Callback and so skips its __init__.
        super(ModelCheckpoint, self).__init__()
        self.monitor = monitor
        self.verbose = verbose
        self.filepath = filepath
        self.save_best_only = save_best_only
        # Fix: np.Inf was removed in NumPy 2.0; np.inf works everywhere.
        self.best = np.inf

    def on_epoch_end(self, epoch, logs=None):
        logs = {} if logs is None else logs
        if self.save_best_only:
            current = logs.get(self.monitor)
            if current is None:
                warnings.warn("Can save best model only with %s available, skipping." % (self.monitor), RuntimeWarning)
            elif current < self.best:
                if self.verbose > 0:
                    print("Epoch %05d: %s improved from %0.5f to %0.5f, saving model to %s"
                          % (epoch, self.monitor, self.best, current, self.filepath))
                self.best = current
                self.model.save_weights(self.filepath, overwrite=True)
            else:
                if self.verbose > 0:
                    print("Epoch %05d: %s did not improve" % (epoch, self.monitor))
        else:
            if self.verbose > 0:
                print("Epoch %05d: saving model to %s" % (epoch, self.filepath))
            self.model.save_weights(self.filepath, overwrite=True)
class EarlyStopping(Callback):
    """Stops training when the monitored metric stops improving.

    :param monitor: metric name looked up in ``logs`` (lower is better).
    :param patience: number of non-improving epochs tolerated before stopping.
    :param verbose: if > 0, print a message when stopping early.
    """

    def __init__(self, monitor='val_loss', patience=0, verbose=0):
        # Fix: super(Callback, self) skipped Callback's own __init__ in the MRO.
        super(EarlyStopping, self).__init__()
        self.monitor = monitor
        self.patience = patience
        self.verbose = verbose
        # Fix: np.Inf was removed in NumPy 2.0; np.inf works everywhere.
        self.best = np.inf
        self.wait = 0

    def on_epoch_end(self, epoch, logs=None):
        logs = {} if logs is None else logs
        current = logs.get(self.monitor)
        if current is None:
            warnings.warn("Early stopping requires %s available!" % (self.monitor), RuntimeWarning)
            # Fix: previously execution fell through and compared None with
            # a float (TypeError on Python 3); skip the epoch instead.
            return
        if current < self.best:
            self.best = current
            self.wait = 0
        else:
            if self.wait >= self.patience:
                if self.verbose > 0:
                    print("Epoch %05d: early stopping" % (epoch))
                self.model.stop_training = True
            self.wait += 1
class RemoteMonitor(Callback):
def __init__(self, root='http://localhost:9000'):
self.root = root
def on_epoch_begin(self, epoch, logs={}):
self.seen = 0
self.totals = {}
def on_batch_end(self, batch, logs={}):
batch_size = logs.get('size', 0)
self.seen += batch_size
for k, v in logs.items():
if k in self.totals:
self.totals[k] += v * batch_size
else:
self.totals[k] = v * batch_size
def on_epoch_end(self, epoch, |
s not round properly'
if long(1.9) == 1L == long(1.1) and long(-1.1) == -1L == long(-1.9): pass
else: raise TestFailed, 'long() does not round properly'
if float(1) == 1.0 and float(-1) == -1.0 and float(0) == 0.0: pass
else: raise TestFailed, 'float() does not work properly'
print '6.4.1 32-bit integers'
if 12 + 24 != 36: raise TestFailed, 'int op'
if 12 + (-24) != -12: raise TestFailed, 'int op'
if (-12) + 24 != 12: raise TestFailed, 'int op'
if (-12) + (-24) != -36: raise TestFailed, 'int op'
if not 12 < 24: raise TestFailed, 'int op'
if not -24 < -12: raise TestFailed, 'int op'
# Test for a particular bug in integer multiply
xsize, ysize, zsize = 238, 356, 4
if not (xsize*ysize*zsize == zsize*xsize*ysize == 338912):
raise TestFailed, 'int mul commutativity'
# And another.
m = -sys.maxint - 1
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor
prod = divisor * j
if prod != m:
raise TestFailed, "%r * %r == %r != %r" % (divisor, j, prod, m)
if type(prod) is not int:
raise TestFailed, ("expected type(prod) to be int, not %r" %
type(prod))
# Check for expected * overflow to long.
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor - 1
prod = divisor * j
if type(prod) is not long:
raise TestFailed, ("expected type(%r) to be long, not %r" %
(prod, type(prod)))
# Check for expected * overflow to long.
m = sys.maxint
for divisor in 1, 2, 4, 8, 16, 32:
j = m // divisor + 1
prod = divisor * j
if type(prod) is not long:
raise TestFailed, ("expected type(%r) to be long, not %r" %
(prod, type(prod)))
print '6.4.2 Long integers'
if 12L + 24L != 36L: raise TestFailed, 'long op'
if 12L + (-24L) != -12L: raise TestFailed, 'long op'
if (-12L) + 24L != 12L: raise TestFailed, 'long op'
if (-12L) + (-24L) != -36L: raise TestFailed, 'long op'
if not 12L < 24L: raise TestFailed, 'long op'
if not -24L < -12L: raise TestFailed, 'long op'
x = sys.maxint
if int(long(x)) != x: raise TestFailed, 'long op'
try: int(long(x)+1L)
except OverflowError: pass
else:raise TestFailed, 'long op'
x = -x
if int(long(x)) != x: raise TestFailed, 'long op'
x = x-1
if int(long(x)) != x: raise TestFailed, 'long op'
try: int(long(x)-1L)
except OverflowError: pass
else:raise TestFailed, 'long op'
print '6.4.3 Floating point numbers'
if 12.0 + 24.0 != 36.0: raise TestFailed, 'float op'
if 12.0 + (-24.0) != -12.0: raise TestFailed, 'float op'
if (-12.0) + 24.0 != 12.0: raise TestFailed, 'float op'
if (-12.0) + (-24.0) != -36.0: raise TestFailed, 'float op'
if not 12.0 < 24.0: raise TestFailed, 'float op'
if not -24.0 < -12.0: raise TestFailed, 'float op'
print '6.5 Sequence types'
print '6.5.1 Strings'
if len('') != 0: raise TestFailed, 'len(\'\')'
if len('a') != 1: raise TestFailed, 'len(\'a\')'
if len('abcdef') != 6: raise TestFailed, 'len(\'abcdef\')'
if 'xyz' + 'abcde' != 'xyzabcde': raise TestFailed, 'string concatenation'
if 'xyz'*3 != 'xyzxyzxyz': raise TestFailed, 'string repetition *3'
if 0*'abcde' != '': raise TestFailed, 'string repetition 0*'
if min('abc') != 'a' or max('abc') != 'c': raise TestFailed, 'min/max string'
if 'a' in 'abc' and 'b' in 'abc' and 'c' in 'abc' and 'd' not in 'abc': pass
else: raise TestFailed, 'in/not in string'
x = 'x'*103
if '%s!'%x != x+'!': raise TestFailed, 'nasty string formatting bug'
print '6.5.2 Tuples'
if len(()) != 0: raise TestFailed, 'len(())'
if len((1,)) != 1: raise TestFailed, 'len((1,))'
if len((1,2,3,4,5,6)) != 6: raise TestFailed, 'len((1,2,3,4,5,6))'
if (1,2)+(3,4) != (1,2,3,4): raise TestFailed, 'tuple concatenation'
if (1,2)*3 != (1,2,1,2,1,2): raise TestFailed, 'tuple repetition *3'
if 0*(1,2,3) != (): raise TestFailed, 'tuple repetition 0*'
if min((1,2)) != 1 or max((1,2)) != 2: raise TestFailed, 'min/max tuple'
if 0 in (0,1,2) and 1 in (0,1,2) and 2 in (0,1,2) and 3 not in (0,1,2): pass
else: raise TestFailed, 'in/not in tuple'
print '6.5.3 Lists'
if len([]) != 0: raise TestFailed, 'len([])'
if len([1,]) != 1: raise TestFailed, 'len([1,])'
if len([1,2,3,4,5,6]) != 6: raise TestFailed, 'len([1,2,3,4,5,6])'
if [1,2]+[3,4] != [1,2,3,4]: raise TestFailed, 'list concatenation'
if [1,2]*3 != [1,2,1,2,1,2]: raise TestFailed, 'list repetition *3'
if [1,2]*3L != [1,2,1,2,1,2]: raise TestFailed, 'list repetition *3L'
if 0*[1,2,3] != []: raise TestFailed, 'list repetition 0*'
if 0L*[1,2,3] != []: raise TestFailed, 'list repetition 0L*'
if min([1,2]) != 1 or max([1,2]) != 2: raise TestFailed, 'min/max list'
if 0 in [0,1,2] and 1 in [0,1,2] and 2 in [0,1,2] and 3 not in [0,1,2]: pass
else: raise TestFailed, 'in/not in list'
a = [1, 2, 3, 4, 5]
a[:-1] = a
if a != [1, 2, 3, 4, 5, 5]:
raise TestFailed, "list self-slice-assign (head)"
a = [1, 2, 3, 4, 5]
a[1:] = a
if a != [1, 1, 2, 3, 4, 5]:
raise TestFailed, "list self-slice-assign (tail)"
a = [1, 2, 3, 4, 5]
a[1:-1] = a
if a != [1, 1, 2, 3, 4, 5, 5]:
raise TestFailed, "list self-slice-assign (center)"
print '6.5.3a Additional list operations'
a = [0,1,2,3,4]
a[0L] = 1
a[1L] = 2
a[2L] = 3
if a != [1,2,3,3,4]: raise TestFailed, 'list item assignment [0L], [1L], [2L]'
a[0] = 5
a[1] = 6
a[2] = 7
if a != [5,6,7,3,4]: raise TestFailed, 'list item assignment [0], [1], [2]'
a[-2L] = 88
a[-1L] = 99
if a != [5,6,7,88,99]: raise TestFailed, 'list item assignment [-2L], [-1L]'
a[-2] = 8
a[-1] = 9
if a != [5,6,7,8,9]: raise TestFailed, 'list item assignment [-2], [-1]'
a[:2] = [0,4]
a[-3:] = []
a[1:1] = [1,2,3]
if a != [0,1,2,3,4]: raise TestFailed, 'list slice assignment'
a[ 1L : 4L] = [7,8,9]
if a != [0,7,8,9,4]: raise TestFailed, 'list slice assignment using long ints'
del a[1:4]
if a != [0,4]: raise TestFailed, 'list slice deletion'
del a[0]
if a != [4]: raise TestFailed, 'list item deletion [0]'
del a[-1]
if a != []: raise TestFailed, 'list item deletion [-1]'
a=range(0,5)
del a[1L:4L]
if a != [0,4]: raise TestFailed, 'list slice deletion'
del a[0L]
if a != [4]: raise TestFailed, 'list item deletion [0]'
del a[-1L]
if a != []: raise TestFailed, 'list item deletion [-1]'
a.append(0)
a.append(1)
a.append(2)
if a != [0,1,2]: raise TestFailed, 'list append'
a.insert(0, -2)
a.insert(1, -1)
a.insert(2,0)
if a != [-2,-1,0,0,1,2]: raise TestFailed, 'list insert'
if a.count(0) != 2: raise TestFailed, ' list count'
if a.index(0) != 2: raise TestFailed, 'list index'
a.remove(0)
if a != [-2,-1,0,1,2]: raise TestFailed, 'list remove'
a.reverse()
if a != [2,1,0,-1,-2]: raise TestFailed, 'list reverse'
a.sort()
if a != [-2,-1,0,1,2]: raise TestFailed, 'list sort'
def revcmp(a, b):
    """Three-way comparison for descending sorts (same result as cmp(b, a))."""
    # Spelled without the cmp() builtin so the helper also works where
    # cmp is unavailable (it was removed in Python 3).
    return (b > a) - (b < a)
a.sort(revcmp)
if a != [2,1,0,-1,-2]: raise TestFailed, 'list sort with cmp func'
# The following dumps core in unpatched Python 1.5:
def myComparison(x,y):
    """Deliberately inconsistent comparison (x mod 3 vs y mod 7).

    Regression helper for a sort crash in unpatched Python 1.5; written
    without the cmp() builtin (removed in Python 3) but returning the
    identical -1/0/1 values.
    """
    return (x % 3 > y % 7) - (x % 3 < y % 7)
z = range(12)
z.sort(myComparison)
# Test extreme cases with long ints
a = [0,1,2,3,4]
if a[ -pow(2,128L): 3 ] != [0,1,2]:
raise TestFailed, "list slicing with too-small long integer"
| if a[ 3: pow(2,145L) ] != [3,4]:
raise TestFailed, "list slicing with too-large long integer"
print '6.6 Mappings == Dictionaries'
d = {}
if d.keys() != []: raise TestFailed, '{}.keys()'
if d.has_key('a') != 0: raise TestFailed, '{}.has_key(\'a\')'
if ('a' in d) != 0: raise Test | Failed, "'a' in {}"
if ('a' not in d) != 1: raise TestFailed, "'a' not in {}"
if len(d) != 0: raise TestFailed, 'len({})'
d = {'a': 1, 'b': 2}
if len(d) != 2: raise TestFailed, 'len(dict)'
k = d.keys()
k.sort()
if k != ['a', 'b']: raise TestFailed, 'dict keys()'
if d.has_key('a') and d.has_key('b') and not d.has_key('c'): pass
else: raise TestFailed, 'dict keys()'
if 'a' in d and 'b' in d and 'c' not in d: pass
else: raise TestFailed, 'dict keys() # in/not in version'
if d['a'] != 1 or d['b'] != 2: raise TestFailed, 'dict item'
d['c'] = 3
d['a'] = 4
if d['c'] != 3 or d['a'] != 4: raise TestFailed, 'dict item assignment'
del d['b']
if d != {'a': 4, 'c': 3}: raise TestFailed, 'dict item deletion'
# dict.clear()
d = {1:1, 2:2, 3:3}
d.clear()
if d != {}: raise TestFailed, 'dict clear'
# dict.update()
d.update({1:100})
d.update({2:20})
d.update({1:1, 2:2, 3:3})
if d != {1:1, 2:2, 3:3}: raise Tes |
# -*- coding: utf-8 -*-
# Copyright 2016-2017 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""Runs some tests about the notification framework.
You can run only these tests by issuing::
$ go team
$ python manage.py test tests.test_notify
Or::
$ go noi
$ python setup.py test -s tests.ProjectsTests.test_team
"""
from __future__ import unicode_literals
import datetime
from mock import patch
from django.conf import settings
from django.utils.timezone import make_aware
from lino.api import dd, rt
from lino.utils.djangotest import TestCase
from lino.core import constants
from lino.modlib.users.choicelists import UserTypes
from lino.utils.instantiator import create
from lino.modlib.notify.models import send_pending_emails_often
from lino.modlib.notify.choicelists import MailModes
from lino.core.diff import ChangeWatcher
import contextlib
@contextlib.contextmanager
def capture_stdout():
    """Temporarily replace ``sys.stdout`` with an in-memory buffer.

    Yields the buffer; callers read it with ``out.getvalue()``.  The
    real stdout is restored even if the body raises.
    """
    import sys
    try:
        from cStringIO import StringIO  # Python 2
    except ImportError:
        # Fix: cStringIO does not exist on Python 3.
        from io import StringIO
    oldout = sys.stdout
    try:
        out = StringIO()
        sys.stdout = out
        yield out
    finally:
        sys.stdout = oldout
        # Note: the original also rebound the local ``out`` to its
        # getvalue() here, which had no effect outside this function;
        # that dead assignment has been removed.
class TestCase(TestCase):
    """Miscellaneous tests."""
    # NOTE(review): intentionally shadows the imported djangotest
    # TestCase base class with a local subclass of the same name.
    maxDiff = None
    def test_01(self):
        # Sanity checks on the settings this suite expects to run under.
        self.assertEqual(settings.SETTINGS_MODULE, None)
        self.assertEqual(settings.LOGGING, {})
        self.assertEqual(settings.SERVER_EMAIL, 'root@localhost')
    @patch('lino.api.dd.logger')
    def test_comment(self, logger):
        """Test what happens when a comment is posted on a ticket with
        watchers.
        """
        ContentType = rt.models.contenttypes.ContentType
        Comment = rt.models.comments.Comment
        Ticket = rt.models.tickets.Ticket
        Project = rt.models.tickets.Project
        Vote = rt.models.votes.Vote
        Message = rt.models.notify.Message
        User = settings.SITE.user_model
        # Fixture: a ticket created by robin and watched (voted) by aline,
        # whose preferred language is French.
        create(Project, name="Project")
        robin = create(
            User, username='robin',
            first_name="Robin",
            user_type=UserTypes.admin)
        aline = create(
            User, username='aline',
            first_name="Aline",
            email="aline@example.com", language='fr')
        obj = create(
            Ticket, summary="Save the world, après moi le déluge",
            user=robin)
        create(Vote, votable=obj, user=aline)
        self.assertEqual(Message.objects.count(), 0)
        # Post a comment on the ticket through the web API, acting as robin.
        url = "/api/comments/CommentsByRFC"
        post_data = dict()
        post_data[constants.URL_PARAM_ACTION_NAME] = 'submit_insert'
        post_data.update(short_text="I don't agree.")
        post_data[constants.URL_PARAM_MASTER_PK] = obj.pk
        ct = ContentType.objects.get_for_model(Ticket)
        post_data[constants.URL_PARAM_MASTER_TYPE] = ct.id
        # post_data[constants.URL_PARAM_REQUESTING_PANEL] = '123'
        response = self.client.post(
            url, post_data,
            REMOTE_USER='robin',
            HTTP_ACCEPT_LANGUAGE='en')
        result = self.check_json_result(
            response, 'rows success message close_window')
        self.assertEqual(result['success'], True)
        self.assertEqual(
            result['message'],
            """Comment "Comment #1" has been created.""")
        # The watcher (aline) received exactly one notification, in French.
        self.assertEqual(Message.objects.count(), 1)
        msg = Message.objects.all()[0]
        # self.assertEqual(msg.message_type)
        self.assertEqual(msg.seen, None)
        self.assertEqual(msg.user, aline)
        expected = """Robin a commenté [ticket 1] (Save the world, """\
            """après moi le déluge): I don't agree."""
        self.assertEqual(expected, msg.body)
        # manually set created timestamp so we can test on it later.
        now = datetime.datetime(2016, 12, 22, 19, 45, 55)
        if settings.USE_TZ:
            now = make_aware(now)
        msg.created = now
        msg.save()
        settings.SERVER_EMAIL = 'root@example.com'
        # Flush pending notification emails, capturing what the console
        # email backend prints.
        with capture_stdout() as out:
            send_pending_emails_often()
        out = out.getvalue().strip()
        print(out)
        expected = """send email
Sender: root@example.com
To: aline@example.com
Subject: [Django] Robin a comment? #1 (? Save the world, apr?s moi le d?luge)
<body>
(22/12/2016 19:45)
Robin a comment? <a href="http://127.0.0.1:8000/api/tickets/Ticket/1" title="Save the world, après moi le déluge">#1</a> (Save the world, apr?s moi le d?luge): I don't agree.
</body>
"""
        self.assertEquivalent(expected, out)
        self.assertEqual(logger.debug.call_count, 1)
        logger.debug.assert_called_with(
            'Send out %s summaries for %d users.',
            MailModes.often, 1)
        # logger.info.assert_called_with(
        #     'Notify %s users about %s', 1, 'Change by robin')
        Message.objects.all().delete()
        self.assertEqual(Message.objects.count(), 0)

        # Second round: a watched *field change* (priority) must also
        # produce a pending notification and send without raising.
        ar = rt.login('robin')
        cw = ChangeWatcher(obj)
        obj.priority = 200
        obj.save_watched_instance(ar, cw)
        with capture_stdout() as out:
            send_pending_emails_often()
        out = out.getvalue().strip()
        # print(out)
        expected = ""
        # self.assertEquivalent(expected, out)
        # we do not test the output because the datetime changes. But
        # we actually just wanted to see if there is no
        # UnicodeException. We capture it in order to hide it from
        # test runner output.
        self.assertEqual(logger.debug.call_count, 2)
        logger.debug.assert_called_with(
            'Send out %s summaries for %d users.',
            MailModes.often, 1)
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apac | he License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES | OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from .client import AccessApprovalClient
from .async_client import AccessApprovalAsyncClient
# Public API of this package: the synchronous and asynchronous clients.
__all__ = (
    "AccessApprovalClient",
    "AccessApprovalAsyncClient",
)
|
# Copyright 2019 Canonical, Ltd.
#
# This program is free software: you can redi | stribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or F | ITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import asyncio
import concurrent.futures
import logging
log = logging.getLogger("subiquitycore.async_helpers")
def _done(fut):
    """Done-callback that consumes *fut* so failures are not lost silently."""
    try:
        # .result() re-raises the task's exception (if any) so it reaches
        # the event loop's exception handling instead of being dropped.
        fut.result()
    except asyncio.CancelledError:
        # Cancellation is an expected, normal outcome here.
        pass
def schedule_task(coro, propagate_errors=True):
    """Wrap *coro* in a Task if needed and queue it on the event loop.

    When *propagate_errors* is true, a done-callback consumes the result
    so failures are surfaced instead of silently dropped.  Returns the
    task (or the awaitable that was passed in).
    """
    loop = asyncio.get_event_loop()
    task = asyncio.Task(coro) if asyncio.iscoroutine(coro) else coro
    if propagate_errors:
        task.add_done_callback(_done)
    # Defer the actual scheduling to the next loop iteration.
    loop.call_soon(asyncio.ensure_future, task)
    return task
async def run_in_thread(func, *args):
    """Run the blocking call ``func(*args)`` in the default executor."""
    loop = asyncio.get_event_loop()
    try:
        result = await loop.run_in_executor(None, func, *args)
    except concurrent.futures.CancelledError:
        # Normalize executor-level cancellation onto the asyncio flavour.
        raise asyncio.CancelledError
    return result
class SingleInstanceTask:
    """Run *func* as a task, keeping at most one instance active.

    Starting again cancels the previous task and waits for the
    cancellation to finish before the replacement is scheduled.
    """
    def __init__(self, func, propagate_errors=True):
        self.func = func
        self.propagate_errors = propagate_errors
        self.task = None  # the most recently created task/awaitable, if any
    async def _start(self, old):
        # Handover coroutine: drain the previous instance, then schedule
        # the new one, so the two never run concurrently.
        if old is not None:
            old.cancel()
            try:
                await old
            except BaseException:
                # The old task's outcome (including exceptions) is
                # deliberately discarded.
                pass
        schedule_task(self.task, self.propagate_errors)
    async def start(self, *args, **kw):
        # Async variant: returns the new task once the handover has run.
        await self.start_sync(*args, **kw)
        return self.task
    def start_sync(self, *args, **kw):
        old = self.task
        coro = self.func(*args, **kw)
        if asyncio.iscoroutine(coro):
            self.task = asyncio.Task(coro)
        else:
            self.task = coro
        # The handover itself runs as a task; self.task is only scheduled
        # after the old instance has been cancelled inside _start().
        return schedule_task(self._start(old))
    async def wait(self):
        # Retry until a non-cancelled completion: a CancelledError here
        # means the task was superseded by a restart.
        # NOTE(review): assumes self.task is rebound to the replacement by
        # the time the CancelledError is observed -- confirm.
        while True:
            try:
                return await self.task
            except asyncio.CancelledError:
                pass
|
arning = slim_formatwarning
def _normalize_ld(launch_description_fn):
    """Adapt a user test-description function to one calling convention.

    Returns a wrapper that always produces a ``(launch_description,
    test_context)`` tuple and plumbs ``ready_fn`` either as a function
    argument (deprecated old style) or into the description's
    ReadyToTest action (new style).
    """
    # A launch description fn can return just a launch description, or a tuple of
    # (launch_description, test_context). This wrapper function normalizes things
    # so we always get a tuple, sometimes with an empty dictionary for the test_context
    def normalize(result):
        if isinstance(result, tuple):
            return result
        else:
            return result, {}
    def wrapper(**kwargs):
        fn_args = inspect.getfullargspec(launch_description_fn)
        if 'ready_fn' in fn_args.args + fn_args.kwonlyargs:
            # This is an old-style launch_description function which expects ready_fn to be passed
            # in to the function
            # This type of launch description will be deprecated in the future. Warn about it
            # here
            warnings.warn(
                'Passing ready_fn as an argument to generate_test_description will '
                'be removed in a future release. Include a launch_testing.actions.ReadyToTest '
                'action in the LaunchDescription instead.'
            )
            return normalize(launch_description_fn(**kwargs))
        else:
            # This is a new-style launch_description which should contain a ReadyToTest action
            ready_fn = kwargs.pop('ready_fn')
            result = normalize(launch_description_fn(**kwargs))
            # Fish the ReadyToTest action out of the launch description and plumb our
            # ready_fn to it
            def iterate_ready_to_test_actions(entities):
                """Recursively search LaunchDescription entities for all ReadyToTest actions."""
                for entity in entities:
                    if isinstance(entity, ReadyToTest):
                        yield entity
                    # Recurse into both unconditional and conditional children.
                    yield from iterate_ready_to_test_actions(
                        entity.describe_sub_entities()
                    )
                    for conditional_sub_entity in entity.describe_conditional_sub_entities():
                        yield from iterate_ready_to_test_actions(
                            conditional_sub_entity[1]
                        )
            try:
                ready_action = next(e for e in iterate_ready_to_test_actions(result[0].entities))
            except StopIteration:  # No ReadyToTest action found
                raise Exception(
                    'generate_test_description functions without a ready_fn argument must return '
                    'a LaunchDescription containing a ReadyToTest action'
                )
            # Only the first ReadyToTest action found gets the callback.
            ready_action._add_callback(ready_fn)
            return result
    return wrapper
class TestRun:
    """One executable launch-test run: a test description plus its suites.

    Couples a (possibly parametrized) test-description function with the
    pre- and post-shutdown unittest suites loaded from the same module.
    """
    def __init__(self,
                 name,
                 test_description_function,
                 param_args,
                 pre_shutdown_tests,
                 post_shutdown_tests):
        self.name = name
        # Make sure the markers attribute always exists, even when no
        # decorator attached any markers.
        if not hasattr(test_description_function, '__markers__'):
            test_description_function.__markers__ = {}
        self._test_description_function = test_description_function
        self.normalized_test_description = _normalize_ld(test_description_function)
        self.param_args = param_args
        self.pre_shutdown_tests = pre_shutdown_tests
        self.post_shutdown_tests = post_shutdown_tests
        # If we're parametrized, extend the test names so we can tell more easily what
        # params they were run with
        if self.param_args:
            for tc in itertools.chain(
                _iterate_tests_in_test_suite(pre_shutdown_tests),
                _iterate_tests_in_test_suite(post_shutdown_tests)
            ):
                test_method = getattr(tc, tc._testMethodName)
                new_name = tc._testMethodName + self._format_params()
                setattr(tc, '_testMethodName', new_name)
                setattr(tc, new_name, test_method)
        # Disable cleanup of test cases once they are run
        for tc in itertools.chain(
            _iterate_test_suites(pre_shutdown_tests),
            _iterate_test_suites(post_shutdown_tests)
        ):
            tc._removeTestAtIndex = lambda *args, **kwargs: None
    @property
    def markers(self):
        # Markers attached to the test-description function by decorators.
        return self._test_description_function.__markers__
    def bind(self, tests, injected_attributes={}, injected_args={}):
        """
        Bind injected_attributes and injected_args to tests.
        Injected Attributes can be accessed from a test as self.name
        Injected Arguments can be accessed as an argument if the test has an argument with a
        matching name
        """
        # NOTE(review): the mutable default arguments are safe here because
        # both dicts are only iterated, never mutated.
        # Inject test attributes into the test as self.whatever. This method of giving
        # objects to the test is pretty inferior to injecting them as arguments to the
        # test methods - we may deprecate this in favor of everything being an argument
        for name, value in injected_attributes.items():
            _give_attribute_to_tests(value, name, tests)
        # Give objects with matching names as arguments to tests. This doesn't have the
        # weird scoping and name collision issues that the above method has. In fact,
        # we give proc_info and proc_output to the tests as arguments too, so anything
        # you can do with test attributes can also be accomplished with test arguments
        _bind_test_args_to_tests(injected_args, tests)
    def get_launch_description(self):
        """
        Get just the launch description portion of the test_description.
        This should only be used for the purposes of introspecting the launch description. The
        returned launch description is not meant to be launched
        """
        # A dummy ready_fn satisfies the normalized calling convention.
        return self.normalized_test_description(ready_fn=lambda: None)[0]
    def all_cases(self):
        # Iterate every individual test case, pre- and post-shutdown.
        yield from _iterate_tests_in_test_suite(self.pre_shutdown_tests)
        yield from _iterate_tests_in_test_suite(self.post_shutdown_tests)
    def __str__(self):
        return self.name + self._format_params()
    def _format_params(self):
        # Human-readable "[arg1, arg2]" suffix for parametrized runs.
        if not self.param_args:
            return ''
        else:
            str_args = map(str, self.param_args.values())
            return '[{}]'.format(', '.join(str_args))
def LoadTestsFromPythonModule(module, *, name='launch_tests'):
    """Build one TestRun per parameterization of *module*'s description.

    An unparametrized ``generate_test_description`` yields a single run
    with no extra arguments.
    """
    gen_fn = module.generate_test_description
    if hasattr(gen_fn, '__parametrized__'):
        # Parametrized: the function itself yields (description, args) pairs.
        descriptions = gen_fn()
    else:
        descriptions = [(gen_fn, {})]
    runs = []
    for description, args in descriptions:
        runs.append(TestRun(name,
                            description,
                            args,
                            PreShutdownTestLoader().loadTestsFromModule(module),
                            PostShutdownTestLoader().loadTestsFromModule(module)))
    return runs
def PreShutdownTestLoader():
    """Make a loader for tests that run while the launched processes are up."""
    return _make_loader(False)
def PostShutdownTestLoader():
    """Make a loader for tests that run after the launched processes exit."""
    return _make_loader(True)
def _make_loader(load_post_shutdown):
class _loader(unittest.TestLoader):
"""TestLoader selectively loads pre-shutdown or post-shutdown tests."""
def loadTestsFromTestCase(self, testCaseClass):
if getattr(testCaseClass, '__post_shutdown_test__', False) == load_post_shutdown:
# Isolate test classes instances on a per parameterization basis
cases = super(_loader, self).loadTestsFromTestCase(
type(testCaseClass.__name__, (testCaseClass,), {
'__module__': testCaseClass.__module__
})
)
return cases
# Empty test suites will be ignored by the test runner
return self.suiteClass()
return _loader()
def _bind_test_args_to_tests(context, test_suite):
# Look for tests that expect additional arguments and bind items from the context
# to the tests
for test in _iterate_tests_in_test_suite(test_suite):
# Need to reach a littl |
` or ``categories``, which can
be used to select which portion of the corpus should be returned.
"""
    def __init__(self, root, fileids, encoding='utf8', tagset=None):
        """
        :type root: PathPointer or str
        :param root: A path pointer identifying the root directory for
            this corpus. If a string is specified, then it will be
            converted to a ``PathPointer`` automatically.
        :param fileids: A list of the files that make up this corpus.
            This list can either be specified explicitly, as a list of
            strings; or implicitly, as a regular expression over file
            paths. The absolute path for each file will be constructed
            by joining the reader's root to each file name.
        :param encoding: The default unicode encoding for the files
            that make up the corpus. The value of ``encoding`` can be any
            of the following:
            - A string: ``encoding`` is the encoding name for all files.
            - A dictionary: ``encoding[file_id]`` is the encoding
              name for the file whose identifier is ``file_id``. If
              ``file_id`` is not in ``encoding``, then the file
              contents will be processed using non-unicode byte strings.
            - A list: ``encoding`` should be a list of ``(regexp, encoding)``
              tuples. The encoding for a file whose identifier is ``file_id``
              will be the ``encoding`` value for the first tuple whose
              ``regexp`` matches the ``file_id``. If no tuple's ``regexp``
              matches the ``file_id``, the file contents will be processed
              using non-unicode byte strings.
            - None: the file contents of all files will be
              processed using non-unicode byte strings.
        :param tagset: The name of the tagset used by this corpus, to be used
            for normalizing or converting the POS tags returned by the
            tagged_...() methods.
        """
        # Convert the root to a path pointer, if necessary.
        if isinstance(root, compat.string_types) and not isinstance(root, PathPointer):
            # NOTE(review): the trailing '|' alternative makes this regex
            # match *any* string, so ``m`` is never None.
            m = re.match('(.*\.zip)/?(.*)$|', root)
            zipfile, zipentry = m.groups()
            if zipfile:
                root = ZipFilePathPointer(zipfile, zipentry)
            else:
                root = FileSystemPathPointer(root)
        elif not isinstance(root, PathPointer):
            raise TypeError('CorpusReader: expected a string or a PathPointer')
        # If `fileids` is a regexp, then expand it.
        if isinstance(fileids, compat.string_types):
            fileids = find_corpus_fileids(root, fileids)
        self._fileids = fileids
        """A list of the relative paths for the fileids that make up
        this corpus."""
        self._root = root
        """The root directory for this corpus."""
        # If encoding was specified as a list of regexps, then convert
        # it to a dictionary.
        if isinstance(encoding, list):
            encoding_dict = {}
            for fileid in self._fileids:
                for x in encoding:
                    (regexp, enc) = x
                    if re.match(regexp, fileid):
                        # First matching pattern wins for each fileid.
                        encoding_dict[fileid] = enc
                        break
            encoding = encoding_dict
        self._encoding = encoding
        """The default unicode encoding for the fileids that make up
        this corpus. If ``encoding`` is None, then the file
        contents are processed using byte strings."""
        self._tagset = tagset
def __repr__(self):
if isinstance(self._root, ZipFilePathPointer):
path = '%s/%s' % (self._root.zipfile.filename, self._root.entry)
else:
path = '%s' % self._root.path
return '<%s in %r>' % (self.__class__.__name__, path)
    def ensure_loaded(self):
        """
        Load this corpus (if it has not already been loaded). This is
        used by LazyCorpusLoader as a simple method that can be used to
        make sure a corpus is loaded -- e.g., in case a user wants to
        do help(some_corpus).
        """
        # NOTE(review): intentionally a no-op -- presumably the attribute
        # access on the lazy proxy is what triggers the actual load.
        pass # no need to actually do anything.
    def readme(self):
        """
        Return the contents of the corpus README file, if it exists.
        """
        # NOTE(review): if the file is absent, open() raises rather than
        # returning None -- the "if it exists" wording is aspirational.
        return self.open("README").read()
    def license(self):
        """
        Return the contents of the corpus LICENSE file, if it exists.
        """
        # NOTE(review): raises (does not return None) when LICENSE is missing.
        return self.open("LICENSE").read()
    def citation(self):
        """
        Return the contents of the corpus citation.bib file, if it exists.
        """
        # NOTE(review): raises (does not return None) when the file is missing.
        return self.open("citation.bib").read()
    def fileids(self):
        """
        Return a list of file identifiers for the fileids that make up
        this corpus.
        """
        # Note: returns the internal list itself, not a copy.
        return self._fileids
    def abspath(self, fileid):
        """
        Return the absolute path for the given file.
        :type fileid: str
        :param fileid: The file identifier for the file whose path
            should be returned.
        :rtype: PathPointer
        """
        # Delegates to the root PathPointer's join().
        return self._root.join(fileid)
def abspaths(self, fileids=None, include_encoding=False,
include_fileid=False):
"""
Return a list of the absolute paths for all fileids in this corpus;
or for the given list of fileids, if specified.
:type fileids: None or str or list
:param fileids: Specifies the set of fileids for which paths should
be returned. Can be None, for all fileids; a list of
file identifiers, for a specified set of fileids; or a single
file identifier, for a single file. Note that the return
value is always a list of paths, even if ``fileids`` is a
single file identifier.
:param include_encoding: If true, then return a list of
``(path_pointer, encoding)`` tuples.
:rtype: list(PathPointer)
"""
if fileids is None:
fileids = self._fileids
elif isinstance(fileids, compat.string_types):
fileids = [fileids]
paths = [self._root.join(f) for f in fileids]
if include_encoding and include_fileid:
return list(zip(paths, [self.encoding(f) for f in fileids], fileids))
elif include_fileid:
return list(zip(paths, fileids))
elif include_encoding:
return list(zip(paths, [self.encoding(f) for f in fileids]))
else:
return paths
def open(self, file):
"""
Return an open stream that can be used to read the given file.
If the file's encoding is not None, then the stream will
automatically decode the file's contents into unicode.
:param file: The file identifier of the file to read.
"""
encoding = self.encoding(file)
stream = self._root.join(file).open(encoding)
return stream
| def encoding(self, file):
"""
Return the unicode encoding for the given corpus file, if known.
If the encoding is unknown, or if the given file should be
processed using byte strings (str), then return None.
"""
| if isinstance(self._encoding, dict):
return self._encoding.get(file)
else:
return self._encoding
def _get_root(self): return self._root
root = property(_get_root, doc="""
The directory where this corpus is stored.
:type: PathPointer""")
######################################################################
#{ Corpora containing categorized items
######################################################################
class CategorizedCorpusReader(object):
"""
A mixin class used to aid in the implementation of corpus readers
for categorized corpora. This class defines the method
``categories()``, which returns a list of the categories for the
corpus or for a specified set of fileids; and overrides ``fileids()``
to take a ``categories`` argument, restricting the set of fileids to
be r |
from | pele.systems import BaseSystem
import pele.utils.elements.elements as elem # This is a dictionary of element parameters for atoms
class MolecularSystem(BaseSystem):
    """
    Representation for a molecular system, this system stores info about atoms, bonds,
    angles and torsions.
    It is possible to represent the molecule using a graph. However, this class is used
    to quickly and efficiently:

    - add/remove atoms, bonds, angles and torsions;
    - read/write PDB and other common formats;
    - interface between different formats of input files for CHARMM, AMBER etc.;
    - visualise molecular structures;
    - measure distances between structures.
    """
    def __init__(self):
        # NOTE(review): BaseSystem.__init__ is not invoked here, matching the
        # original code -- confirm whether the base class requires it.
        # Bug fix: the original bound these lists to locals, so they were
        # discarded immediately; store them on the instance instead.
        self.atoms = []
        self.bonds = []
class Atom(object):
    """
    Representation of a single atom object.

    NOTE(review): the original docstring ended mid-sentence ("Can have");
    attributes are presumably attached dynamically -- confirm with callers.
    """
# -*- coding: utf-8 -*-
import sys
# Python 2/3 compatibility shim: on PY3, alias the removed builtins so the
# rest of the module can keep using py2 names.
PY3 = False
if sys.version_info[0] >= 3: PY3 = True; unicode = str; unichr = chr; long = int
if PY3:
    import urllib.parse as urlparse # Very slow in PY2; native in PY3
else:
    import urlparse # Use the PY2 native module, which is faster there
import re
from platformcode import config, logger
from core import scrapertools
from core import servertools
from core.item import Item
from core import httptools
from channels import filtertools
from channels import autoplay
# Channel configuration: languages/qualities/servers used by filtertools
# and autoplay below.
IDIOMAS = {'vo': 'VO'}
list_language = list(IDIOMAS.values())
list_quality = []
list_servers = ['gounlimited']
host = 'http://sexgalaxy.net' #'http://streamingporn.xyz'
# UBIQFILE y KS2C
def mainlist(item):
    """Build the channel's root menu and register autoplay options."""
    logger.info()
    itemlist = []
    autoplay.init(item.channel, list_servers, list_quality)
    # (title, action, url) for every URL-backed menu entry.
    menu = [
        ("Peliculas", "lista", host + "/full-movies/"),
        ("Peliculas JAV", "lista", host + "/jav-movies/"),
        ("Videos", "lista", host + "/new-releases/"),
        ("Canales", "categorias", host + "/videos/"),
        ("Categorias", "categorias", host + "/videos/"),
    ]
    for titulo, accion, url in menu:
        itemlist.append(item.clone(title=titulo, action=accion, url=url))
    itemlist.append(item.clone(title="Buscar", action="search"))
    autoplay.show_option(item.channel, itemlist)
    return itemlist
def search(item, texto):
    """Run a site search for ``texto``; on failure, log and return no results."""
    logger.info()
    item.url = "%s/?s=%s&submit=Search" % (host, texto.replace(" ", "+"))
    try:
        return lista(item)
    except:
        # Best-effort: log the full exception triple and give back an
        # empty result list rather than crashing the UI.
        for line in sys.exc_info():
            logger.error("%s" % line)
        return []
def categorias(item):
    """
    List category or paysite links scraped from the index page.

    Which section is scraped depends on item.title: "Categorias" entries
    read the Popular Categories block, anything else reads Popular Paysites.
    """
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    if "Categorias" in item.title:
        data = scrapertools.find_single_match(data, '>Popular Categories<(.*?)>Popular Paysites<')
    else:
        data = scrapertools.find_single_match(data, '>Popular Paysites<(.*?)</p>')
    patron = '<a href="([^"]+)">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle in matches:
        # The source markup carries no thumbnail or plot for these entries.
        # (Removed a dead urlparse.urljoin() call on an always-empty string.)
        itemlist.append(item.clone(action="lista", title=str(scrapedtitle), url=scrapedurl,
                                   thumbnail="", plot=""))
    return itemlist
def lista(item):
    """Scrape one page of post listings into playable items, plus pagination."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url, timeout=3).data
    patron = ('<article id="post-.*?'
              '<a href="([^"]+)" rel="bookmark">([^<]+)<.*?'
              '<img src="([^"]+)"')
    for url, title, thumb in re.compile(patron, re.DOTALL).findall(data):
        if "manyvids" in title:
            continue  # manyvids posts are skipped
        itemlist.append(item.clone(action="findvideos", title=title, contentTitle=title,
                                   fanart=thumb, url=url, thumbnail=thumb, plot=""))
    next_page = scrapertools.find_single_match(data, '<div class="nav-previous"><a href="([^"]+)"')
    if next_page != "":
        itemlist.append(item.clone(action="lista", title="[COLOR blue]Página Siguiente >>[/COLOR]", url=next_page))
    return itemlist
def findvideos(item):
    """Extract hoster links from a post page and hand them to filters/autoplay."""
    logger.info()
    itemlist = []
    data = httptools.downloadpage(item.url).data
    data = re.sub(r"\n|\r|\t|amp;|\s{2}| ", "", data)
    patron = '<a href="([^"]+)" rel="nofollow[^<]+>(?:|<strong> |)(?:Streaming|Download)'
    for url in scrapertools.find_multiple_matches(data, patron):
        if "ubiqfile" in url:
            continue  # ubiqfile links are excluded
        itemlist.append(item.clone(action='play', title="%s", contentTitle=item.title, url=url))
    # The "%s" placeholder in each title is filled with the resolved server name.
    itemlist = servertools.get_servers_itemlist(itemlist, lambda i: i.title % i.server.capitalize())
    # Required by FilterTools
    itemlist = filtertools.get_links(itemlist, item, list_language, list_quality)
    # Required by AutoPlay
    autoplay.start(itemlist, item)
    return itemlist
|
# Sample fixture describing an order as returned by the ordering API.
# Note: fee fields are strings, quantity/ids are ints, mirroring the API.
verifyOrder = {
    'orderId': 1234,
    'orderDate': '2013-08-01 15:23:45',
    'prices': [{
        'id': 1,
        'laborFee': '2',
        'oneTimeFee': '2',
        'oneTimeFeeTax': '.1',
        'quantity': 1,
        'recurringFee': '2',
        'recurringFeeTax': '.1',
        'hourlyRecurringFee': '2',
        'setupFee': '1',
        'item': {'id': 1, 'description': 'this is a thing'},
    }]}
# placeOrder deliberately aliases verifyOrder (same object, not a copy).
placeOrder = verifyOrder
|
# yellowbrick.model_selection
# Visualizers that wrap the model selection libraries of Scikit-Learn
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Fri Mar 30 10:36:12 2018 -0400
#
# ID: __init__.py [c5355ee] benjamin@bengfort.com $
"""
Visualizers that wrap the model selection libraries of Scikit-Learn
"""
##########################################################################
## Imports
######################################################################## | ##
from .learning_curve import LearningCurve, lea | rning_curve
from .validation_curve import ValidationCurve, validation_curve
from .cross_validation import CVScores, cv_scores
# RFECV and Feature Importances moved here as of YB v1.0
from .importances import FeatureImportances, feature_importances
from .rfecv import RFECV, rfecv
|
import unittest
import numpy as np
from numpy.testing import assert_allclose
import theano
from keras.layers import convolutional
class TestConvolutions(unittest.TestCase):
    """Smoke tests for the convolutional, pooling, padding and upsampling
    layers (old Theano-backed Keras API).

    Each test sweeps layer configurations, feeds an all-ones input through
    ``get_output`` in both train and test phases, and checks output shapes
    where they are predictable.
    """
    def test_convolution_1d(self):
        """Sweep Convolution1D over border modes, strides and regularizers."""
        nb_samples = 9
        nb_steps = 7
        input_dim = 10
        filter_length = 6
        nb_filter = 5
        weights_in = [np.ones((nb_filter, input_dim, filter_length, 1)), np.ones(nb_filter)]
        input = np.ones((nb_samples, nb_steps, input_dim))
        for weight in [None, weights_in]:
            for border_mode in ['valid', 'full', 'same']:
                for subsample_length in [1, 3]:
                    # 'same' with a stride other than 1 is not supported.
                    if border_mode == 'same' and subsample_length != 1:
                        continue
                    for W_regularizer in [None, 'l2']:
                        for b_regularizer in [None, 'l2']:
                            for act_regularizer in [None, 'l2']:
                                layer = convolutional.Convolution1D(
                                    nb_filter, filter_length, weights=weight,
                                    border_mode=border_mode, W_regularizer=W_regularizer,
                                    b_regularizer=b_regularizer, activity_regularizer=act_regularizer,
                                    subsample_length=subsample_length, input_shape=(None, input_dim))
                                layer.input = theano.shared(value=input)
                                for train in [True, False]:
                                    out = layer.get_output(train).eval()
                                    # Batch size is always preserved.
                                    assert input.shape[0] == out.shape[0]
                                    if border_mode == 'same' and subsample_length == 1:
                                        # 'same' + unit stride preserves length.
                                        assert input.shape[1] == out.shape[1]
                                # Exercised for serializability only.
                                config = layer.get_config()
    def test_maxpooling_1d(self):
        """MaxPooling1D must evaluate cleanly for both border settings and strides."""
        nb_samples = 9
        nb_steps = 7
        input_dim = 10
        input = np.ones((nb_samples, nb_steps, input_dim))
        for ignore_border in [True, False]:
            for stride in [1, 2]:
                layer = convolutional.MaxPooling1D(stride=stride, ignore_border=ignore_border)
                layer.input = theano.shared(value=input)
                for train in [True, False]:
                    layer.get_output(train).eval()
                config = layer.get_config()
    def test_convolution_2d(self):
        """Sweep Convolution2D over border modes, subsampling and regularizers."""
        nb_samples = 8
        nb_filter = 9
        stack_size = 7
        nb_row = 10
        nb_col = 6
        input_nb_row = 11
        input_nb_col = 12
        weights_in = [np.ones((nb_filter, stack_size, nb_row, nb_col)), np.ones(nb_filter)]
        input = np.ones((nb_samples, stack_size, input_nb_row, input_nb_col))
        for weight in [None, weights_in]:
            for border_mode in ['valid', 'full', 'same']:
                for subsample in [(1, 1), (2, 3)]:
                    # 'same' with a subsample other than (1, 1) is not supported.
                    if border_mode == 'same' and subsample != (1, 1):
                        continue
                    for W_regularizer in [None, 'l2']:
                        for b_regularizer in [None, 'l2']:
                            for act_regularizer in [None, 'l2']:
                                layer = convolutional.Convolution2D(
                                    nb_filter, nb_row, nb_col, weights=weight,
                                    border_mode=border_mode, W_regularizer=W_regularizer,
                                    b_regularizer=b_regularizer, activity_regularizer=act_regularizer,
                                    subsample=subsample, input_shape=(stack_size, None, None))
                                layer.input = theano.shared(value=input)
                                for train in [True, False]:
                                    out = layer.get_output(train).eval()
                                    if border_mode == 'same' and subsample == (1, 1):
                                        # 'same' + unit subsample preserves the spatial dims.
                                        assert out.shape[2:] == input.shape[2:]
                                config = layer.get_config()
    def test_maxpooling_2d(self):
        """MaxPooling2D must evaluate cleanly for both border settings and strides."""
        nb_samples = 9
        stack_size = 7
        input_nb_row = 11
        input_nb_col = 12
        pool_size = (3, 3)
        input = np.ones((nb_samples, stack_size, input_nb_row, input_nb_col))
        for ignore_border in [True, False]:
            for stride in [(1, 1), (2, 2)]:
                layer = convolutional.MaxPooling2D(stride=stride, ignore_border=ignore_border, pool_size=pool_size)
                layer.input = theano.shared(value=input)
                for train in [True, False]:
                    layer.get_output(train).eval()
                config = layer.get_config()
    def test_zero_padding_2d(self):
        """ZeroPadding2D pads a 2-wide zero border and keeps the interior intact."""
        nb_samples = 9
        stack_size = 7
        input_nb_row = 11
        input_nb_col = 12
        input = np.ones((nb_samples, stack_size, input_nb_row, input_nb_col))
        layer = convolutional.ZeroPadding2D(padding=(2, 2))
        layer.input = theano.shared(value=input)
        for train in [True, False]:
            out = layer.get_output(train).eval()
            # Outermost two rows/cols on every side must be zero.
            for offset in [0, 1, -1, -2]:
                assert_allclose(out[:, :, offset, :], 0.)
                assert_allclose(out[:, :, :, offset], 0.)
            # The original (all-ones) content sits in the interior.
            assert_allclose(out[:, :, 2:-2, 2:-2], 1.)
        config = layer.get_config()
    def test_upsample_1d(self):
        """UpSample1D multiplies the step dimension by the repeat length."""
        nb_samples = 9
        nb_steps = 7
        input_dim = 10
        input = np.ones((nb_samples, nb_steps, input_dim))
        for length in [2, 3, 9]:
            layer = convolutional.UpSample1D(length=length)
            layer.input = theano.shared(value=input)
            for train in [True, False]:
                out = layer.get_output(train).eval()
                assert out.shape[1] == length*nb_steps
            config = layer.get_config()
    def test_upsample_2d(self):
        """UpSample2D scales rows and columns independently by the given factors."""
        nb_samples = 9
        stack_size = 7
        input_nb_row = 11
        input_nb_col = 12
        input = np.ones((nb_samples, stack_size, input_nb_row, input_nb_col))
        for length_row in [2, 3, 9]:
            for length_col in [2, 3, 9]:
                layer = convolutional.UpSample2D(size=(length_row, length_col))
                layer.input = theano.shared(value=input)
                for train in [True, False]:
                    out = layer.get_output(train).eval()
                    assert out.shape[2] == length_row*input_nb_row
                    assert out.shape[3] == length_col*input_nb_col
                config = layer.get_config()
if __name__ == '__main__':
    unittest.main()
|
# Authors:
# Pedro Jose Pereira Vieito <pvieito@gmail.com> (Twitter: @pvieito)
#
# URL: https://github.com/mr-orange/Sick-Beard
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
#
# Uses the Synology Download Station API: http://download.synology.com/download/Document/DeveloperGuide/Synology_Download_Station_Web_API.pdf
import sickbeard
from sickbeard.clients.generic import GenericClient
class DownloadStationAPI(GenericClient):
    """
    Torrent client for Synology Download Station, driven via its Web API.

    See: http://download.synology.com/download/Document/DeveloperGuide/Synology_Download_Station_Web_API.pdf
    """

    def __init__(self, host=None, username=None, password=None):
        super(DownloadStationAPI, self).__init__('DownloadStation', host, username, password)
        # Task operations go through task.cgi; authentication through auth.cgi.
        self.url = self.host + 'webapi/DownloadStation/task.cgi'

    def _get_auth(self):
        """Log in and cache the session id (sid); return it, or None on failure."""
        auth_url = self.host + 'webapi/auth.cgi?api=SYNO.API.Auth&version=2&method=login&account=' + self.username + '&passwd=' + self.password + '&session=DownloadStation&format=sid'
        try:
            self.response = self.session.get(auth_url, verify=False)
            self.auth = self.response.json()['data']['sid']
        except Exception:
            # Narrowed from a bare except: any network/JSON failure still
            # means "not authenticated" rather than a crash.
            return None
        return self.auth

    def _task_data(self):
        """Build the common POST payload shared by both task-creation calls."""
        data = {'api': 'SYNO.DownloadStation.Task',
                'version': '1',
                'method': 'create',
                'session': 'DownloadStation',
                '_sid': self.auth}
        if sickbeard.TORRENT_PATH:
            data['destination'] = sickbeard.TORRENT_PATH
        return data

    def _add_torrent_uri(self, result):
        """Create a download task from a torrent/magnet URI; return API success flag."""
        data = self._task_data()
        data['uri'] = result.url
        self._request(method='post', data=data)
        return self.response.json()['success']

    def _add_torrent_file(self, result):
        """Create a download task by uploading a .torrent file; return API success flag."""
        data = self._task_data()
        files = {'file': (result.name + '.torrent', result.content)}
        self._request(method='post', data=data, files=files)
        return self.response.json()['success']


api = DownloadStationAPI()
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Test mempool limiting together/eviction with the wallet
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class MempoolLimitTest(BitcoinTestFramework):
    """Test -maxmempool eviction: a low-fee tx must be evicted (but remain
    unconfirmed in the wallet) once higher-fee transactions fill the pool.

    NOTE(review): __init__ does not call BitcoinTestFramework.__init__,
    matching the original code -- confirm the framework tolerates this.
    """
    def __init__(self):
        # Pre-generated OP_RETURN outputs used to bloat transactions.
        self.txouts = gen_return_txouts()
    def setup_network(self):
        self.nodes = []
        # 5 MB mempool cap forces eviction; zero-conf change is disabled so
        # the wallet cannot respend unconfirmed outputs.
        self.nodes.append(start_node(0, self.options.tmpdir, ["-maxmempool=5", "-spendzeroconfchange=0"]))
        self.is_network_split = False
        self.sync_all()
        self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
    def setup_chain(self):
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 2)
    def run_test(self):
        txids = []
        utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], 90)
        #create a mempool tx that will be evicted
        us0 = utxos.pop()
        inputs = [{ "txid" : us0["txid"], "vout" : us0["vout"]}]
        outputs = {self.nodes[0].getnewaddress() : 0.0001}
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        self.nodes[0].settxfee(self.relayfee) # specifically fund this tx with low fee
        txF = self.nodes[0].fundrawtransaction(tx)
        self.nodes[0].settxfee(0) # return to automatic fee selection
        txFS = self.nodes[0].signrawtransaction(txF['hex'])
        txid = self.nodes[0].sendrawtransaction(txFS['hex'])
        relayfee = self.nodes[0].getnetworkinfo()['relayfee']
        base_fee = relayfee*100
        # Flood the mempool with batches of large transactions at
        # increasing feerates so the low-fee tx is pushed out.
        for i in xrange (4):
            txids.append([])
            txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[30*i:30*i+30], (i+1)*base_fee)
        # by now, the tx should be evicted, check confirmation state
        assert(txid not in self.nodes[0].getrawmempool())
        txdata = self.nodes[0].gettransaction(txid)
        assert(txdata['confirmations'] == 0) #confirmation should still be 0
if __name__ == '__main__':
    MempoolLimitTest().main()
|
#!/usr/bin/pythonTest
# -*- coding: utf-8 -*-
#
# Web functions want links
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# The GNU General Public License is available from:
# The Free Software Foundation, Inc.
# 51 Franklin Street, Fifth Floor
# Boston MA 02110-1301 USA
#
# http://www.gnu.org/licenses/gpl.html
#
# Copyright 2015-2016 Rick Graves
#
def getUniqueLinks( sReadFile, sOutFile ):
    """Read lines from sReadFile, keep the unique URLs, sort them by
    (domain, path) then by the URL itself, and write them to sOutFile."""
    #
    from File.Get import getListFromFileLines
    from File.Write import QuickDumpLines
    #
    from Web.Address import getHostPathTuple, getDomainOffURL
    from Web.Test import isURL
    #
    def _sort_key( sURL ):
        # Same decorate key as the original decorate-sort-undecorate pass.
        sHost, sPath = getHostPathTuple( sURL )
        return ( ( getDomainOffURL( sHost ), sPath ), sURL )
    #
    setLinks = frozenset(
        sLine for sLine in getListFromFileLines( sReadFile ) if isURL( sLine ) )
    #
    QuickDumpLines( sorted( setLinks, key = _sort_key ), sOutFile )
if __name__ == "__main__":
#
from os.path import join
from sys import argv
#
from six import print_ as print3
#
from Dir.Get import sTempDir
from File.Test import isFileThere
from Utils.Result import sayTestResult
#
lProblems = []
#
args = argv[ 1 : ]
#
sReadFile = join( sTempDir, 'LotsOfLinks.txt' )
sOutFile = join( sTempDir, 'UniqueLinks.txt' )
#
if args:
#
sReadFile = args[0]
#
if len( args ) > 1:
#
sOutFile = args[2]
#
#
else:
#
if isFileThere( sReadFile ):
#
getUniqueLinks( sReadFile, sOutFile )
#
else:
| #
print3( 'Usage: WantLinks [inputFile [, outputFile] ]' )
print3( 'default inputFile {temp dir}lotsolinks.txt' )
print3( 'default outputFile | {temp dir}UniqueLinks.txt' )
#
#
#
if False:
#
lProblems.append( 'getDotQuad4IspTester()' )
#
#
#
sayTestResult( lProblems ) |
###############################################################################
#
# file: urlfetcher.py
#
# Purpose: refer to module documentation for details
#
# Note: This file is part of Termsaver application, and should not be used
# or executed separately.
#
###############################################################################
#
# Copyright 2012 Termsaver
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
###############################################################################
"""
Simple screensaver that displays data from a URL.
See additional information in the class itself.
The screen class available here is | :
* `UrlFetcherScreen`
"""
#
# Internal modules
#
from termsaverlib.screen.base.urlfetcher import UrlFetcherBase
from termsaverlib import constants
from termsaverlib.i18n import _
class UrlFetcherScreen(UrlFetcherBase):
    """
    Simple screensaver that displays data from a URL.
    """
    def __init__(self):
        """
        Creates a new instance of this class.
        """
        # Register under the name "urlfetcher" with a localized description
        # shown in termsaver's screen list.
        UrlFetcherBase.__init__(self,
            "urlfetcher",
            _("displays url contents with typing animation"))
    def _message_no_url(self):
        """
        Return the localized help text shown when no URL was provided.
        """
        return _("""
You just need to provide the URL from where %(app_title)s will read and
display on screen.

If you do not have any idea which URL to use, check out some examples here:

    RFC

        RFC-1034 - http://tools.ietf.org/rfc/rfc1034.txt

        See a RFC list from Wikipedia:
        http://en.wikipedia.org/wiki/List_of_RFCs
        (remember to use the txt version)
""") % {
            'app_title': constants.App.TITLE,
        }
|
import numpy as np
import sys
import os
import time
from ase import Atom, Atoms
from ase.visualize import view
from ase.units import Bohr
from ase.structure import bulk
from gpaw import GPAW
from gpaw.atom.basis import BasisMaker
from gpaw.response.df import DF
from gpaw.mpi import serial_comm, rank, size
from gpaw.utilities import devnull
# Ground state calculation
# Primitive fcc Al cell: DFT ground state written out for the response calc.
a = 4.043
atoms = bulk('Al', 'fcc', a=a)
atoms.center()
calc = GPAW(gpts=(12,12,12),
            kpts=(4,4,4),
            xc='LDA')
atoms.set_calculator(calc)
atoms.get_potential_energy()
calc.write('Al1.gpw','all')
# Excited state calculation
# EELS spectrum of the primitive cell at q = (1/4, 0, 0).
q = np.array([1./4.,0.,0.])
w = np.linspace(0, 24, 241)
df = DF(calc='Al1.gpw', q=q, w=w, eta=0.2, ecut=50)
#df.write('Al.pckl')
df.get_EELS_spectrum(filename='EELS_Al_1')
# Same crystal as an 8-atom supercell; the equivalent q doubles to (1/2, 0, 0).
atoms = Atoms('Al8',scaled_positions=[(0,0,0),
                                      (0.5,0,0),
                                      (0,0.5,0),
                                      (0,0,0.5),
                                      (0.5,0.5,0),
                                      (0.5,0,0.5),
                                      (0.,0.5,0.5),
                                      (0.5,0.5,0.5)],
          cell=[(0,a,a),(a,0,a),(a,a,0)],
          pbc=True)
calc = GPAW(gpts=(24,24,24),
            kpts=(2,2,2),
            xc='LDA')
atoms.set_calculator(calc)
atoms.get_potential_energy()
calc.write('Al2.gpw','all')

# Excited state calculation
q = np.array([1./2.,0.,0.])
w = np.linspace(0, 24, 241)
df = DF(calc='Al2.gpw', q=q, w=w, eta=0.2, ecut=50)
#df.write('Al.pckl')
df.get_EELS_spectrum(filename='EELS_Al_2')
# Regression check: the two spectra must agree to within 0.2% pointwise
# (first frequency point skipped to avoid dividing by ~0).
d1 = np.loadtxt('EELS_Al_1')
d2 = np.loadtxt('EELS_Al_2')
error1 = (d1[1:,1] - d2[1:,1]) / d1[1:,1] * 100
error2 = (d1[1:,2] - d2[1:,2]) / d1[1:,2] * 100
if error1.max() > 0.2 or error2.max() > 0.2: # percent
    print error1.max(), error2.max()
    raise ValueError('Pls check spectrum !')
#if rank == 0:
#    os.remove('Al1.gpw')
#    os.remove('Al2.gpw')
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Tue 31 May 2016
@author: Fabrizio Coccetti (fabrizio.coccetti@centrofermi.it) [www.fc8.net]
Query Run Db and extract several infos
"""
import os
import MySQLdb
from datetime import datetime, timedelta
import ConfigParser
import logging
import logging.config
import calendar
from e3monitor.config.__stations__ import EEE_STATISTIC_STATIONS
from e3monitor.config.__files_server__ import (logConfigFile,
dbConfigFile,
pklStatFile,
pathWorkDir)
def daterange(start_date, end_date):
    """Yield each date from start_date (inclusive) to end_date (exclusive)."""
    total_days = int((end_date - start_date).days)
    for offset in range(total_days):
        yield start_date + timedelta(offset)
# Define start of the run and other dates
startRun = datetime(2015, 11, 7)
startRunStr = startRun.strftime("%Y-%m-%d")
# ATTENTION
# endRun must be the day + 1
endRun = datetime(2016, 5, 21)
# Bug fix: this previously re-formatted startRun instead of endRun.
endRunStr = endRun.strftime("%Y-%m-%d")
#today = datetime.today()
#todayStr = today.strftime("%Y-%m-%d")
# Set up logging
logging.config.fileConfig(logConfigFile)
logger = logging.getLogger('full')
logger.info('Started')
logger = logging.getLogger('plain')
# Open output file
w = open('/var/www/html/monitor/stats/tracks_per_day_per_station.csv', 'w')
logger.info('Opened output file.')
# Adding headers to the output file: one column per station
w.write('Date' + ',')
for schoolName in EEE_STATISTIC_STATIONS:
    w.write(schoolName + ',')
w.write('\n')
# Reading db ini file
logger.info('Reading ' + dbConfigFile)
parser = ConfigParser.ConfigParser()
parser.read(dbConfigFile)
host = parser.get('General', 'host')
user = parser.get('General', 'user')
dbname = parser.get('General', 'dbname')
passwd = parser.get('General', 'passwd')
# Connecting to the database
logger.info('Connecting to %s on %s (as %s)' % (dbname, host, user))
db = MySQLdb.connect(host=host, user=user, passwd=passwd, db=dbname)
cur = db.cursor()
# Query for the number of tracks every day, per station
logger.info('Queries of the total number of Tracks')
query = "SELECT SUM(num_track_events) from runs2 WHERE (run_date = %s) AND station_name = %s;"
logger.info("Exec loop: " + query)
for _lastDay in daterange(startRun, endRun):
    _lastDayStr = _lastDay.strftime("%Y-%m-%d")
    # writing date to file
    w.write(_lastDayStr + ',')
    # Loop for each station in Run
    for schoolName in EEE_STATISTIC_STATIONS:
        queryParam = (_lastDayStr, schoolName)
        logger.info('Parameters: ' + str(queryParam))
        cur.execute(query, queryParam)
        try:
            _tracks = int(cur.fetchone()[0])
        except (TypeError, ValueError):
            # SUM() yields NULL (-> None) when no rows match that day/station.
            _tracks = 0
        logger.info('School: ' + schoolName + ' Tracks: ' + str(_tracks))
        w.write(str(_tracks) + ',')
    w.write('\n')
logger.info('Final Result of the queries:\n')
# Save the data extracted from the db
#logger.info('Writing data to file...')
#output = open(os.path.join(pathWorkDir, pklStatFile), 'wb')
#pickle.dump(trackStat, output)
#output.close()
#logger = logging.getLogger('full')
#logger.info('Written ' + os.path.join(pathWorkDir, pklStatFile))
cur.close()
db.close()
w.close()
logger.info('Finished')
|
#
# Licensed to the Apache Software Fou | ndation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "Lice | nse"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.amazon.aws.operators.emr_terminate_job_flow`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.amazon.aws.operators.emr_terminate_job_flow import EmrTerminateJobFlowOperator # noqa
# Emit a deprecation warning at import time, pointing callers at the new path.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.amazon.aws.operators.emr_terminate_job_flow`.",
    DeprecationWarning,
    stacklevel=2,  # attribute the warning to the importing module, not this shim
)
|
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
if hasattr(self.rel, 'field') and self.rel.field is self:
obj.rel.field = obj
memodict[id(self)] = obj
return obj
def __copy__(self):
# We need to avoid hitting __reduce__, so define this
# slightly weird copy construct.
obj = Empty()
obj.__class__ = self.__class__
obj.__dict__ = self.__dict__.copy()
return obj
    def __reduce__(self):
        """
        Pickling should return the model._meta.fields instance of the field,
        not a new copy of that field. So, we use the app cache to load the
        model and then the field back.
        """
        if not hasattr(self, 'model'):
            # Fields are sometimes used without attaching them to models (for
            # example in aggregation). In this case give back a plain field
            # instance. The code below will create a new empty instance of
            # class self.__class__, then update its dict with self.__dict__
            # values - so, this is very close to normal pickle.
            return _empty, (self.__class__,), self.__dict__
        if self.model._deferred:
            # Deferred model will not be found from the app cache. This could
            # be fixed by reconstructing the deferred model on unpickle.
            raise RuntimeError("Fields of deferred models can't be reduced")
        # Reconstruct via the app cache: (app_label, model name, field name)
        # is enough to find this exact field instance again on unpickle.
        return _load_field, (self.model._meta.app_label, self.model._meta.object_name,
                             self.name)
    def to_python(self, value):
        """
        Converts the input value into the expected Python data type, raising
        django.core.exceptions.ValidationError if the data can't be converted.
        Returns the converted value. Subclasses should override this.
        """
        # Base implementation is the identity; only subclasses convert.
        return value
def run_validators(self, value):
if value in self.empty_values:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError as e:
if hasattr(e, 'code') and e.code in self.error_messages:
e.message = self.error_messages[e.code]
errors.extend(e.error_list)
if errors:
raise exceptions.ValidationError(errors)
    def validate(self, value, model_instance):
        """
        Validates value and throws ValidationError. Subclasses should override
        this to provide validation logic.

        Checks, in order: choices membership (honoring optgroups), the
        null constraint, and the blank constraint.
        """
        if not self.editable:
            # Skip validation for non-editable fields.
            return
        if self._choices and value not in self.empty_values:
            for option_key, option_value in self.choices:
                if isinstance(option_value, (list, tuple)):
                    # This is an optgroup, so look inside the group for
                    # options.
                    for optgroup_key, optgroup_value in option_value:
                        if value == optgroup_key:
                            return
                elif value == option_key:
                    return
            # The value matched none of the declared choices.
            raise exceptions.ValidationError(
                self.error_messages['invalid_choice'],
                code='invalid_choice',
                params={'value': value},
            )
        if value is None and not self.null:
            raise exceptions.ValidationError(self.error_messages['null'], code='null')
        if not self.blank and value in self.empty_values:
            raise exceptions.ValidationError(self.error_messages['blank'], code='blank')
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
    def db_type(self, connection):
        """
        Returns the database column data type for this field, for the provided
        connection.  Returns None when the internal type has no mapping.
        """
        # The default implementation of this method looks at the
        # backend-specific DATA_TYPES dictionary, looking up the field by its
        # "internal type".
        #
        # A Field class can implement the get_internal_type() method to specify
        # which *preexisting* Django Field class it's most similar to -- i.e.,
        # a custom field might be represented by a TEXT column type, which is
        # the same as the TextField Django field type, which means the custom
        # field's get_internal_type() returns 'TextField'.
        #
        # But the limitation of the get_internal_type() / data_types approach
        # is that it cannot handle database column types that aren't already
        # mapped to one of the built-in Django field types. In this case, you
        # can implement db_type() instead of get_internal_type() to specify
        # exactly which wacky database column type you want to use.
        params = self.db_parameters(connection)
        if params['type']:
            if params['check']:
                # Append the CHECK constraint inline with the column type.
                return "%s CHECK (%s)" % (params['type'], params['check'])
            else:
                return params['type']
        return None
def db_parameters(self, connection):
"""
Replacement for db_type, providing a range of different return
values (type, checks)
"""
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
type_string = connection.creation.data_types[self.get_internal_type()] % data
except KeyError:
type_string = None
try:
check_string = connection.creation.data_type_check_constraints[self.get_internal_type()] % data
except KeyError:
check_string = None
return {
"type": type_string,
"check": check_string,
}
def db_type_suffix(self, connection):
return connection.creation.data_types_suffix.get(self.get_internal_type())
    @property
    def unique(self):
        # Primary keys are implicitly unique even when _unique is unset.
        return self._unique or self.primary_key
def set_attributes_from_name(self, name):
if not self.name:
self.name = name
self.attname, self.column = self.get_attname_column()
if self.verbose_name is None and self.name:
self.verbose_name = self.name.replace('_', ' ')
    def contribute_to_class(self, cls, name, virtual_only=False):
        """
        Register this field on the model class ``cls`` under ``name``:
        bind naming/column info, add it to the model's _meta, and install
        the get_FOO_display helper when choices are defined.
        """
        self.set_attributes_from_name(name)
        self.model = cls
        if virtual_only:
            cls._meta.add_virtual_field(self)
        else:
            cls._meta.add_field(self)
        if self.choices:
            # get_<name>_display() resolves the stored value to its choice label.
            setattr(cls, 'get_%s_display' % self.name,
                    curry(cls._get_FIELD_display, field=self))
    def get_attname(self):
        # Attribute name used on model instances for this field's value.
        return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
    def get_internal_type(self):
        # Field-type name used as the key into backend column-type maps;
        # defaults to the concrete class name.
        return self.__class__.__name__
    def pre_save(self, model_instance, add):
        """
        Returns field's value just before saving.

        ``add`` is unused in this base implementation.
        """
        return getattr(model_instance, self.attname)
    def get_prep_value(self, value):
        """
        Perform preliminary non-db specific value checks and conversions.
        """
        # Resolve lazy (Promise) objects to their concrete value first.
        if isinstance(value, Promise):
            value = value._proxy____cast()
        return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
Used by the default implementations of ``get_db_prep_save``and
`get_db_prep_lookup```
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_d |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.