text
stringlengths 4
1.02M
| meta
dict |
|---|---|
import httplib
import io
import json
import logging
import traceback
from datetime import datetime, date
import dateutil.tz
import tornado.ioloop
import tornado.web
from ConfigParser import ConfigParser
from decimal import Decimal
from sqlalchemy import create_engine
class JsonDumper(json.JSONEncoder):
    """JSON encoder handling datetime, date and Decimal values.

    datetimes are stamped with the server's local timezone offset,
    dates become 'YYYY-MM-DD', Decimals are stringified to avoid
    float precision loss.
    """

    def default(self, obj):
        # datetime must be checked before date: datetime subclasses date.
        if isinstance(obj, datetime):
            # BUG FIX: the old format '%Y-%m-%dT%H:%M:%SZ%z' emitted a literal
            # 'Z' (UTC designator) *and* a numeric offset; emit only the offset.
            return obj.replace(tzinfo=dateutil.tz.tzlocal()).strftime('%Y-%m-%dT%H:%M:%S%z')
        elif isinstance(obj, date):
            # BUG FIX: datetime.date has no tzinfo, so the previous
            # obj.replace(tzinfo=...) raised TypeError for plain dates.
            return obj.strftime('%Y-%m-%d')
        elif isinstance(obj, Decimal):
            # Stringify so precision is preserved in the JSON output.
            return str(obj)
        else:
            return json.JSONEncoder.default(self, obj)
def json_dumps(data):
    """Serialize *data* to JSON using the project's JsonDumper encoder."""
    return json.dumps(data, sort_keys=False, cls=JsonDumper)
def get_post_visit(db, post_id):
    """Return the stored visit counter for *post_id*; missing rows count as 0."""
    row = db.execute('SELECT `visit` FROM `posts_visit` WHERE `post_id`=%s', post_id).first()
    if not row:
        return 0
    return row['visit']
def post_visit(db, post_id):
    """Increment the visit counter for *post_id* (creating the row on first
    hit via MySQL's upsert) and return the new total."""
    upsert = (
        "INSERT INTO `posts_visit` (`post_id`, `visit`) VALUES (%s,1)"
        " ON DUPLICATE KEY UPDATE `visit` = `visit`+1"
    )
    db.execute(upsert, post_id)
    return get_post_visit(db, post_id)
def post_categorys(db, post_id):
    """Return the taxonomy terms (categories, tags, ...) attached to a post.

    Each item carries the term's id/name/slug, an optional poster image URL
    (stored in wp_options under 'z_taxonomy_image<term_id>'), and the parent
    term when the joined parent row is complete, else None.

    :param db: SQLAlchemy connection (raw SQL, pyformat %s placeholders).
    :param post_id: wp_posts.ID whose terms are listed.
    """
    # relationships -> taxonomy -> terms; LEFT JOIN the parent term (alias b)
    # and the optional per-term poster image option row.
    sql = "SELECT DISTINCT `wp_term_taxonomy`.`taxonomy`,`wp_terms`.`term_id`,`wp_terms`.`name`,`wp_terms`.`slug`,`wp_term_taxonomy`.`parent` AS `parent_id`,b.`name` AS `parent_name`,b.`slug` as `parent_slug`,`wp_options`.`option_value` AS `poster` FROM `wp_term_relationships` INNER JOIN `wp_term_taxonomy` ON `wp_term_taxonomy`.`term_taxonomy_id` = `wp_term_relationships`.`term_taxonomy_id` INNER JOIN `wp_terms` ON `wp_term_taxonomy`.`term_id`=`wp_terms`.`term_id` LEFT JOIN `wp_terms` b ON b.term_id = `wp_term_taxonomy`.`parent` LEFT JOIN `wp_options` ON `wp_options`.`option_name` = CONCAT('z_taxonomy_image', `wp_terms`.`term_id`)"
    sql += " WHERE `wp_term_relationships`.`object_id` = %s"
    rs = db.execute(sql, post_id)
    return [{
        'id': x['term_id'],
        'name': x['name'],
        'slug': x['slug'],
        'poster': x['poster'],
        # Parent is only exposed when all three joined columns are present.
        'parent': {
            'id': x['parent_id'],
            'name': x['parent_name'],
            'slug': x['parent_slug'],
        } if x['parent_id'] and x['parent_name'] and x['parent_slug'] else None,
    } for x in rs]
def post_meta(db, post_id):
    """Fetch a post's public meta as a dict, skipping WordPress-internal
    keys (those starting with '_')."""
    rows = db.execute("SELECT DISTINCT `meta_key`,`meta_value` FROM `wp_postmeta` WHERE post_id=%s", post_id)
    return {
        row['meta_key']: row['meta_value']
        for row in rows
        if not row['meta_key'].startswith('_')
    }
def post_attachment(db, post_id):
    """Return the attachments referenced by a post's meta values, keyed by
    attachment id.

    wp_postmeta rows whose meta_value equals the ID of another wp_posts row
    (the attachment) are joined; each attachment contributes its date,
    title, MIME type and URL (guid).
    """
    sql = "SELECT DISTINCT `wp_posts`.`id`, `wp_posts`.`post_date`,`wp_posts`.`post_title`,`wp_posts`.`guid` as `url`, `wp_posts`.`post_mime_type` as `mime_type` FROM `wp_postmeta` JOIN `wp_posts` ON `wp_postmeta`.`meta_value` = `wp_posts`.`ID`"
    sql += " WHERE `wp_postmeta`.`post_id` = %s"
    rs = db.execute(sql, post_id)
    return dict((x['id'], {
        'date': x['post_date'],
        # BUG FIX: 'title' previously copied post_date; use the post title.
        'title': x['post_title'],
        'mime_type': x['mime_type'],
        'url': x['url'],
    }) for x in rs)
def category_parents(db, category_id):
    """Walk up the taxonomy tree and return ancestor term ids, nearest first.

    Stops when a term has no parent (parent id 0/NULL).
    """
    pid = db.execute('SELECT DISTINCT `parent` FROM `wp_term_taxonomy` WHERE `term_id`=%s', category_id).scalar()
    if not pid:
        return []
    return [pid] + category_parents(db, pid)
def category_childrens(db, category_id):
    """Return the term ids of every descendant of *category_id*.

    Depth-first walk of wp_term_taxonomy via the ``parent`` column: direct
    children come first, each immediately followed by its own subtree.
    *category_id* itself is not included.

    BUG FIX: the previous version recursed on each child AND kept iterating
    the result list while the recursion extended it, so every node at depth
    >= 3 was collected multiple times (duplicate ids and redundant queries).
    Callers only use the result inside SQL IN (...), so de-duplication is
    safe.
    """
    rows = db.execute('SELECT DISTINCT `term_id` FROM `wp_term_taxonomy` WHERE `parent`=%s',
                      category_id).fetchall()
    children = [x['term_id'] for x in rows]
    descendants = list(children)
    for child in children:
        descendants += category_childrens(db, child)
    return descendants
def query_category(db, category_id):
    """Fetch one term with its poster image and recursively resolved parents.

    Returns None when the term does not exist. Recursion depth equals the
    term's depth in the taxonomy tree (parent id 0/NULL terminates).
    """
    sql = """SELECT `wp_term_taxonomy`.`taxonomy`,`wp_terms`.`term_id`,`wp_terms`.`name`,`wp_terms`.`slug`,`wp_term_taxonomy`.`parent` AS `parent_id`,b.`name` AS `parent_name`,b.`slug` as `parent_slug`,`wp_options`.`option_value` AS `poster` FROM `wp_term_taxonomy` INNER JOIN `wp_terms` ON `wp_term_taxonomy`.`term_id`=`wp_terms`.`term_id` LEFT JOIN `wp_terms` b ON b.term_id = `wp_term_taxonomy`.`parent` LEFT JOIN `wp_options` ON `wp_options`.`option_name` = CONCAT('z_taxonomy_image', `wp_terms`.`term_id`)"""
    sql += " WHERE `wp_terms`.`term_id`=%s"
    x = db.execute(sql, category_id).first()
    return {
        'id': x['term_id'],
        'name': x['name'],
        'slug': x['slug'],
        'poster': x['poster'],
        # Resolve the full ancestor chain recursively.
        'parent': query_category(db, x['parent_id']) if x['parent_id'] else None,
    } if x else None
def query_categorys(db, taxonomy, parent_id, offset, limit):
    """List the child terms of *parent_id*, optionally filtered by taxonomy.

    Skips WordPress' built-in 'uncategorized' term; pages with
    LIMIT offset,limit ordered by wp_terms.term_order.

    :param taxonomy: optional wp_term_taxonomy.taxonomy filter ('category', ...).
    :param parent_id: term id whose direct children are listed (0 = top level).
    """
    q = []
    sql = """SELECT `wp_term_taxonomy`.`taxonomy`,`wp_terms`.`term_id`,`wp_terms`.`name`,`wp_terms`.`slug`,`wp_term_taxonomy`.`parent` AS `parent_id`,b.`name` AS `parent_name`,b.`slug` as `parent_slug`,`wp_options`.`option_value` AS `poster` FROM `wp_term_taxonomy` INNER JOIN `wp_terms` ON `wp_term_taxonomy`.`term_id`=`wp_terms`.`term_id` LEFT JOIN `wp_terms` b ON b.term_id = `wp_term_taxonomy`.`parent` LEFT JOIN `wp_options` ON `wp_options`.`option_name` = CONCAT('z_taxonomy_image', `wp_terms`.`term_id`)"""
    sql += " WHERE `wp_terms`.`slug` != 'uncategorized' AND `wp_term_taxonomy`.`parent`=%s"
    q.append(parent_id)
    if taxonomy:
        sql += " AND `wp_term_taxonomy`.`taxonomy`=%s"
        q.append(taxonomy)
    sql += " ORDER BY `wp_terms`.`term_order`"
    sql += " LIMIT %s,%s"
    q.append(offset)
    q.append(limit)
    rs = db.execute(sql, *q)
    results = {
        'offset': offset,
        'max': limit,
        'tax': taxonomy,
        'parent_id': parent_id,
        # Fully resolved parent record (with its own ancestor chain).
        'parent': query_category(db, parent_id),
        'data': [{
            'id': x['term_id'],
            'name': x['name'],
            'slug': x['slug'],
            'poster': x['poster'],
            # Parent only exposed when the joined row is complete.
            'parent': {
                'id': x['parent_id'],
                'name': x['parent_name'],
                'slug': x['parent_slug'],
            } if x['parent_id'] and x['parent_name'] and x['parent_slug'] else None,
        } for x in rs]
    }
    rs.close()
    return results
def query_post(db, post_id):
    """Fetch one published post with its terms, meta, attachments and visits.

    Returns None when no published post has that ID.
    """
    sql = """SELECT `wp_posts`.`ID` as `id`, `wp_posts`.`post_date`,`wp_posts`.`post_title`,`wp_posts`.`post_content` FROM `wp_posts`"""
    sql += " WHERE `wp_posts`.`ID`=%s AND `wp_posts`.`post_status`='publish'"
    x = db.execute(sql, post_id).first()
    return {
        'id': x['id'],
        # NOTE(review): key 'data' holds post_date — looks like a typo for
        # 'date', but it is part of the public JSON shape, so it is kept.
        'data': x['post_date'],
        'title': x['post_title'],
        'content': x['post_content'],
        'category': post_categorys(db, x['id']),
        'meta': post_meta(db, x['id']),
        'attachment': post_attachment(db, x['id']),
        'visit': get_post_visit(db, x['id']),
    } if x else None
# WHERE MATCH (post_title) AGAINST ('河北 宜昌' IN NATURAL LANGUAGE MODE)
def search_posts(db, taxonomy, category, offset, limit, kw):
    """Full-text search over published posts of one post_type.

    Uses MySQL MATCH ... AGAINST on post_title (requires a FULLTEXT index on
    that column). When *category* is given, results are restricted to that
    term and all of its descendants.

    :param taxonomy: wp_posts.post_type value (e.g. 'post').
    :param category: optional term id used as subtree root, or falsy.
    :param offset: LIMIT offset.
    :param limit: LIMIT row count.
    :param kw: keyword string for the fulltext match.
    """
    q = []
    sql = """SELECT DISTINCT `wp_posts`.`ID`, `wp_posts`.`post_date`,`wp_posts`.`post_title`,`wp_posts`.`post_content` FROM `wp_posts`"""
    sql += " WHERE `wp_posts`.`post_type` =%s AND `wp_posts`.`post_status`='publish'"
    sql += " AND MATCH (`wp_posts`.`post_title`) AGAINST (%s IN NATURAL LANGUAGE MODE)"
    q.append(taxonomy)
    if category:
        # Rebuild the statement with a term filter. Parameter order stays
        # taxonomy, term ids..., keyword — matching the %s placeholders.
        categorys = category_childrens(db, category)
        categorys.append(category)
        sql = """SELECT DISTINCT `wp_posts`.`ID`, `wp_posts`.`post_date`,`wp_posts`.`post_title`,`wp_posts`.`post_content` FROM `wp_term_relationships` INNER JOIN `wp_posts` ON `wp_posts`.`ID` = `wp_term_relationships`.`object_id`"""
        sql += " WHERE `wp_posts`.`post_type` =%s AND `wp_posts`.`post_status`='publish'"
        # One %s placeholder per term id in the subtree.
        sql += " AND `wp_term_relationships`.`term_taxonomy_id` IN (%s)" % ','.join(map(lambda x: '%s', categorys))
        sql += " AND MATCH (`wp_posts`.`post_title`) AGAINST (%s IN NATURAL LANGUAGE MODE)"
        q += categorys
        q.append(kw)
    else:
        q.append(kw)
    sql += " ORDER BY `wp_posts`.`post_date` DESC"
    sql += " LIMIT %s,%s"
    q.append(offset)
    q.append(limit)
    rs = db.execute(sql, *q)
    results = {
        'offset': offset,
        'max': limit,
        'tax': taxonomy,
        'category': query_category(db, category) if category else None,
        'data': [{
            # 'ID' is selected unaliased; lookup by 'id' relies on the engine
            # being created with case_sensitive=False (see __main__).
            'id': x['id'],
            # NOTE(review): 'data' looks like a typo for 'date'; kept as part
            # of the public JSON shape.
            'data': x['post_date'],
            'title': x['post_title'],
            'content': x['post_content'],
            'category': post_categorys(db, x['id']),
            'meta': post_meta(db, x['id']),
            'attachment': post_attachment(db, x['id']),
            'visit': get_post_visit(db, x['id']),
        } for x in rs]
    }
    rs.close()
    return results
def query_posts(db, taxonomy, category, offset, limit):
    """List published posts of one post_type, newest first.

    When *category* is given, results are restricted to posts attached to
    that term or any of its descendants.

    :param taxonomy: wp_posts.post_type value (e.g. 'post').
    :param category: optional term id used as subtree root, or falsy.
    :param offset: LIMIT offset.
    :param limit: LIMIT row count.
    """
    q = []
    sql = """SELECT DISTINCT `wp_posts`.`ID`, `wp_posts`.`post_date`,`wp_posts`.`post_title`,`wp_posts`.`post_content` FROM `wp_posts`"""
    sql += " WHERE `wp_posts`.`post_type` =%s AND `wp_posts`.`post_status`='publish'"
    q.append(taxonomy)
    if category:
        # Restrict to the category and all of its descendants.
        categorys = category_childrens(db, category)
        categorys.append(category)
        sql = """SELECT DISTINCT `wp_posts`.`ID`, `wp_posts`.`post_date`,`wp_posts`.`post_title`,`wp_posts`.`post_content` FROM `wp_term_relationships` INNER JOIN `wp_posts` ON `wp_posts`.`ID` = `wp_term_relationships`.`object_id`"""
        sql += " WHERE `wp_posts`.`post_type` =%s AND `wp_posts`.`post_status`='publish'"
        # One %s placeholder per term id in the subtree.
        sql += " AND `wp_term_relationships`.`term_taxonomy_id` IN (%s)" % ','.join(map(lambda x: '%s', categorys))
        q += categorys
    sql += " ORDER BY `wp_posts`.`post_date` DESC"
    sql += " LIMIT %s,%s"
    q.append(offset)
    q.append(limit)
    rs = db.execute(sql, *q)
    results = {
        'offset': offset,
        'max': limit,
        'tax': taxonomy,
        'category': query_category(db, category) if category else None,
        'data': [{
            # 'ID' is selected unaliased; lookup by 'id' relies on the engine
            # being created with case_sensitive=False (see __main__).
            'id': x['id'],
            # NOTE(review): 'data' looks like a typo for 'date'; kept as part
            # of the public JSON shape.
            'data': x['post_date'],
            'title': x['post_title'],
            'content': x['post_content'],
            'category': post_categorys(db, x['id']),
            'meta': post_meta(db, x['id']),
            'attachment': post_attachment(db, x['id']),
            'visit': get_post_visit(db, x['id']),
        } for x in rs]
    }
    rs.close()
    return results
class MainHandler(tornado.web.RequestHandler):
def initialize(self, database):
self.database = database
def get(self, p):
if not p:
logging.error("功能调用错误,未提供调用方法")
raise tornado.web.HTTPError(404, "功能调用错误,未提供调用方法")
params = str(p).split('/') if p else []
attr = getattr(self, 'func_' + params[0], None)
if not attr:
logging.error("功能方法不存在(%s)" % (str(p)))
raise tornado.web.HTTPError(404, "功能方法不存在(%s)" % (str(p)))
try:
args = {}
for k in self.request.arguments.keys():
args[k] = self.get_argument(k)
attr(params[1:], args)
except tornado.web.HTTPError, e:
traceback.print_exc()
logging.error("服务器内部功能调用错误(URI: %s, Error: %s, Content: %s)" % (str(p), str(e), str(self.request.body)))
raise
except BaseException, e:
traceback.print_exc()
logging.error("服务器内部功能调用错误(URI: %s, Error: %s, Content: %s)" % (str(p), str(e), str(self.request.body)))
raise
def set_default_headers(self):
self.set_header('Server', 'Wordpress-API-Server')
def response_json(self, data):
self.set_header('Content-Type', 'application/json; charset=utf-8')
self.write(json_dumps(data))
self.write('\n')
def write_error(self, status_code, **kwargs):
logging.error(u'Error Request Url: ' + unicode(self.request.path))
logging.error(u'Error Request Body: ' + unicode(self.request.body if self.request.body else ''))
data = {'error': status_code, 'message': httplib.responses[status_code]}
for item in kwargs['exc_info']:
if isinstance(item, tornado.web.HTTPError):
data['message'] = item.log_message
elif isinstance(item, Exception):
data['message'] = str(item)
self.response_json(data)
def func_category(self, path, data):
if not path:
raise tornado.web.HTTPError(404, "方法调用错误,未提供分类ID")
db = self.database.connect()
try:
results = query_category(db, path)
if not results:
raise tornado.web.HTTPError(404, "分类信息不存在(%s)" % (str(path)))
self.response_json(results)
finally:
db.close()
def func_category(self, path, data):
id = int(data.get('id', 0))
db = self.database.connect()
try:
results = query_category(db, id)
self.response_json(results)
finally:
db.close()
def func_visit(self, path, data):
id = int(data.get('id', 0))
db = self.database.connect()
try:
visit = post_visit(db, id)
self.response_json({'visit': visit})
finally:
db.close()
def func_categorys(self, path, data):
parent_id = int(data.get('p', 0))
taxonomy = data.get('tax', 'category')
offset = max(0, int(data.get('offset', 0)))
limit = min(100, int(data.get('max', 20)))
db = self.database.connect()
try:
results = query_categorys(db, taxonomy, parent_id, offset, limit)
self.response_json(results)
finally:
db.close()
def func_post(self, path, data):
if not path:
raise tornado.web.HTTPError(404, "方法调用错误,未提供文章ID")
db = self.database.connect()
try:
results = query_post(db, path)
if not results:
raise tornado.web.HTTPError(404, "文章不存在(%s)" % (str(path)))
self.response_json(results)
finally:
db.close()
def func_posts(self, path, data):
taxonomy = data.get('tax', 'post')
category = data.get('c', None)
offset = max(0, int(data.get('offset', 0)))
limit = min(100, int(data.get('max', 20)))
db = self.database.connect()
try:
results = query_posts(db, taxonomy, category, offset, limit)
self.response_json(results)
finally:
db.close()
def func_search_posts(self, path, data):
taxonomy = data.get('tax', 'post')
category = data.get('c', None)
kw = data.get('q', '')
offset = max(0, int(data.get('offset', 0)))
limit = min(100, int(data.get('max', 20)))
db = self.database.connect()
try:
results = search_posts(db, taxonomy, category, offset, limit, kw)
self.response_json(results)
finally:
db.close()
def func_icon(self, path, data):
from PIL import Image
from PIL import ImageDraw
from PIL import ImageFont, ImageColor
text = data.get('txt', u'测')
if not isinstance(text, unicode):
text = unicode(text)
font_size = data.get('fontsize', 100)
image_size = (int(data.get('s', 200)), int(data.get('s', 200)))
background_color = ImageColor.getrgb(data['bkcolor']) if 'bkcolor' in data else (0, 0, 0, 0)
text_color = ImageColor.getrgb(data['color']) if 'color' in data else (128, 128, 128)
font = ImageFont.truetype('wryh.ttf', font_size)
im = Image.new("RGBA", image_size, background_color)
text_size = font.getsize(text)
draw = ImageDraw.Draw(im)
draw.text((int((image_size[0] - text_size[0]) / 2), int((image_size[1] - text_size[1]) / 2 - font_size / 10)),
text,
text_color, font=font)
del draw
o = io.BytesIO()
im.save(o, format="PNG")
s = o.getvalue()
self.set_header('Content-type', 'image/png')
self.set_header('Content-length', len(s))
self.write(s)
self.finish()
if __name__ == "__main__":
    # Read the MySQL connection string from settings.ini ([default] db_uri).
    settings = ConfigParser()
    settings.read('settings.ini')
    # pool_recycle=3600 avoids MySQL dropping idle pooled connections;
    # case_sensitive=False lets result rows be indexed as x['id'] even when
    # the column is selected as `ID`.
    engine = create_engine(settings.get('default', 'db_uri'), echo=False, case_sensitive=False, convert_unicode=True,
                           echo_pool=True, pool_recycle=3600)
    # One catch-all route; MainHandler dispatches on the first path segment.
    application = tornado.web.Application([
        (r"/(.*)", MainHandler, dict(database=engine)),
    ])
    application.listen(8888)
    tornado.ioloop.IOLoop.current().start()
|
{
"content_hash": "3532aa3dab4d77ca4abcaf55e8793ca3",
"timestamp": "",
"source": "github",
"line_count": 472,
"max_line_length": 636,
"avg_line_length": 31.875,
"alnum_prop": 0.6241940844134264,
"repo_name": "daijingjing/wordpress_python_api",
"id": "5a8dea0e81d8fa5843fadb1977ce1c173add188c",
"size": "15299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15299"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def index(request):
    """Landing endpoint for the tool API."""
    message = "Tool index api.."
    return HttpResponse(message)
def add_tool(request):
    """Placeholder endpoint: create a tool."""
    message = "Add tool api.."
    return HttpResponse(message)
def update_tool(request):
    """Placeholder endpoint: update a tool."""
    message = "Update tool api.."
    return HttpResponse(message)
def delete_tool(request):
    """Placeholder endpoint: delete a tool."""
    message = "Delete tool api.."
    return HttpResponse(message)
def disable_tool(request):
    """Placeholder endpoint: disable a tool.

    CONSISTENCY FIX: the parameter was named ``response``; renamed to
    ``request`` to match the sibling views and Django's convention (Django
    passes the HttpRequest positionally, so callers are unaffected).
    """
    return HttpResponse("Disable tool api..")
def enable_tool(request):
    """Placeholder endpoint: enable a tool.

    CONSISTENCY FIX: the parameter was named ``response``; renamed to
    ``request`` to match the sibling views and Django's convention (Django
    passes the HttpRequest positionally, so callers are unaffected).
    """
    return HttpResponse("Enable tool api..")
|
{
"content_hash": "9df85e5316c5913c21ece6e7ca18acd4",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 42,
"avg_line_length": 22.954545454545453,
"alnum_prop": 0.7524752475247525,
"repo_name": "adityabyreddy/opensas",
"id": "49b92b4dee3f3e9e4f94eaaad6d4c1b8441de2ce",
"size": "505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "10752"
}
],
"symlink_target": ""
}
|
"""
WSGI config for smsuela project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from dj_static import Cling
# Default to the production settings module unless the environment overrides it.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "smsuela.settings.production")
# Wrap the WSGI app with dj-static's Cling so static files are served directly.
application = Cling(get_wsgi_application())
|
{
"content_hash": "c5a506bff5ad5b2aa8908c2d4b04c0d3",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 78,
"avg_line_length": 24.38888888888889,
"alnum_prop": 0.7744874715261959,
"repo_name": "rafaelmv/smsuela",
"id": "201454f19e35609718897a9773fbd34a39ee3581",
"size": "439",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smsuela/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1144"
},
{
"name": "HTML",
"bytes": "6140"
},
{
"name": "Python",
"bytes": "12445"
}
],
"symlink_target": ""
}
|
import sys
def split_pattern(pattern):
    """Split an alien-language pattern into per-position choice strings.

    A bare character becomes a single-char entry; a parenthesized group
    like ``(ab)`` becomes one entry containing all allowed characters.

    >>> split_pattern('(ab)d(dc)')
    ['ab', 'd', 'dc']
    """
    tokens = []
    buf = ''
    in_group = False
    for c in pattern:
        if c == '(':
            buf = ''
            in_group = True
        elif c == ')':
            tokens.append(buf[:])
            in_group = False
        elif in_group:
            buf += c
        else:
            tokens.append(c)
    return tokens
def check(word, patterns):
    """Return True iff each character of *word* is allowed at its position.

    ``patterns[i]`` is the string of characters permitted at position i.
    """
    for pos, ch in enumerate(word):
        if ch not in patterns[pos]:
            return False
    return True
def main():
    """Read the Alien Language input and print one match count per pattern."""
    # First line: word length (unused), word count, test count.
    _, num_words, num_tests = Util.read_ints()
    words = [Util.read_line() for _ in range(num_words)]
    # For each test pattern, count dictionary words matching it.
    for _ in range(num_tests):
        patterns = split_pattern(Util.read_line())
        Util.result(sum(check(w, patterns) for w in words))
##### Template #####
class Util:
    """Contest-template helpers: stdin parsing and Code Jam output format."""
    count = 0  # number of results printed so far

    @classmethod
    def result(cls, result):
        """Print *result* in Code Jam's 'Case #n: ...' format."""
        cls.count += 1
        print('Case #%d: %s' % (cls.count, result))

    @staticmethod
    def read_line():
        """Read one line from stdin, stripped of surrounding whitespace."""
        return sys.stdin.readline().strip()

    @staticmethod
    def read_int():
        """Read one integer line from stdin."""
        return int(sys.stdin.readline())

    @staticmethod
    def read_ints():
        """Read a line of whitespace-separated integers from stdin."""
        return list(map(int, sys.stdin.readline().split()))

    @staticmethod
    def arr2d(y, x, init=0):
        """Build a y-by-x matrix of independent rows filled with *init*."""
        return [[init for _ in range(x)] for _ in range(y)]
# Standard entry point: only run the solver when executed directly.
if __name__ == '__main__':
    main()
|
{
"content_hash": "b32ab0a4b3aeefecb7bc4d7b49ce9fbb",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 74,
"avg_line_length": 25.857142857142858,
"alnum_prop": 0.5490331491712708,
"repo_name": "maku77/contest",
"id": "bbe95cdefd078e17e8fc5d9f78d72187355b3725",
"size": "1646",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "codejam/2009_Qualification/A-AlienLanguage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Groff",
"bytes": "184"
},
{
"name": "HTML",
"bytes": "7024"
},
{
"name": "Python",
"bytes": "41122"
}
],
"symlink_target": ""
}
|
"""
Copyright 2013 Steven Diamond
This file is part of CVXPY.
CVXPY is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
CVXPY is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with CVXPY. If not, see <http://www.gnu.org/licenses/>.
"""
import cvxpy.utilities as u
import cvxpy.lin_ops.lin_utils as lu
from cvxpy.atoms.elementwise.elementwise import Elementwise
from cvxpy.atoms.elementwise.square import square
import numpy as np
class sqrt(Elementwise):
    """Elementwise square-root atom."""
    def __init__(self, x):
        super(sqrt, self).__init__(x)
    # Returns the elementwise square root of x.
    @Elementwise.numpy_numeric
    def numeric(self, values):
        """Numeric evaluation: np.sqrt applied elementwise to the argument."""
        return np.sqrt(values[0])
    # Always positive.
    def sign_from_args(self):
        """sqrt(x) is nonnegative regardless of the argument's sign."""
        return u.Sign.POSITIVE
    # Default curvature.
    def func_curvature(self):
        """sqrt is concave (on its implicit domain x >= 0)."""
        return u.Curvature.CONCAVE
    def monotonicity(self):
        """sqrt is nondecreasing in its argument."""
        return [u.monotonicity.INCREASING]
    @staticmethod
    def graph_implementation(arg_objs, size, data=None):
        """Reduces the atom to an affine expression and list of constraints.

        Encodes t <= sqrt(x) through the epigraph form t^2 <= x, t >= 0,
        reusing square's graph implementation for the t^2 term.

        Parameters
        ----------
        arg_objs : list
            LinExpr for each argument.
        size : tuple
            The size of the resulting expression.
        data :
            Additional data required by the atom.

        Returns
        -------
        tuple
            (LinOp for objective, list of constraints)
        """
        x = arg_objs[0]
        t = lu.create_var(size)
        # x >= 0 implied by x >= t^2.
        obj, constraints = square.graph_implementation([t], size)
        return (t, constraints + [lu.create_leq(obj, x), lu.create_geq(t)])
|
{
"content_hash": "5b8b01e72f97ea07b5cdcdf6ebe017d5",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 76,
"avg_line_length": 30.652173913043477,
"alnum_prop": 0.6633569739952718,
"repo_name": "riadnassiffe/Simulator",
"id": "9207c8e4b7fe8d979b3a400c7a99c191e21efca0",
"size": "2115",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/tools/ecos/cvxpy/cvxpy/atoms/elementwise/sqrt.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "66812"
}
],
"symlink_target": ""
}
|
import numpy as np
from onnx.defs import onnx_opset_version
from onnx.reference.op_run import OpRun
def _batchnorm_test_mode(
x: np.ndarray,
s: np.ndarray,
bias: np.ndarray,
mean: np.ndarray,
var: np.ndarray,
epsilon: float = 1e-5,
) -> np.ndarray:
dims_x = len(x.shape)
dim_ones = (1,) * (dims_x - 2)
s = s.reshape(-1, *dim_ones)
bias = bias.reshape(-1, *dim_ones)
mean = mean.reshape(-1, *dim_ones)
var = var.reshape(-1, *dim_ones)
y = s * (x - mean) / np.sqrt(var + epsilon) + bias
return y.astype(x.dtype) # type: ignore
def _batchnorm_training_mode(
    x: np.ndarray,
    s: np.ndarray,
    bias: np.ndarray,
    mean: np.ndarray,
    var: np.ndarray,
    momentum: float = 0.9,
    epsilon: float = 1e-5,
) -> tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    """Apply batch normalization in training mode.

    Normalizes ``x`` with the *batch* statistics and also returns the
    running statistics updated with ``momentum``.

    BUG FIX: the return annotation previously claimed ``np.ndarray`` even
    though a 5-tuple is returned.

    Returns:
        (y, saved_mean, saved_var, output_mean, output_var), each cast to
        ``x.dtype``; output_* are ``old * momentum + batch * (1 - momentum)``.
    """
    # Reduce over every axis except the channel axis (1).
    axis = tuple(np.delete(np.arange(len(x.shape)), 1))
    saved_mean = x.mean(axis=axis)
    saved_var = x.var(axis=axis)
    output_mean = mean * momentum + saved_mean * (1 - momentum)
    output_var = var * momentum + saved_var * (1 - momentum)
    y = _batchnorm_test_mode(x, s, bias, saved_mean, saved_var, epsilon=epsilon)
    return (
        y.astype(x.dtype),
        saved_mean.astype(x.dtype),
        saved_var.astype(x.dtype),
        output_mean.astype(x.dtype),
        output_var.astype(x.dtype),
    )
class BatchNormalization_6(OpRun):
    """BatchNormalization (opset 6): mode selected by the ``is_test`` attribute."""

    def _run(self, x, scale, bias, mean, var, epsilon=None, is_test=None, momentum=None, spatial=None):  # type: ignore
        if not is_test:
            out = _batchnorm_training_mode(
                x, scale, bias, mean, var, epsilon=epsilon, momentum=momentum
            )
        else:
            out = _batchnorm_test_mode(x, scale, bias, mean, var, epsilon=epsilon)
        return (out,)
class BatchNormalization_9(OpRun):
    """BatchNormalization (opset 9-13): inference mode only."""

    def _run(self, x, scale, bias, mean, var, epsilon=None):  # type: ignore
        return (_batchnorm_test_mode(x, scale, bias, mean, var, epsilon=epsilon),)
class BatchNormalization_14(OpRun):
    """BatchNormalization (opset 14+): ``training_mode`` input picks the mode."""

    def _run(  # type: ignore
        self, x, scale, bias, mean, var, epsilon=None, momentum=None, training_mode=None
    ):
        if training_mode != 0:  # type: ignore
            # Training: discard the batch (saved) stats, keep the running ones.
            y, _, _, running_mean, running_var = _batchnorm_training_mode(
                x, scale, bias, mean, var, momentum, epsilon
            )
            return y, running_mean, running_var
        return (_batchnorm_test_mode(x, scale, bias, mean, var, epsilon=epsilon),)
# Export the implementation matching the installed onnx opset: >= 14 uses the
# training_mode input, 9-13 is inference-only, older opsets still honour the
# legacy is_test/spatial attributes.
if onnx_opset_version() >= 14:
    BatchNormalization = BatchNormalization_14
elif onnx_opset_version() >= 9:
    BatchNormalization = BatchNormalization_9  # type: ignore
else:
    BatchNormalization = BatchNormalization_6  # type: ignore
|
{
"content_hash": "f36e6b599e7646fb34966194186e0aa0",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 119,
"avg_line_length": 32.36904761904762,
"alnum_prop": 0.6090474439132034,
"repo_name": "onnx/onnx",
"id": "1e62384cec6b2b70ddd4338d2ec74077eac7a9bf",
"size": "2794",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "onnx/reference/ops/op_batch_normalization.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "546"
},
{
"name": "C",
"bytes": "2062"
},
{
"name": "C++",
"bytes": "2003844"
},
{
"name": "CMake",
"bytes": "32553"
},
{
"name": "Jupyter Notebook",
"bytes": "29310"
},
{
"name": "PowerShell",
"bytes": "1157"
},
{
"name": "Python",
"bytes": "2073844"
},
{
"name": "Shell",
"bytes": "2918"
}
],
"symlink_target": ""
}
|
from django.template import Context, Template
from . import WPTestCase
from ..factories import WPSiteFactory, WPPostFactory
class WidgetTestCase(WPTestCase):
    """Exercise the ``wjidget`` template tag end to end."""

    def _make_posts(self, site, count):
        # Create *count* published posts attached to *site*.
        for n in range(count):
            WPPostFactory(
                wp=site,
                type='post',
                status='publish',
                title='{}'.format(n),
                excerpt='{}'.format(n),
            )

    def test_wjidget_missing_site_raises_exception(self):
        """Rendering the tag for an unknown site name should raise."""
        template = Template('{% load wjidget from wjordpress %}{% wjidget "foo" %}')
        # TODO ImproperlyConfigured should be raised
        with self.assertRaises(Exception):
            template.render(Context())

    def test_wjidget_renders(self):
        """The widget renders its container and one <article> per post."""
        post_count = 3  # XXX this is less than the default limit
        site = WPSiteFactory(name='foo')
        self._make_posts(site, post_count)
        template = Template('{% load wjidget from wjordpress %}{% wjidget "foo" %}')
        rendered = template.render(Context())
        self.assertIn('wjidget-container', rendered)
        self.assertEqual(rendered.count('<article>'), post_count)

    def test_wjidget_can_be_limited(self):
        """The ``limit`` argument caps the number of rendered posts."""
        site = WPSiteFactory(name='foo')
        self._make_posts(site, 11)
        template = Template('{% load wjidget from wjordpress %}'
                            '{% wjidget "foo" limit=5 %}')
        rendered = template.render(Context())
        self.assertEqual(rendered.count('<article>'), 5)
|
{
"content_hash": "8455af78aca9b58510eb75d515f336bc",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 77,
"avg_line_length": 36.372093023255815,
"alnum_prop": 0.5505115089514067,
"repo_name": "texastribune/wjordpress",
"id": "53cb0a3d92e6185aae8bac44964512dce6f77ca0",
"size": "1564",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wjordpress/tests/test_templatetags.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "64805"
}
],
"symlink_target": ""
}
|
import json
import re
from service import *
class GoogleService(Service):
environmentSchema = {
'type': 'object',
'properties': {
'kind': {'type': 'string', 'pattern': '^Google$'},
'project': {'type': 'string'},
'region': {'type': 'string'},
'serviceAccount': {
'type': 'object',
'properties': {
'private_key_id': {'type': 'string'},
'private_key': {'type': 'string'},
'client_email': {'type': 'string'},
'client_id': {'type': 'string'},
'type': {'type': 'string', 'pattern': '^service_account$'},
},
'required': ['private_key', 'client_email'],
'additionalProperties': False,
},
'sshUser': {'type': 'string'},
},
'required': ['project', 'region', 'serviceAccount', 'sshUser'],
'additionalProperties': False,
}
imageSchema = {
'type': 'object',
'properties': {
'source': {'type': 'string'},
'machine_type': {'type': 'string'},
'zone': {'type': 'string'},
'cmds': Service.cmdsSchema,
},
'additionalProperties': False,
}
serviceSchema = {
'type': 'object',
'properties' : {
'environment' : {'type' : 'string'},
'kind' : {'type' : 'string'},
'infrastructure' : {
'type': 'object',
'properties': {
'google_compute_disk': {
'type': 'object',
'additionalProperties': {
'type': 'object',
'properties': {
'image': {
'oneOf': [
{'type': 'string'},
imageSchema,
],
},
},
'additionalProperties': True,
},
},
'google_compute_instance': {
'type': 'object',
'additionalProperties': {
'type': 'object',
'properties': {
'metadata': {
'type': 'object',
'additionalProperties': {'type': 'string'},
},
'disk': {
'type': 'array',
'items': [
{
'oneOf': [
{
'type': 'object',
'properties': {
'image': {
'oneOf': [
{'type': 'string'},
imageSchema,
],
}
},
'required': ['image'],
'additionalItems': True,
},
{
'type': 'object',
'properties': {
'disk': {'type': 'string'},
},
'required': ['disk'],
'additionalItems': True,
},
]
},
],
'additionalItems': True,
'minItems': 1,
},
'cmds': Service.cmdsSchema,
},
'required': ['metadata', 'disk', 'cmds'],
'additionalProperties': True,
},
}
},
'additionalProperties': True,
},
'outputs': {
'type': 'object',
'additionalProperties': {'type': 'string'},
},
},
'required': [
'kind',
'infrastructure',
'outputs',
],
'additionalProperties': False,
}
def compilePackerImage(self, environment_name, environment, service_name, service, image):
    """Build a Packer template for a custom GCE image.

    The image name is derived from a hash of every input that affects the
    baked image, so identical image configs map to the same image name and
    can be reused. Returns (packer_template_dict, image_name).
    """
    # XOR-combine the hashes of the config fields that determine the image.
    image_hash = 0;
    image_hash ^= self.hashString(image['machine_type'])
    image_hash ^= self.hashString(image['source'])
    image_hash ^= self.hashString(image['zone'])
    image_hash ^= self.hashProvisioners(image['cmds'])
    image_name = 'micromanage-%s' % self.encodeImageHash(image_hash)
    provisioners = self.compileProvisioners(image['cmds'])
    return {
        'builders': [
            {
                'type': 'googlecompute',
                'name': image_name,
                'image_name': image_name,
                'image_description': 'Image built by micromanage',
                'project_id': environment['project'],
                # Key file emitted per-environment by compileProvider().
                'account_file': environment_name + '-service-account-key.json',
                'machine_type': image['machine_type'],
                'source_image': image['source'],
                'instance_name': image_name,
                'zone': image['zone'],
                'ssh_username': environment['sshUser'],
            }
        ],
        'provisioners': provisioners
    }, image_name
def compileProvider(self, config, environment_name):
    """Emit the per-environment Terraform provider block and its key file.

    Returns {'tfs': {tf_filename: provider_config},
             'extras': {key_filename: service_account_json}} so the caller
    can write both the Terraform file and the Google credentials file.
    """
    environment = config['environments'][environment_name]
    terraform_file = 'environment.%s.tf' % environment_name
    service_account_key_file = environment_name + '-service-account-key.json'
    return {
        'tfs': {
            terraform_file: {
                'provider': {
                    'google': {
                        # Aliased so several environments can coexist in one plan.
                        'alias': environment_name,
                        'account_file': service_account_key_file,
                        'project': environment['project'],
                        'region' : environment['region'],
                    },
                },
            },
        },
        'extras': {
            service_account_key_file: json.dumps(environment['serviceAccount'])
        },
    }
def compile(self, config, service_name):
    """Compile one service definition into Terraform + Packer artifacts.

    Returns {'packers': {file: packer_template}, 'tfs': {file: tf_config},
    'extras': {}}. The input ``config`` is not mutated: recursive_update
    rebuilds the infrastructure/outputs trees before they are modified.
    """
    service = config[service_name]
    environment_name = service.get('environment', 'default')
    environment = config['environments'][environment_name]
    environment_name_tf = 'google.%s' % environment_name
    infra = service['infrastructure']
    packers = {}
    outputs = service['outputs']
    # NOTE(review): all_resources is defined but never called in this
    # method — possibly leftover from dependency fix-up; confirm before removal.
    def all_resources(sname, s):
        other_infra = s['infrastructure']
        r = []
        for res_kind_name, resources in other_infra.iteritems():
            for res_name in resources:
                expanded_name = self.translateSelfName(sname, res_name)
                r.append('%s.%s' % (res_kind_name, expanded_name))
        return r
    # Translate ${-} to service name
    def recursive_update(c):
        # Deep-copies the structure while rewriting every string (including
        # dict keys) through translateSelfName.
        if isinstance(c, dict):
            return {
                recursive_update(k): recursive_update(v)
                for k, v in c.iteritems()
            }
        elif isinstance(c, list):
            return [recursive_update(v) for v in c]
        elif isinstance(c, basestring):
            return self.translateSelfName(service_name, c)
        else:
            return c
    infra = recursive_update(infra)
    outputs = recursive_update(outputs)
    # Prepend all names
    # Add name and provider attributes
    # Fix up "depends_on" attributes
    for res_kind_name, res_kind_obj in infra.iteritems():
        for res_name, res in res_kind_obj.iteritems():
            # Point every resource at this environment's aliased provider.
            res['provider'] = environment_name_tf
    instances = infra.get('google_compute_instance') or {}
    disks = infra.get('google_compute_disk') or {}
    # Process image configs
    # Inline image dicts (on a boot disk or a standalone disk) are compiled
    # into Packer templates and replaced by the derived image name.
    for inst_name, inst in instances.iteritems():
        image = inst['disk'][0].get('image')
        if isinstance(image, dict):
            packer, image_name = self.compilePackerImage(environment_name, environment, service_name, service, image)
            inst['disk'][0]['image'] = image_name
            packers[image_name + '.packer.json'] = packer
    for disk_name, disk in disks.iteritems():
        image = disk['image']
        if isinstance(image, dict):
            packer, image_name = self.compilePackerImage(environment_name, environment, service_name, service, image)
            disk['image'] = image_name
            packers[image_name + '.packer.json'] = packer
    # Process commands
    for inst_name, inst in instances.iteritems():
        cmds = inst['cmds']
        metadata = inst['metadata']
        def curl_md(k):
            # Shell snippet fetching a metadata attribute from the GCE
            # instance metadata server.
            md_pref = 'http://169.254.169.254/computeMetadata/v1/instance/attributes'
            return 'curl -s -H Metadata-Flavor:Google %s/%s' % (md_pref, k)
        if 'startup-script' in metadata:
            # Move any user startup-script out of the reserved key and run it
            # from the compiled startup script instead.
            metadata['micromanage-startup-script'] = metadata['startup-script']
            metadata.pop('startup-script', None)
            cmds += ['%s | bash' % curl_md('micromanage-startup-script')]
        inst['metadata'] = metadata
        inst['metadata_startup_script'] = self.compileStartupScript(cmds)
        inst.pop('cmds', None)
    terraform_file = 'service.%s.tf' % service_name
    return {
        'packers': packers,
        'tfs': {
            terraform_file: {
                'resource': infra,
                'output': {
                    k: { 'value': outputs[k] }
                    for k in outputs
                }
            },
        },
        'extras': { },
    }
|
{
"content_hash": "c9e05878d1698f32f9820ca4d273b6a2",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 121,
"avg_line_length": 41.14869888475837,
"alnum_prop": 0.38693648929442587,
"repo_name": "iqstack/jsonlang",
"id": "029c4f163ce1e8af90cf75651895f212c330a31e",
"size": "11665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/case_studies/micromanage/service_google.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "15671"
},
{
"name": "C++",
"bytes": "247942"
},
{
"name": "CSS",
"bytes": "10171"
},
{
"name": "CoffeeScript",
"bytes": "2851"
},
{
"name": "Go",
"bytes": "6444"
},
{
"name": "HCL",
"bytes": "34418"
},
{
"name": "HTML",
"bytes": "15929"
},
{
"name": "Makefile",
"bytes": "4633"
},
{
"name": "Objective-J",
"bytes": "111"
},
{
"name": "Python",
"bytes": "54842"
},
{
"name": "Ruby",
"bytes": "497"
},
{
"name": "Shell",
"bytes": "6356"
},
{
"name": "VimL",
"bytes": "2497"
}
],
"symlink_target": ""
}
|
"""Test Qiskit's QuantumCircuit class."""
from ddt import ddt, data
import numpy as np
from qiskit import BasicAer
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit import execute
from qiskit.circuit import Gate, Instruction, Parameter, Measure
from qiskit.circuit.bit import Bit
from qiskit.circuit.classicalregister import Clbit
from qiskit.circuit.exceptions import CircuitError
from qiskit.circuit.quantumcircuit import BitLocations
from qiskit.circuit.quantumregister import AncillaQubit, AncillaRegister, Qubit
from qiskit.test import QiskitTestCase
from qiskit.circuit.library.standard_gates import SGate
from qiskit.quantum_info import Operator
from qiskit.pulse import Schedule, Play, Gaussian, DriveChannel
@ddt
class TestCircuitOperations(QiskitTestCase):
"""QuantumCircuit Operations tests."""
@data(0, 1, -1, -2)
def test_append_resolves_integers(self, index):
    """Test that integer arguments to append are correctly resolved."""
    # We need to assume that appending ``Bit`` instances will always work, so we have something
    # to test against.
    qubits = [Qubit(), Qubit()]
    clbits = [Clbit(), Clbit()]
    test = QuantumCircuit(qubits, clbits)
    test.append(Measure(), [index], [index])
    expected = QuantumCircuit(qubits, clbits)
    expected.append(Measure(), [qubits[index]], [clbits[index]])
    self.assertEqual(test, expected)

@data(np.int32(0), np.int8(-1), np.uint64(1))
def test_append_resolves_numpy_integers(self, index):
    """Test that Numpy's integers can be used to reference qubits and clbits."""
    qubits = [Qubit(), Qubit()]
    clbits = [Clbit(), Clbit()]
    test = QuantumCircuit(qubits, clbits)
    test.append(Measure(), [index], [index])
    expected = QuantumCircuit(qubits, clbits)
    # Reference circuit indexes with a plain int; both forms must agree.
    expected.append(Measure(), [qubits[int(index)]], [clbits[int(index)]])
    self.assertEqual(test, expected)

@data(
    slice(0, 2),
    slice(None, 1),
    slice(1, None),
    slice(None, None),
    slice(0, 2, 2),
    slice(2, -1, -1),
    slice(1000, 1003),  # fully out-of-range slice selects nothing, like on a list
)
def test_append_resolves_slices(self, index):
    """Test that slices can be used to reference qubits and clbits with the same semantics that
    they have on lists."""
    qregs = [QuantumRegister(2), QuantumRegister(1)]
    cregs = [ClassicalRegister(1), ClassicalRegister(2)]
    test = QuantumCircuit(*qregs, *cregs)
    test.append(Measure(), [index], [index])
    expected = QuantumCircuit(*qregs, *cregs)
    for qubit, clbit in zip(expected.qubits[index], expected.clbits[index]):
        expected.append(Measure(), [qubit], [clbit])
    self.assertEqual(test, expected)

def test_append_resolves_scalar_numpy_array(self):
    """Test that size-1 Numpy arrays can be used to index arguments. These arrays can be passed
    to ``int``, which means they sometimes might be involved in spurious casts."""
    test = QuantumCircuit(1, 1)
    test.append(Measure(), [np.array([0])], [np.array([0])])
    expected = QuantumCircuit(1, 1)
    expected.measure(0, 0)
    self.assertEqual(test, expected)
@data([3], [-3], [0, 1, 3])
def test_append_rejects_out_of_range_input(self, specifier):
    """Test that append rejects an integer that's out of range."""
    test = QuantumCircuit(2, 2)
    with self.subTest("qubit"), self.assertRaisesRegex(CircuitError, "out of range"):
        # Size the opaque instruction to the specifier so broadcasting cannot
        # be the cause of the failure.
        opaque = Instruction("opaque", len(specifier), 1, [])
        test.append(opaque, specifier, [0])
    with self.subTest("clbit"), self.assertRaisesRegex(CircuitError, "out of range"):
        opaque = Instruction("opaque", 1, len(specifier), [])
        test.append(opaque, [0], specifier)

def test_append_rejects_bits_not_in_circuit(self):
    """Test that append rejects bits that are not in the circuit."""
    test = QuantumCircuit(2, 2)
    with self.subTest("qubit"), self.assertRaisesRegex(CircuitError, "not in the circuit"):
        test.append(Measure(), [Qubit()], [test.clbits[0]])
    with self.subTest("clbit"), self.assertRaisesRegex(CircuitError, "not in the circuit"):
        test.append(Measure(), [test.qubits[0]], [Clbit()])
    with self.subTest("qubit list"), self.assertRaisesRegex(CircuitError, "not in the circuit"):
        test.append(Measure(), [[test.qubits[0], Qubit()]], [test.clbits])
    with self.subTest("clbit list"), self.assertRaisesRegex(CircuitError, "not in the circuit"):
        test.append(Measure(), [test.qubits], [[test.clbits[0], Clbit()]])

def test_append_rejects_bit_of_wrong_type(self):
    """Test that append rejects bits of the wrong type in an argument list."""
    qubits = [Qubit(), Qubit()]
    clbits = [Clbit(), Clbit()]
    test = QuantumCircuit(qubits, clbits)
    with self.subTest("c to q"), self.assertRaisesRegex(CircuitError, "Incorrect bit type"):
        test.append(Measure(), [clbits[0]], [clbits[1]])
    with self.subTest("q to c"), self.assertRaisesRegex(CircuitError, "Incorrect bit type"):
        test.append(Measure(), [qubits[0]], [qubits[1]])
    with self.subTest("none to q"), self.assertRaisesRegex(CircuitError, "Incorrect bit type"):
        test.append(Measure(), [Bit()], [clbits[0]])
    with self.subTest("none to c"), self.assertRaisesRegex(CircuitError, "Incorrect bit type"):
        test.append(Measure(), [qubits[0]], [Bit()])
    with self.subTest("none list"), self.assertRaisesRegex(CircuitError, "Incorrect bit type"):
        test.append(Measure(), [[qubits[0], Bit()]], [[clbits[0], Bit()]])

@data(0.0, 1.0, 1.0 + 0.0j, "0")
def test_append_rejects_wrong_types(self, specifier):
    """Test that various bad inputs are rejected, both given loose or in sublists."""
    test = QuantumCircuit(2, 2)
    # Use a default Instruction to be sure that there's not overridden broadcasting.
    opaque = Instruction("opaque", 1, 1, [])
    with self.subTest("q"), self.assertRaisesRegex(CircuitError, "Invalid bit index"):
        test.append(opaque, [specifier], [0])
    with self.subTest("c"), self.assertRaisesRegex(CircuitError, "Invalid bit index"):
        test.append(opaque, [0], [specifier])
    with self.subTest("q list"), self.assertRaisesRegex(CircuitError, "Invalid bit index"):
        test.append(opaque, [[specifier]], [[0]])
    with self.subTest("c list"), self.assertRaisesRegex(CircuitError, "Invalid bit index"):
        test.append(opaque, [[0]], [[specifier]])
def test_adding_self(self):
    """Test that qc += qc finishes, which can be prone to infinite while-loops.

    This can occur e.g. when a user tries

    >>> other_qc = qc
    >>> other_qc += qc  # or qc2.extend(qc)
    """
    qc = QuantumCircuit(1)
    qc.x(0)  # must contain at least one operation to end up in a infinite while-loop
    # attempt addition, times out if qc is added via reference
    qc += qc
    # finally, qc should contain two X gates
    self.assertEqual(["x", "x"], [x.operation.name for x in qc.data])

def test_combine_circuit_common(self):
    """Test combining two circuits with same registers (inplace=False)."""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    qc1 = QuantumCircuit(qr, cr)
    qc2 = QuantumCircuit(qr, cr)
    qc1.h(qr[0])
    qc1.measure(qr[0], cr[0])
    qc2.measure(qr[1], cr[1])
    new_circuit = qc1.combine(qc2)
    backend = BasicAer.get_backend("qasm_simulator")
    shots = 1024
    # Fixed simulator seed keeps the sampled counts reproducible.
    result = execute(new_circuit, backend=backend, shots=shots, seed_simulator=78).result()
    counts = result.get_counts()
    target = {"00": shots / 2, "01": shots / 2}
    threshold = 0.04 * shots  # 4% sampling tolerance
    self.assertDictEqual(qc1.count_ops(), {"h": 1, "measure": 1})  # no changes "in-place"
    self.assertDictEqual(qc2.count_ops(), {"measure": 1})  # no changes "in-place"
    self.assertDictAlmostEqual(counts, target, threshold)

def test_combine_circuit_common_plus(self):
    """Test combining two circuits with same registers (as plus)."""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    qc1 = QuantumCircuit(qr, cr)
    qc2 = QuantumCircuit(qr, cr)
    qc1.h(qr[0])
    qc1.measure(qr[0], cr[0])
    qc2.measure(qr[1], cr[1])
    new_circuit = qc1 + qc2
    backend = BasicAer.get_backend("qasm_simulator")
    shots = 1024
    result = execute(new_circuit, backend=backend, shots=shots, seed_simulator=78).result()
    counts = result.get_counts()
    target = {"00": shots / 2, "01": shots / 2}
    threshold = 0.04 * shots
    self.assertDictEqual(qc1.count_ops(), {"h": 1, "measure": 1})  # no changes "in-place"
    self.assertDictEqual(qc2.count_ops(), {"measure": 1})  # no changes "in-place"
    self.assertDictAlmostEqual(counts, target, threshold)

def test_combine_circuit_fail(self):
    """Test combining two circuits fails if registers incompatible.

    If two circuits have same name register of different size or type
    it should raise a CircuitError.
    """
    qr1 = QuantumRegister(1, "q")
    qr2 = QuantumRegister(2, "q")  # same name as qr1 but different size
    cr1 = ClassicalRegister(1, "q")  # same name but classical type
    qc1 = QuantumCircuit(qr1)
    qc2 = QuantumCircuit(qr2)
    qcr3 = QuantumCircuit(cr1)
    self.assertRaises(CircuitError, qc1.__add__, qc2)
    self.assertRaises(CircuitError, qc1.__add__, qcr3)
def test_extend_circuit(self):
    """Test extending a circuit with same registers (in place add)."""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    qc1 = QuantumCircuit(qr, cr)
    qc2 = QuantumCircuit(qr, cr)
    qc1.h(qr[0])
    qc1.measure(qr[0], cr[0])
    qc2.measure(qr[1], cr[1])
    qc1.extend(qc2)
    backend = BasicAer.get_backend("qasm_simulator")
    shots = 1024
    # Fixed simulator seed keeps the sampled counts reproducible.
    result = execute(qc1, backend=backend, shots=shots, seed_simulator=78).result()
    counts = result.get_counts()
    target = {"00": shots / 2, "01": shots / 2}
    threshold = 0.04 * shots  # 4% sampling tolerance
    self.assertDictEqual(qc1.count_ops(), {"h": 1, "measure": 2})  # changes "in-place"
    self.assertDictEqual(qc2.count_ops(), {"measure": 1})  # no changes "in-place"
    self.assertDictAlmostEqual(counts, target, threshold)

def test_extend_circuit_iadd(self):
    """Test extending a circuit with same registers (in place add)."""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    qc1 = QuantumCircuit(qr, cr)
    qc2 = QuantumCircuit(qr, cr)
    qc1.h(qr[0])
    qc1.measure(qr[0], cr[0])
    qc2.measure(qr[1], cr[1])
    qc1 += qc2
    backend = BasicAer.get_backend("qasm_simulator")
    shots = 1024
    result = execute(qc1, backend=backend, shots=shots, seed_simulator=78).result()
    counts = result.get_counts()
    target = {"00": shots / 2, "01": shots / 2}
    threshold = 0.04 * shots
    self.assertDictEqual(qc1.count_ops(), {"h": 1, "measure": 2})  # changes "in-place"
    self.assertDictEqual(qc2.count_ops(), {"measure": 1})  # no changes "in-place"
    self.assertDictAlmostEqual(counts, target, threshold)

def test_extend_circuit_fail(self):
    """Test extending a circuit fails if registers incompatible.

    If two circuits have same name register of different size or type
    it should raise a CircuitError.
    """
    qr1 = QuantumRegister(1, "q")
    qr2 = QuantumRegister(2, "q")  # same name as qr1 but different size
    cr1 = ClassicalRegister(1, "q")  # same name but classical type
    qc1 = QuantumCircuit(qr1)
    qc2 = QuantumCircuit(qr2)
    qcr3 = QuantumCircuit(cr1)
    self.assertRaises(CircuitError, qc1.__iadd__, qc2)
    self.assertRaises(CircuitError, qc1.__iadd__, qcr3)
def test_extend_circuit_adds_qubits(self):
    """Test that extending a circuit with differing registers adds the qubits."""
    target = QuantumCircuit()
    register = QuantumRegister(1, "q")
    source = QuantumCircuit(register)
    target.extend(source)
    self.assertListEqual(target.qubits, register[:])
def test_compose_circuit(self):
    """Test composing two circuits"""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    qc1 = QuantumCircuit(qr, cr)
    qc2 = QuantumCircuit(qr, cr)
    qc1.h(qr[0])
    qc1.measure(qr[0], cr[0])
    qc2.measure(qr[1], cr[1])
    qc3 = qc1.compose(qc2)
    backend = BasicAer.get_backend("qasm_simulator")
    shots = 1024
    # Fixed simulator seed keeps the sampled counts reproducible.
    result = execute(qc3, backend=backend, shots=shots, seed_simulator=78).result()
    counts = result.get_counts()
    target = {"00": shots / 2, "01": shots / 2}
    threshold = 0.04 * shots  # 4% sampling tolerance
    self.assertDictEqual(qc3.count_ops(), {"h": 1, "measure": 2})
    self.assertDictEqual(qc1.count_ops(), {"h": 1, "measure": 1})  # no changes "in-place"
    self.assertDictEqual(qc2.count_ops(), {"measure": 1})  # no changes "in-place"
    self.assertDictAlmostEqual(counts, target, threshold)

def test_compose_circuit_and(self):
    """Test composing two circuits using & operator"""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    qc1 = QuantumCircuit(qr, cr)
    qc2 = QuantumCircuit(qr, cr)
    qc1.h(qr[0])
    qc1.measure(qr[0], cr[0])
    qc2.measure(qr[1], cr[1])
    qc3 = qc1 & qc2
    backend = BasicAer.get_backend("qasm_simulator")
    shots = 1024
    result = execute(qc3, backend=backend, shots=shots, seed_simulator=78).result()
    counts = result.get_counts()
    target = {"00": shots / 2, "01": shots / 2}
    threshold = 0.04 * shots
    self.assertDictEqual(qc3.count_ops(), {"h": 1, "measure": 2})
    self.assertDictEqual(qc1.count_ops(), {"h": 1, "measure": 1})  # no changes "in-place"
    self.assertDictEqual(qc2.count_ops(), {"measure": 1})  # no changes "in-place"
    self.assertDictAlmostEqual(counts, target, threshold)

def test_compose_circuit_iand(self):
    """Test composing circuits using &= operator (in place)"""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    qc1 = QuantumCircuit(qr, cr)
    qc2 = QuantumCircuit(qr, cr)
    qc1.h(qr[0])
    qc1.measure(qr[0], cr[0])
    qc2.measure(qr[1], cr[1])
    qc1 &= qc2
    backend = BasicAer.get_backend("qasm_simulator")
    shots = 1024
    result = execute(qc1, backend=backend, shots=shots, seed_simulator=78).result()
    counts = result.get_counts()
    target = {"00": shots / 2, "01": shots / 2}
    threshold = 0.04 * shots
    self.assertDictEqual(qc1.count_ops(), {"h": 1, "measure": 2})  # changes "in-place"
    self.assertDictEqual(qc2.count_ops(), {"measure": 1})  # no changes "in-place"
    self.assertDictAlmostEqual(counts, target, threshold)

def test_compose_circuit_fail_circ_size(self):
    """Test composing circuit fails when number of wires in circuit is not enough"""
    qr1 = QuantumRegister(2)
    qr2 = QuantumRegister(4)
    # Creating our circuits
    qc1 = QuantumCircuit(qr1)
    qc1.x(0)
    qc1.h(1)
    qc2 = QuantumCircuit(qr2)
    qc2.h([1, 2])
    qc2.cx(2, 3)
    # Composing will fail because qc2 requires 4 wires
    self.assertRaises(CircuitError, qc1.compose, qc2)

def test_compose_circuit_fail_arg_size(self):
    """Test composing circuit fails when arg size does not match number of wires"""
    qr1 = QuantumRegister(2)
    qr2 = QuantumRegister(2)
    qc1 = QuantumCircuit(qr1)
    qc1.h(0)
    qc2 = QuantumCircuit(qr2)
    qc2.cx(0, 1)
    # qc2 spans two qubits but only one mapping qubit is supplied.
    self.assertRaises(CircuitError, qc1.compose, qc2, qubits=[0])
def test_tensor_circuit(self):
    """Test tensoring two circuits"""
    qc1 = QuantumCircuit(1, 1)
    qc2 = QuantumCircuit(1, 1)
    qc2.h(0)
    qc2.measure(0, 0)
    qc1.measure(0, 0)
    qc3 = qc1.tensor(qc2)
    backend = BasicAer.get_backend("qasm_simulator")
    shots = 1024
    # Fixed simulator seed keeps the sampled counts reproducible.
    result = execute(qc3, backend=backend, shots=shots, seed_simulator=78).result()
    counts = result.get_counts()
    target = {"00": shots / 2, "01": shots / 2}
    threshold = 0.04 * shots  # 4% sampling tolerance
    self.assertDictEqual(qc3.count_ops(), {"h": 1, "measure": 2})
    self.assertDictEqual(qc2.count_ops(), {"h": 1, "measure": 1})  # no changes "in-place"
    self.assertDictEqual(qc1.count_ops(), {"measure": 1})  # no changes "in-place"
    self.assertDictAlmostEqual(counts, target, threshold)

def test_tensor_circuit_xor(self):
    """Test tensoring two circuits using ^ operator"""
    qc1 = QuantumCircuit(1, 1)
    qc2 = QuantumCircuit(1, 1)
    qc2.h(0)
    qc2.measure(0, 0)
    qc1.measure(0, 0)
    qc3 = qc1 ^ qc2
    backend = BasicAer.get_backend("qasm_simulator")
    shots = 1024
    result = execute(qc3, backend=backend, shots=shots, seed_simulator=78).result()
    counts = result.get_counts()
    target = {"00": shots / 2, "01": shots / 2}
    threshold = 0.04 * shots
    self.assertDictEqual(qc3.count_ops(), {"h": 1, "measure": 2})
    self.assertDictEqual(qc2.count_ops(), {"h": 1, "measure": 1})  # no changes "in-place"
    self.assertDictEqual(qc1.count_ops(), {"measure": 1})  # no changes "in-place"
    self.assertDictAlmostEqual(counts, target, threshold)

def test_tensor_circuit_ixor(self):
    """Test tensoring two circuits using ^= operator"""
    qc1 = QuantumCircuit(1, 1)
    qc2 = QuantumCircuit(1, 1)
    qc2.h(0)
    qc2.measure(0, 0)
    qc1.measure(0, 0)
    qc1 ^= qc2
    backend = BasicAer.get_backend("qasm_simulator")
    shots = 1024
    result = execute(qc1, backend=backend, shots=shots, seed_simulator=78).result()
    counts = result.get_counts()
    target = {"00": shots / 2, "01": shots / 2}
    threshold = 0.04 * shots
    self.assertDictEqual(qc1.count_ops(), {"h": 1, "measure": 2})  # changes "in-place"
    self.assertDictEqual(qc2.count_ops(), {"h": 1, "measure": 1})  # no changes "in-place"
    self.assertDictAlmostEqual(counts, target, threshold)

def test_measure_args_type_cohesion(self):
    """Test for proper args types for measure function."""
    quantum_reg = QuantumRegister(3)
    classical_reg_0 = ClassicalRegister(1)
    classical_reg_1 = ClassicalRegister(2)
    quantum_circuit = QuantumCircuit(quantum_reg, classical_reg_0, classical_reg_1)
    quantum_circuit.h(quantum_reg)
    with self.assertRaises(CircuitError) as ctx:
        # Three qubits cannot broadcast onto a two-bit classical register.
        quantum_circuit.measure(quantum_reg, classical_reg_1)
    self.assertEqual(ctx.exception.message, "register size error")
def test_copy_circuit(self):
    """Test that ``copy`` produces a circuit equal to the original."""
    quantum = QuantumRegister(2)
    classical = ClassicalRegister(2)
    original = QuantumCircuit(quantum, classical)
    original.h(quantum[0])
    for position in range(2):
        original.measure(quantum[position], classical[position])
    self.assertEqual(original, original.copy())
def test_copy_copies_registers(self):
    """Test copy copies the registers not via reference."""
    qc = QuantumCircuit(1, 1)
    copied = qc.copy()
    copied.add_register(QuantumRegister(1, "additional_q"))
    copied.add_register(ClassicalRegister(1, "additional_c"))
    # Mutating the copy's register lists must not affect the original.
    self.assertEqual(len(qc.qregs), 1)
    self.assertEqual(len(copied.qregs), 2)
    self.assertEqual(len(qc.cregs), 1)
    self.assertEqual(len(copied.cregs), 2)

def test_copy_empty_like_circuit(self):
    """Test copy_empty_like method makes a clear copy."""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    qc = QuantumCircuit(qr, cr, global_phase=1.0, name="qc", metadata={"key": "value"})
    qc.h(qr[0])
    qc.measure(qr[0], cr[0])
    qc.measure(qr[1], cr[1])
    sched = Schedule(Play(Gaussian(160, 0.1, 40), DriveChannel(0)))
    qc.add_calibration("h", [0, 1], sched)
    copied = qc.copy_empty_like()
    # After clearing the original, it should equal the empty-like copy and
    # share the same non-instruction attributes.
    qc.clear()
    self.assertEqual(qc, copied)
    self.assertEqual(qc.global_phase, copied.global_phase)
    self.assertEqual(qc.name, copied.name)
    self.assertEqual(qc.metadata, copied.metadata)
    self.assertEqual(qc.calibrations, copied.calibrations)
    # Optional name argument overrides the copied name.
    copied = qc.copy_empty_like("copy")
    self.assertEqual(copied.name, "copy")

def test_clear_circuit(self):
    """Test clear method deletes instructions in circuit."""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    qc = QuantumCircuit(qr, cr)
    qc.h(qr[0])
    qc.measure(qr[0], cr[0])
    qc.measure(qr[1], cr[1])
    qc.clear()
    self.assertEqual(len(qc.data), 0)
    self.assertEqual(len(qc._parameter_table), 0)
def test_measure_active(self):
    """Test measure_active

    Applies measurements only to non-idle qubits. Creates a ClassicalRegister of size equal to
    the amount of non-idle qubits to store the measured values.
    """
    qr = QuantumRegister(4)
    cr = ClassicalRegister(2, "measure")
    circuit = QuantumCircuit(qr)
    circuit.h(qr[0])
    circuit.h(qr[2])  # qubits 1 and 3 stay idle
    circuit.measure_active()
    expected = QuantumCircuit(qr)
    expected.h(qr[0])
    expected.h(qr[2])
    expected.add_register(cr)
    expected.barrier()
    expected.measure([qr[0], qr[2]], [cr[0], cr[1]])
    self.assertEqual(expected, circuit)

def test_measure_active_copy(self):
    """Test measure_active copy

    Applies measurements only to non-idle qubits. Creates a ClassicalRegister of size equal to
    the amount of non-idle qubits to store the measured values.
    """
    qr = QuantumRegister(4)
    cr = ClassicalRegister(2, "measure")
    circuit = QuantumCircuit(qr)
    circuit.h(qr[0])
    circuit.h(qr[2])
    new_circuit = circuit.measure_active(inplace=False)
    expected = QuantumCircuit(qr)
    expected.h(qr[0])
    expected.h(qr[2])
    expected.add_register(cr)
    expected.barrier()
    expected.measure([qr[0], qr[2]], [cr[0], cr[1]])
    self.assertEqual(expected, new_circuit)
    # Original must be untouched when inplace=False.
    self.assertFalse("measure" in circuit.count_ops().keys())

def test_measure_active_repetition(self):
    """Test measure_active in a circuit with a 'measure' creg.

    measure_active should be aware that the creg 'measure' might exist.
    """
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2, "measure")
    circuit = QuantumCircuit(qr, cr)
    circuit.h(qr)
    circuit.measure_active()
    self.assertEqual(len(circuit.cregs), 2)  # Two cregs
    self.assertEqual(len(circuit.cregs[0]), 2)  # Both length 2
    self.assertEqual(len(circuit.cregs[1]), 2)
def test_measure_all(self):
    """Test measure_all applies measurements to all qubits.

    Creates a ClassicalRegister of size equal to the total amount of qubits to
    store those measured values.
    """
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2, "meas")
    circuit = QuantumCircuit(qr)
    circuit.measure_all()
    expected = QuantumCircuit(qr, cr)
    expected.barrier()
    expected.measure(qr, cr)
    self.assertEqual(expected, circuit)

def test_measure_all_not_add_bits_equal(self):
    """Test measure_all applies measurements to all qubits.

    Does not create a new ClassicalRegister if the existing one is big enough.
    """
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2, "meas")
    circuit = QuantumCircuit(qr, cr)
    circuit.measure_all(add_bits=False)
    expected = QuantumCircuit(qr, cr)
    expected.barrier()
    expected.measure(qr, cr)
    self.assertEqual(expected, circuit)

def test_measure_all_not_add_bits_bigger(self):
    """Test measure_all applies measurements to all qubits.

    Does not create a new ClassicalRegister if the existing one is big enough.
    """
    qr = QuantumRegister(2)
    cr = ClassicalRegister(3, "meas")
    circuit = QuantumCircuit(qr, cr)
    circuit.measure_all(add_bits=False)
    expected = QuantumCircuit(qr, cr)
    expected.barrier()
    # Only the first two classical bits receive measurements.
    expected.measure(qr, cr[0:2])
    self.assertEqual(expected, circuit)

def test_measure_all_not_add_bits_smaller(self):
    """Test measure_all applies measurements to all qubits.

    Raises an error if there are not enough classical bits to store the measurements.
    """
    qr = QuantumRegister(3)
    cr = ClassicalRegister(2, "meas")
    circuit = QuantumCircuit(qr, cr)
    with self.assertRaisesRegex(CircuitError, "The number of classical bits"):
        circuit.measure_all(add_bits=False)

def test_measure_all_copy(self):
    """Test measure_all with inplace=False"""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2, "meas")
    circuit = QuantumCircuit(qr)
    new_circuit = circuit.measure_all(inplace=False)
    expected = QuantumCircuit(qr, cr)
    expected.barrier()
    expected.measure(qr, cr)
    self.assertEqual(expected, new_circuit)
    # Original must be untouched when inplace=False.
    self.assertFalse("measure" in circuit.count_ops().keys())

def test_measure_all_repetition(self):
    """Test measure_all in a circuit with a 'measure' creg.

    measure_all should be aware that the creg 'measure' might exist.
    """
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2, "measure")
    circuit = QuantumCircuit(qr, cr)
    circuit.measure_all()
    self.assertEqual(len(circuit.cregs), 2)  # Two cregs
    self.assertEqual(len(circuit.cregs[0]), 2)  # Both length 2
    self.assertEqual(len(circuit.cregs[1]), 2)
def test_remove_final_measurements(self):
    """Test remove_final_measurements

    Removes all measurements at end of circuit.
    """
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2, "meas")
    circuit = QuantumCircuit(qr, cr)
    circuit.measure(qr, cr)
    circuit.remove_final_measurements()
    # The now-unused creg is removed along with the measurements.
    expected = QuantumCircuit(qr)
    self.assertEqual(expected, circuit)

def test_remove_final_measurements_copy(self):
    """Test remove_final_measurements on copy

    Removes all measurements at end of circuit.
    """
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2, "meas")
    circuit = QuantumCircuit(qr, cr)
    circuit.measure(qr, cr)
    new_circuit = circuit.remove_final_measurements(inplace=False)
    expected = QuantumCircuit(qr)
    self.assertEqual(expected, new_circuit)
    # Original must keep its measurements when inplace=False.
    self.assertTrue("measure" in circuit.count_ops().keys())

def test_remove_final_measurements_copy_with_parameters(self):
    """Test remove_final_measurements doesn't corrupt ParameterTable

    See https://github.com/Qiskit/qiskit-terra/issues/6108 for more details
    """
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2, "meas")
    theta = Parameter("theta")
    circuit = QuantumCircuit(qr, cr)
    circuit.rz(theta, qr)
    circuit.measure(qr, cr)
    circuit.remove_final_measurements()
    # A copy after removal exercises the ParameterTable; equality would fail
    # if the table were corrupted.
    copy = circuit.copy()
    self.assertEqual(copy, circuit)

def test_remove_final_measurements_multiple_measures(self):
    """Test remove_final_measurements only removes measurements at the end of the circuit

    remove_final_measurements should not remove measurements in the beginning or middle of the
    circuit.
    """
    qr = QuantumRegister(2)
    cr = ClassicalRegister(1)
    circuit = QuantumCircuit(qr, cr)
    circuit.measure(qr[0], cr)
    circuit.h(0)
    circuit.measure(qr[0], cr)
    circuit.h(0)
    circuit.measure(qr[0], cr)  # only this final measurement should go
    circuit.remove_final_measurements()
    expected = QuantumCircuit(qr, cr)
    expected.measure(qr[0], cr)
    expected.h(0)
    expected.measure(qr[0], cr)
    expected.h(0)
    self.assertEqual(expected, circuit)

def test_remove_final_measurements_5802(self):
    """Test remove_final_measurements removes classical bits
    https://github.com/Qiskit/qiskit-terra/issues/5802.
    """
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    circuit = QuantumCircuit(qr, cr)
    circuit.measure(qr, cr)
    circuit.remove_final_measurements()
    self.assertEqual(circuit.cregs, [])
    self.assertEqual(circuit.clbits, [])

def test_remove_final_measurements_7089(self):
    """Test remove_final_measurements removes resulting unused registers
    even if not all bits were measured into.
    https://github.com/Qiskit/qiskit-terra/issues/7089.
    """
    circuit = QuantumCircuit(2, 5)
    circuit.measure(0, 0)
    circuit.measure(1, 1)
    circuit.remove_final_measurements(inplace=True)
    self.assertEqual(circuit.cregs, [])
    self.assertEqual(circuit.clbits, [])

def test_remove_final_measurements_bit_locations(self):
    """Test remove_final_measurements properly recalculates clbit indices
    and preserves order of remaining cregs and clbits.
    """
    c0 = ClassicalRegister(1)
    c1_0 = Clbit()
    c2 = ClassicalRegister(1)
    c3 = ClassicalRegister(1)
    # add an individual bit that's not in any register of this circuit
    circuit = QuantumCircuit(QuantumRegister(1), c0, [c1_0], c2, c3)
    circuit.measure(0, c1_0)
    circuit.measure(0, c2[0])
    # assert cregs and clbits before measure removal
    self.assertEqual(circuit.cregs, [c0, c2, c3])
    self.assertEqual(circuit.clbits, [c0[0], c1_0, c2[0], c3[0]])
    # assert clbit indices prior to measure removal
    self.assertEqual(circuit.find_bit(c0[0]), BitLocations(0, [(c0, 0)]))
    self.assertEqual(circuit.find_bit(c1_0), BitLocations(1, []))
    self.assertEqual(circuit.find_bit(c2[0]), BitLocations(2, [(c2, 0)]))
    self.assertEqual(circuit.find_bit(c3[0]), BitLocations(3, [(c3, 0)]))
    circuit.remove_final_measurements()
    # after measure removal, creg c2 should be gone, as should lone bit c1_0
    # and c0 should still come before c3
    self.assertEqual(circuit.cregs, [c0, c3])
    self.assertEqual(circuit.clbits, [c0[0], c3[0]])
    # there should be no gaps in clbit indices
    # e.g. c3[0] is now the second clbit
    self.assertEqual(circuit.find_bit(c0[0]), BitLocations(0, [(c0, 0)]))
    self.assertEqual(circuit.find_bit(c3[0]), BitLocations(1, [(c3, 0)]))
def test_reverse(self):
    """Test reverse method reverses but does not invert."""
    qc = QuantumCircuit(2, 2)
    qc.h(0)
    qc.s(1)
    qc.cx(0, 1)
    qc.measure([0, 1], [0, 1])
    qc.x(0)
    qc.y(1)
    # Expected circuit is the same instructions in reverse order; the gates
    # themselves are NOT inverted (e.g. S stays S, not Sdg).
    expected = QuantumCircuit(2, 2)
    expected.y(1)
    expected.x(0)
    expected.measure([0, 1], [0, 1])
    expected.cx(0, 1)
    expected.s(1)
    expected.h(0)
    self.assertEqual(qc.reverse_ops(), expected)

def test_repeat(self):
    """Test repeating the circuit works."""
    qr = QuantumRegister(2)
    cr = ClassicalRegister(2)
    qc = QuantumCircuit(qr, cr)
    qc.h(0)
    qc.cx(0, 1)
    qc.barrier()
    qc.h(0).c_if(cr, 1)
    with self.subTest("repeat 0 times"):
        rep = qc.repeat(0)
        self.assertEqual(rep, QuantumCircuit(qr, cr))
    with self.subTest("repeat 3 times"):
        # repeat wraps the circuit as an instruction appended n times.
        inst = qc.to_instruction()
        ref = QuantumCircuit(qr, cr)
        for _ in range(3):
            ref.append(inst, ref.qubits, ref.clbits)
        rep = qc.repeat(3)
        self.assertEqual(rep, ref)

@data(0, 1, 4)
def test_repeat_global_phase(self, num):
    """Test the global phase is properly handled upon repeat."""
    phase = 0.123
    qc = QuantumCircuit(1, global_phase=phase)
    # Repeating n times must multiply the phase n-fold: exp(i*phase*n) * I.
    expected = np.exp(1j * phase * num) * np.identity(2)
    np.testing.assert_array_almost_equal(Operator(qc.repeat(num)).data, expected)
def test_bind_global_phase(self):
    """Test binding global phase."""
    x = Parameter("x")
    circuit = QuantumCircuit(1, global_phase=x)
    self.assertEqual(circuit.parameters, {x})
    bound = circuit.bind_parameters({x: 2})
    self.assertEqual(bound.global_phase, 2)
    # Binding must consume the parameter.
    self.assertEqual(bound.parameters, set())

def test_bind_parameter_in_phase_and_gate(self):
    """Test binding a parameter present in the global phase and the gates."""
    x = Parameter("x")
    circuit = QuantumCircuit(1, global_phase=x)
    circuit.rx(x, 0)
    self.assertEqual(circuit.parameters, {x})
    ref = QuantumCircuit(1, global_phase=2)
    ref.rx(2, 0)
    bound = circuit.bind_parameters({x: 2})
    self.assertEqual(bound, ref)
    self.assertEqual(bound.parameters, set())

def test_power(self):
    """Test taking the circuit to a power works."""
    qc = QuantumCircuit(2)
    qc.cx(0, 1)
    qc.rx(0.2, 1)
    gate = qc.to_gate()
    with self.subTest("power(int >= 0) equals repeat"):
        self.assertEqual(qc.power(4), qc.repeat(4))
    with self.subTest("explicit matrix power"):
        self.assertEqual(qc.power(4, matrix_power=True).data[0].operation, gate.power(4))
    with self.subTest("float power"):
        self.assertEqual(qc.power(1.23).data[0].operation, gate.power(1.23))
    with self.subTest("negative power"):
        self.assertEqual(qc.power(-2).data[0].operation, gate.power(-2))

def test_power_parameterized_circuit(self):
    """Test taking a parameterized circuit to a power."""
    theta = Parameter("th")
    qc = QuantumCircuit(2)
    qc.cx(0, 1)
    qc.rx(theta, 1)
    with self.subTest("power(int >= 0) equals repeat"):
        self.assertEqual(qc.power(4), qc.repeat(4))
    with self.subTest("cannot to matrix power if parameterized"):
        # Non-integer powers need the matrix, which an unbound parameter forbids.
        with self.assertRaises(CircuitError):
            _ = qc.power(0.5)
def test_control(self):
    """Test controlling the circuit."""
    qc = QuantumCircuit(2, name="my_qc")
    qc.cry(0.2, 0, 1)
    c_qc = qc.control()
    with self.subTest("return type is circuit"):
        self.assertIsInstance(c_qc, QuantumCircuit)
    with self.subTest("test name"):
        self.assertEqual(c_qc.name, "c_my_qc")
    with self.subTest("repeated control"):
        # Each control() call adds one control qubit.
        cc_qc = c_qc.control()
        self.assertEqual(cc_qc.num_qubits, c_qc.num_qubits + 1)
    with self.subTest("controlled circuit has same parameter"):
        param = Parameter("p")
        qc.rx(param, 0)
        c_qc = qc.control()
        self.assertEqual(qc.parameters, c_qc.parameters)
    with self.subTest("non-unitary operation raises"):
        qc.reset(0)
        with self.assertRaises(CircuitError):
            _ = qc.control()

def test_control_implementation(self):
    """Run a test case for controlling the circuit, which should use ``Gate.control``."""
    qc = QuantumCircuit(3)
    qc.cx(0, 1)
    qc.cry(0.2, 0, 1)
    qc.t(0)
    qc.append(SGate().control(2), [0, 1, 2])
    qc.iswap(2, 0)
    c_qc = qc.control(2, ctrl_state="10")
    # Reference: control the whole circuit converted to a single gate.
    cgate = qc.to_gate().control(2, ctrl_state="10")
    ref = QuantumCircuit(*c_qc.qregs)
    ref.append(cgate, ref.qubits)
    self.assertEqual(ref, c_qc)

@data("gate", "instruction")
def test_repeat_appended_type(self, subtype):
    """Test repeat appends Gate if circuit contains only gates and Instructions otherwise."""
    sub = QuantumCircuit(2)
    sub.x(0)
    if subtype == "gate":
        sub = sub.to_gate()
    else:
        sub = sub.to_instruction()
    qc = QuantumCircuit(2)
    qc.append(sub, [0, 1])
    rep = qc.repeat(3)
    if subtype == "gate":
        self.assertTrue(all(isinstance(op.operation, Gate) for op in rep.data))
    else:
        self.assertTrue(all(isinstance(op.operation, Instruction) for op in rep.data))
def test_reverse_bits(self):
    """Test reversing order of bits."""
    circuit = QuantumCircuit(3, 2)
    circuit.h(0)
    circuit.s(1)
    circuit.cx(0, 1)
    circuit.measure(0, 1)
    circuit.x(0)
    circuit.y(1)
    circuit.global_phase = -1
    # The same operations applied to mirror-image indices (qubit i -> 2 - i,
    # clbit j -> 1 - j); the global phase is unaffected by the reversal.
    mirrored = QuantumCircuit(3, 2)
    mirrored.h(2)
    mirrored.s(1)
    mirrored.cx(2, 1)
    mirrored.measure(2, 0)
    mirrored.x(2)
    mirrored.y(1)
    mirrored.global_phase = -1
    self.assertEqual(circuit.reverse_bits(), mirrored)
def test_reverse_bits_boxed(self):
    """Test reversing order of bits in a hierarchical circuit."""
    # Inner circuit: a CX ladder over three qubits, wrapped as a gate.
    wide_cx = QuantumCircuit(3)
    wide_cx.cx(0, 1)
    wide_cx.cx(1, 2)
    wide_cxg = wide_cx.to_gate()
    cx_box = QuantumCircuit(3)
    cx_box.append(wide_cxg, [0, 1, 2])
    # Reversal must commute with decomposition of the boxed gate.
    expected = QuantumCircuit(3)
    expected.cx(2, 1)
    expected.cx(1, 0)
    self.assertEqual(cx_box.reverse_bits().decompose(), expected)
    self.assertEqual(cx_box.decompose().reverse_bits(), expected)
    # box one more layer to be safe.
    cx_box_g = cx_box.to_gate()
    cx_box_box = QuantumCircuit(4)
    cx_box_box.append(cx_box_g, [0, 1, 2])
    cx_box_box.cx(0, 3)
    expected2 = QuantumCircuit(4)
    expected2.cx(3, 2)
    expected2.cx(2, 1)
    expected2.cx(3, 0)
    # Two decompose() calls unwrap both levels of boxing.
    self.assertEqual(cx_box_box.reverse_bits().decompose().decompose(), expected2)
def test_reverse_bits_with_registers(self):
    """Test reversing order of bits when registers are present."""
    qr1 = QuantumRegister(3, "a")
    qr2 = QuantumRegister(2, "b")
    circuit = QuantumCircuit(qr1, qr2)
    # A CX chain running down all five qubits.
    circuit.h(qr1[0])
    circuit.cx(qr1[0], qr1[1])
    circuit.cx(qr1[1], qr1[2])
    circuit.cx(qr1[2], qr2[0])
    circuit.cx(qr2[0], qr2[1])
    # Reversal flips both the register order and each register's bit order,
    # so the chain runs back up from the far end.
    mirrored = QuantumCircuit(qr2, qr1)
    mirrored.h(qr1[2])
    mirrored.cx(qr1[2], qr1[1])
    mirrored.cx(qr1[1], qr1[0])
    mirrored.cx(qr1[0], qr2[1])
    mirrored.cx(qr2[1], qr2[0])
    self.assertEqual(circuit.reverse_bits(), mirrored)
def test_reverse_bits_with_overlapped_registers(self):
    """Test reversing order of bits when registers are overlapped."""
    qr1 = QuantumRegister(2, "a")
    # "b" aliases both of "a"'s qubits and adds one loose qubit of its own.
    qr2 = QuantumRegister(bits=[qr1[0], qr1[1], Qubit()], name="b")
    qc = QuantumCircuit(qr1, qr2)
    qc.h(qr1[0])
    qc.cx(qr1[0], qr1[1])
    qc.cx(qr1[1], qr2[2])
    # After reversal "b" is rebuilt with the shared qubits at the end.
    qr2 = QuantumRegister(bits=[Qubit(), qr1[0], qr1[1]], name="b")
    expected = QuantumCircuit(qr2, qr1)
    expected.h(qr1[1])
    expected.cx(qr1[1], qr1[0])
    expected.cx(qr1[0], qr2[0])
    self.assertEqual(qc.reverse_bits(), expected)
def test_reverse_bits_with_registerless_bits(self):
    """Test reversing order of registerless bits."""
    qubits = [Qubit(), Qubit()]
    clbits = [Clbit(), Clbit()]
    circuit = QuantumCircuit(qubits, clbits)
    circuit.h(0)
    circuit.cx(0, 1)
    circuit.x(0).c_if(1, True)
    circuit.measure(0, 0)
    # Mirror image: the bit lists are reversed and every index i becomes 1 - i,
    # including the classical bit driving the conditional.
    mirrored = QuantumCircuit(clbits[::-1], qubits[::-1])
    mirrored.h(1)
    mirrored.cx(1, 0)
    mirrored.x(1).c_if(0, True)
    mirrored.measure(1, 1)
    self.assertEqual(circuit.reverse_bits(), mirrored)
def test_reverse_bits_with_registers_and_bits(self):
    """Test reversing order of bits with registers and registerless bits."""
    register = QuantumRegister(2, "a")
    loose = Qubit()
    circuit = QuantumCircuit(register, [loose])
    circuit.h(register[0])
    circuit.cx(register[0], register[1])
    circuit.cx(register[1], loose)
    # Reversed layout: the loose qubit comes first, then the register with its
    # own bit order flipped.
    mirrored = QuantumCircuit([loose], register)
    mirrored.h(register[1])
    mirrored.cx(register[1], register[0])
    mirrored.cx(register[0], loose)
    self.assertEqual(circuit.reverse_bits(), mirrored)
def test_reverse_bits_with_mixed_overlapped_registers(self):
    """Test reversing order of bits with overlapped registers and registerless bits."""
    q = Qubit()
    # qr1 and qr2 share qr1[1]; a fourth, fully loose qubit is appended last.
    qr1 = QuantumRegister(bits=[q, Qubit()], name="qr1")
    qr2 = QuantumRegister(bits=[qr1[1], Qubit()], name="qr2")
    qc = QuantumCircuit(qr1, qr2, [Qubit()])
    qc.h(q)
    qc.cx(qr1[0], qr1[1])
    qc.cx(qr1[1], qr2[1])
    qc.cx(2, 3)
    # Expected layout after reversal: the loose qubit first, qr2 rebuilt as a
    # fresh two-qubit register, then qr1 rebuilt sharing qr2's last qubit.
    qr2 = QuantumRegister(2, "qr2")
    qr1 = QuantumRegister(bits=[qr2[1], q], name="qr1")
    expected = QuantumCircuit([Qubit()], qr2, qr1)
    expected.h(qr1[1])
    expected.cx(qr1[1], qr1[0])
    expected.cx(qr1[0], qr2[0])
    expected.cx(1, 0)
    self.assertEqual(qc.reverse_bits(), expected)
def test_cnot_alias(self):
    """Test that the cnot method alias adds a cx gate."""
    aliased = QuantumCircuit(2)
    aliased.cnot(0, 1)
    # Building the same circuit with cx directly must give an equal circuit.
    canonical = QuantumCircuit(2)
    canonical.cx(0, 1)
    self.assertEqual(aliased, canonical)
def test_inverse(self):
    """Test inverse circuit."""
    register = QuantumRegister(2)
    circuit = QuantumCircuit(register, global_phase=0.5)
    circuit.h(0)
    circuit.barrier(register)
    circuit.t(1)
    # The inverse applies each gate's adjoint in reverse order and negates
    # the global phase; the barrier is self-inverse.
    inverted = QuantumCircuit(register)
    inverted.tdg(1)
    inverted.barrier(register)
    inverted.h(0)
    inverted.global_phase = -0.5
    self.assertEqual(circuit.inverse(), inverted)
def test_compare_two_equal_circuits(self):
    """Test to compare that 2 circuits are equal."""
    first = QuantumCircuit(2, 2)
    first.h(0)
    second = QuantumCircuit(2, 2)
    second.h(0)
    # Identically-built circuits compare equal via ==.
    self.assertTrue(first == second)
def test_compare_two_different_circuits(self):
    """Test to compare that 2 circuits are different."""
    first = QuantumCircuit(2, 2)
    first.h(0)
    second = QuantumCircuit(2, 2)
    second.x(0)
    # Different gates on the same wires means the circuits are not equal.
    self.assertFalse(first == second)
def test_compare_a_circuit_with_none(self):
    """Test to compare that a circuit is different to None."""
    circuit = QuantumCircuit(2, 2)
    circuit.h(0)
    other = None
    # Comparing against None must be False, not raise.
    self.assertFalse(circuit == other)
def test_overlapped_add_bits_and_add_register(self):
    """Test add registers whose bits have already been added by add_bits."""
    circuit = QuantumCircuit()
    for bit_type, reg_type in (
        [Qubit, QuantumRegister],
        [Clbit, ClassicalRegister],
        [AncillaQubit, AncillaRegister],
    ):
        members = [bit_type() for _ in range(10)]
        # Adding a register over bits already in the circuit must not
        # duplicate those bits.
        circuit.add_bits(members)
        circuit.add_register(reg_type(bits=members))
    # 10 plain qubits + 10 ancillas (ancillas count towards num_qubits too).
    self.assertEqual(circuit.num_qubits, 20)
    self.assertEqual(circuit.num_clbits, 10)
    self.assertEqual(circuit.num_ancillas, 10)
def test_overlapped_add_register_and_add_register(self):
    """Test add registers whose bits have already been added by add_register."""
    circuit = QuantumCircuit()
    for bit_type, reg_type in (
        [Qubit, QuantumRegister],
        [Clbit, ClassicalRegister],
        [AncillaQubit, AncillaRegister],
    ):
        members = [bit_type() for _ in range(10)]
        # Two distinct registers aliasing the same ten bits.
        circuit.add_register(reg_type(bits=members))
        circuit.add_register(reg_type(bits=members))
    # The shared bits are counted only once per kind.
    self.assertEqual(circuit.num_qubits, 20)
    self.assertEqual(circuit.num_clbits, 10)
    self.assertEqual(circuit.num_ancillas, 10)
def test_deprecated_measure_function(self):
    """Test that the deprecated version of the loose 'measure' function works correctly."""
    from qiskit.circuit.measure import measure

    circuit = QuantumCircuit(1, 1)
    # The loose function must emit the deprecation warning yet still behave
    # exactly like the method form.
    with self.assertWarnsRegex(DeprecationWarning, r".*Qiskit Terra 0\.19.*"):
        measure(circuit, 0, 0)
    reference = QuantumCircuit(1, 1)
    reference.measure(0, 0)
    self.assertEqual(circuit, reference)
def test_deprecated_reset_function(self):
    """Test that the deprecated version of the loose 'reset' function works correctly."""
    from qiskit.circuit.reset import reset

    circuit = QuantumCircuit(1, 1)
    # The loose function must emit the deprecation warning yet still behave
    # exactly like the method form.
    with self.assertWarnsRegex(DeprecationWarning, r".*Qiskit Terra 0\.19.*"):
        reset(circuit, 0)
    reference = QuantumCircuit(1, 1)
    reference.reset(0)
    self.assertEqual(circuit, reference)
class TestCircuitPrivateOperations(QiskitTestCase):
    """Direct tests of some of the private methods of QuantumCircuit. These do not represent
    functionality that we want to expose to users, but there are some cases where private methods
    are used internally (similar to "protected" access in .NET or "friend" access in C++), and we
    want to make sure they work in those cases."""

    def test_previous_instruction_in_scope_failures(self):
        """Test the failure paths of the peek and pop methods for retrieving the most recent
        instruction in a scope."""
        test = QuantumCircuit(1, 1)
        # Empty circuit: both accessors fail with the "circuit" wording.
        with self.assertRaisesRegex(CircuitError, r"This circuit contains no instructions\."):
            test._peek_previous_instruction_in_scope()
        with self.assertRaisesRegex(CircuitError, r"This circuit contains no instructions\."):
            test._pop_previous_instruction_in_scope()
        # Inside a control-flow builder the message refers to the *scope* instead.
        with test.for_loop(range(2)):
            with self.assertRaisesRegex(CircuitError, r"This scope contains no instructions\."):
                test._peek_previous_instruction_in_scope()
            with self.assertRaisesRegex(CircuitError, r"This scope contains no instructions\."):
                test._pop_previous_instruction_in_scope()

    def test_pop_previous_instruction_removes_parameters(self):
        """Test that the private "pop instruction" method removes parameters from the parameter
        table if that instruction is the only instance."""
        x, y = Parameter("x"), Parameter("y")
        test = QuantumCircuit(1, 1)
        test.rx(y, 0)
        last_instructions = test.u(x, y, 0, 0)
        self.assertEqual({x, y}, set(test.parameters))
        instruction = test._pop_previous_instruction_in_scope()
        self.assertEqual(list(last_instructions), [instruction])
        # ``x`` appeared only in the popped instruction so it must be gone;
        # ``y`` is still referenced by the earlier ``rx``.
        self.assertEqual({y}, set(test.parameters))

    def test_decompose_gate_type(self):
        """Test decompose specifying gate type."""
        circuit = QuantumCircuit(1)
        circuit.append(SGate(label="s_gate"), [0])
        # Passing the gate *class* (not a name string) must match the labeled
        # instance as well, leaving no bare "s" operations behind.
        decomposed = circuit.decompose(gates_to_decompose=SGate)
        self.assertNotIn("s", decomposed.count_ops())
|
{
"content_hash": "0389f07c59f6971c2a2c1ef12012f7a3",
"timestamp": "",
"source": "github",
"line_count": 1261,
"max_line_length": 100,
"avg_line_length": 37.47422680412371,
"alnum_prop": 0.6025605756004656,
"repo_name": "QISKit/qiskit-sdk-py",
"id": "426683e62a1ac6dd660e9ed5b3fbaf5a25e54a3e",
"size": "47740",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "test/python/circuit/test_circuit_operations.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2582"
},
{
"name": "C++",
"bytes": "327518"
},
{
"name": "CMake",
"bytes": "19294"
},
{
"name": "Makefile",
"bytes": "5608"
},
{
"name": "Pascal",
"bytes": "2444"
},
{
"name": "Python",
"bytes": "1312801"
},
{
"name": "Shell",
"bytes": "8385"
}
],
"symlink_target": ""
}
|
import sys
import os
import os.path
import codecs
import cgi
from plasTeX.DOM import Node
from plasTeX.Base import Command
from plasTeX.Base import DimenCommand
from plasTeX.Logging import getLogger
import plasTeX.Packages.graphics as graphics
# Module-level loggers: ``log`` for warnings/errors, ``status`` for progress output.
log = getLogger()
status = getLogger('status')
# Ugly hack: assume textwidth is 600pt. True for Kattis but not in
# general.
class textwidth(DimenCommand):
    # Fixed dimension returned whenever the document queries \textwidth.
    value = DimenCommand.new('600pt')
# Convert an expression of the form "X\textwidth" to 100*x%
# (Used in ugly hack to handle illustrations)
def clean_width(width):
    # Non-node widths (plain strings/numbers) pass through unchanged.
    if not isinstance(width, Node):
        return width
    children = width.childNodes
    # Only rewrite the exact two-node pattern "<factor>\textwidth".
    if len(children) == 2 and children[1].nodeName == 'textwidth':
        return u'%.2f%%' % (100*float(children[0]))
    return width
# \problemheader
class problemheader(Command):
    """Handler for \\problemheader: reads the problem title and id and, when a
    ``.timelimit`` file exists in the problem package, attaches its contents
    as the ``timelim`` attribute."""
    args = 'title id:str'

    def invoke(self, tex):
        Command.invoke(self, tex)
        # The time limit lives one directory above the statement file.
        timelimfile = os.path.join(os.path.dirname(tex.filename),
                                   '..', '.timelimit')
        if os.path.isfile(timelimfile):
            # Context manager closes the handle; the previous
            # ``open(...).read()`` leaked it.
            with open(timelimfile, 'r') as timelim_fp:
                self.attributes['timelim'] = timelim_fp.read()
# \sampleheader
class sampletable(Command):
    """Handler for a two-column sample-data table (an input/output file pair).

    Reads both files, HTML-escapes their contents, and stores them in the
    ``data1``/``data2`` attributes for the renderer.
    """
    args = 'header1 file1:str header2 file2:str'

    def read_sample_file(self, filename):
        """Return the HTML-escaped contents of *filename*, decoded as UTF-8."""
        # Context manager closes the handle; the previous ``open(...).read()``
        # leaked it.  (This file is Python 2: ``str.decode`` on the raw bytes.)
        with open(filename, 'r') as sample_fp:
            data = sample_fp.read().decode('utf8')
        return cgi.escape(data)

    def invoke(self, tex):
        Command.invoke(self, tex)
        # Sample files are given relative to the statement's directory.
        # (Renamed from ``dir``, which shadowed the builtin.)
        dirname = os.path.dirname(tex.filename)
        file1 = os.path.join(dirname, self.attributes['file1'])
        file2 = os.path.join(dirname, self.attributes['file2'])
        try:
            status.info(' ( verbatim %s ' % file1)
            self.attributes['data1'] = self.read_sample_file(file1)
            status.info(') ( verbatim %s ' % file2)
            self.attributes['data2'] = self.read_sample_file(file2)
            status.info(') ')
        except (OSError, IOError):
            # Missing samples are reported but not fatal.
            log.warning('\nProblem opening files "%s" and "%s"', file1, file2)
# Any command including a picture, such as \illustration and our
# re-implementation of \includegraphics. (Based on plasTeX's
# \includegraphics implementation)
class _graphics_command(Command):
    """Base class for commands referencing an image file.

    Resolves the ``file`` attribute to an actual image path — first via the
    configured graphics search paths, then via kpsewhich — and stores the
    result (or ``None``) in ``self.imageoverride``.
    """

    def invoke(self, tex):
        res = Command.invoke(self, tex)
        # Overcome plasTeX bug by looking for love in the right place
        basetex = self.ownerDocument.userdata['base_tex_instance']
        f = self.attributes['file']
        ext = self.ownerDocument.userdata.getPath(
            'packages/graphicx/extensions',
            ['.png', '.jpg', '.jpeg', '.gif', '.pdf'])
        paths = self.ownerDocument.userdata.getPath(
            'packages/graphicx/paths', [os.path.dirname(basetex.filename)])
        img = None
        # Check for file using graphicspath; try the bare name first, then
        # each configured extension.
        for p in paths:
            for e in ['']+ext:
                fname = os.path.join(p, f+e)
                if os.path.isfile(fname):
                    img = os.path.abspath(fname)
                    break
            if img is not None:
                break
        # Check for file using kpsewhich
        if img is None:
            for e in ['']+ext:
                try:
                    img = os.path.abspath(basetex.kpsewhich(f+e))
                    break
                except (OSError, IOError):
                    pass
        # BUGFIX: guard against ``img`` still being None here —
        # ``os.path.isfile(None)`` raises TypeError instead of producing the
        # intended warning.
        if img is None or not os.path.isfile(img):
            log.warning('Could not identify image "%s"' % f)
        self.imageoverride = img
        return res
# \illustration
class illustration(_graphics_command):
    """Handler for \\illustration: an image with a fractional width and a caption."""
    args = 'width:double file:str description'

    def invoke(self, tex):
        result = _graphics_command.invoke(self, tex)
        # Translate the fractional width into a CSS percentage string.
        fraction = self.attributes['width']
        self.style['width'] = u'%.2f%%' % (100*fraction)
        return result
# Dummy for \fontencoding to suppress warnings
class fontencoding(Command):
    # Consumes the charset argument; intentionally does nothing with it.
    args = 'charset:str'
# Dummy for \selectfont to suppress warnings.
class selectfont(Command):
    # No arguments and no behaviour; exists only so plasTeX recognises the macro.
    pass
# Dummy for \ExecuteOptions to suppress warnings.
class ExecuteOptions(Command):
    # No arguments and no behaviour; exists only so plasTeX recognises the macro.
    pass
def init(tex):
    """Register this package's macro classes with the plasTeX *tex* instance."""
    # Dirty hack #25783 to get plasTeX to work properly:
    # any subprocess of the tex instance won't remember things like,
    # say, the name of the .tex file being processed, which is needed
    # for kpsewhich to work. So we'll keep a pointer to the original
    # tex instance in the document's userdata.
    tex.ownerDocument.userdata['base_tex_instance'] = tex
    # Import the macros
    tex.ownerDocument.context.importMacros(vars(sys.modules[__name__]))
# So apparently this is how to communicate to Plastex where to
# search for modules... Eugch.
# Prepended (not appended) so this package's modules take precedence.
sys.path = [os.path.dirname(__file__)] + sys.path
|
{
"content_hash": "bd18593a2b6998dac2c92ac204515276",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 87,
"avg_line_length": 32.689655172413794,
"alnum_prop": 0.6151898734177215,
"repo_name": "godmar/problemtools",
"id": "f66460d8974521849872bc39a0193897c67b5911",
"size": "4740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "problemtools/ProblemPlasTeX/ProblemsetMacros.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "13280"
},
{
"name": "CSS",
"bytes": "908"
},
{
"name": "Dockerfile",
"bytes": "702"
},
{
"name": "HTML",
"bytes": "1081"
},
{
"name": "Makefile",
"bytes": "853"
},
{
"name": "Python",
"bytes": "138314"
},
{
"name": "Shell",
"bytes": "2496"
},
{
"name": "TeX",
"bytes": "16750"
}
],
"symlink_target": ""
}
|
"""Classes defining structures and unions."""
import abc
import collections
import collections.abc
import copy
import io
import typing
import warnings
from typing import Any
from typing import BinaryIO
from typing import Dict
from typing import Iterator
from typing import List
from typing import Mapping
from typing import MutableMapping
from typing import Optional
from typing import overload
from typing import Sequence
from typing import Tuple
from typing import Type
from typing import TypeVar
import attr
from binobj import decorators
from binobj import errors
from binobj import fields
from binobj.typedefs import MethodFieldValidator
from binobj.typedefs import MutableStrDict
from binobj.typedefs import StrDict
from binobj.typedefs import StructValidator
__all__ = ["Struct"]
# Generic type variables used throughout this module.
T = TypeVar("T")
K = TypeVar("K")
V = TypeVar("V")
# Covariant and bound so subclasses of Struct satisfy TStruct annotations.
TStruct = TypeVar("TStruct", covariant=True, bound="Struct")
@attr.s(kw_only=True)
class StructMetadata:
    """Info about the :class:`.Struct` it belongs to, like its fields and validators.

    This class should be considered part of how Structs are implemented. It's only of
    use to people writing wrapper classes or otherwise enhancing the behavior of the
    default :class:`.Struct` class.

    .. versionadded:: 0.7.1

    .. versionchanged:: 0.10.0
        Arguments are now keyword-only.
    """

    name = attr.ib(type=str)
    """The name of the class this metadata is for.

    .. versionadded:: 0.10.0
    """

    components = attr.ib(
        type=MutableMapping[str, fields.Field[Any]], factory=collections.OrderedDict
    )
    """A mapping of field names to the actual field object."""

    struct_validators = attr.ib(type=List[StructValidator], factory=list)
    """A list of validators for this struct."""

    field_validators = attr.ib(
        type=MutableMapping[str, List[MethodFieldValidator]], factory=dict
    )
    """A mapping of field names to validators to execute for that field."""

    num_own_fields = attr.ib(type=int, default=0)
    """The number of fields defined in this class (i.e. excluding superclasses).

    .. versionadded:: 0.9.0
    """

    size_bytes = attr.ib(type=Optional[int], default=0)
    """The total size of the struct in bytes, if it's a fixed value.

    This is only used for classes declared using PEP 526 type annotations and should
    otherwise be ignored.

    .. versionadded:: 0.9.0
    """

    argument_defaults = attr.ib(type=MutableStrDict, factory=dict)
    """A mapping of argument names or derived keys to their default values.

    Keys can take several forms:

    * A class name, followed by two underscores, then the field name. Class names are
      case-sensitive.
    * A single attribute name. This has the lowest precedence but the broadest reach.
    """

    def load_meta_options(self, meta: type) -> None:
        """Copy recognized options from a Struct's inner ``Meta`` class onto this object."""
        # Copy into a fresh dict so later mutation can't affect the Meta class.
        self.argument_defaults = dict(getattr(meta, "argument_defaults", {}))
def collect_assigned_fields(
    class_name: str,
    namespace: StrDict,
    class_metadata: StructMetadata,
    byte_offset: Optional[int],
) -> int:
    """Collect all fields defined by class variable assignment to a struct.

    Arguments:
        class_name (str):
            The name of the Struct class. Only used in error messages.
        namespace (dict):
            The class namespace, as passed to :meth:`StructMeta.__new__`.
        class_metadata (StructMetadata):
            The metadata object for the Struct being created.
        byte_offset (int):
            The byte offset to start at, typically 0 unless this struct inherits from
            another one. Will be ``None`` if the struct this class inherits from is of
            variable size.

    Returns:
        int: The number of fields found.

    .. versionadded:: 0.9.0

    .. versionchanged:: 0.10.0
        The function now takes the entire class metadata as the third argument instead
        of just a mapping of the declared fields.
    """
    # Inherited fields already occupy the front of ``components``; fields
    # declared here are numbered after them.
    field_index = len(class_metadata.components)
    n_fields_found = 0

    # It's HIGHLY important that we don't accidentally bind the superclass' fields to
    # this struct. That's why we're iterating over ``namespace`` and adding the field
    # into the ``components`` dict *inside* the loop.
    for item_name, item in namespace.items():
        if not isinstance(item, fields.Field):
            continue
        if item_name in class_metadata.components:
            # Field was already defined in the superclass
            raise errors.FieldRedefinedError(struct=class_name, field=item)

        item.bind_to_container(class_metadata, item_name, field_index, byte_offset)
        if byte_offset is not None and item.has_fixed_size:
            byte_offset += typing.cast(int, item.size)
        else:
            # A variable-size field makes every subsequent offset unknowable.
            byte_offset = None

        class_metadata.components[item_name] = item
        field_index += 1
        n_fields_found += 1
    return n_fields_found
def bind_validators_to_struct(namespace: StrDict, metadata: StructMetadata) -> None:
"""Find all defined validators and assign them to their fields.
.. versionadded:: 0.9.0
"""
for item in namespace.values():
if not isinstance(item, decorators.ValidatorMethodWrapper):
continue
if item.field_names:
# Attach this validator to each named field.
for field_name in item.field_names:
metadata.field_validators[field_name].append(item)
else:
# Validator doesn't define any fields, must be a validator for the entire
# struct.
metadata.struct_validators.append(item)
class StructMeta(abc.ABCMeta):
    """The metaclass for all serializable objects made of other serializable objects.

    It defines the ``__binobj_struct__`` class variables and sets some values on the
    :class:`~binobj.fields.base.Field` components such as its name and index.
    """

    # MyPy is kinda forcing me to use dunderscore names for arguments just because
    # typeshed does it this way. The stupidity... >:[
    @classmethod
    def __prepare__(
        mcs, __name: str, __bases: Tuple[type, ...], **_kwargs: Any
    ) -> MutableStrDict:
        # An ordered namespace so that field declaration order is preserved.
        return collections.OrderedDict()

    def __new__(
        mcs: Type["StructMeta"],
        class_name: str,
        bases: Tuple[type, ...],
        namespace: Dict[str, Any],
    ) -> "StructMeta":
        # Build a list of all of the base classes that appear to be Structs. If anything
        # else uses StructMeta as a metaclass then we're in trouble, since this will
        # detect that as a second base class.
        struct_bases = [b for b in bases if issubclass(type(b), mcs)]
        if len(struct_bases) > 1:
            raise errors.MultipleInheritanceError(struct=class_name)

        metadata = StructMetadata(name=class_name)

        if struct_bases:
            # Build a dictionary of all of the fields in the parent struct first, then
            # add in the fields defined in this struct.
            base = typing.cast(Type["Struct"], struct_bases[0])
            for comp_name, item in base.__binobj_struct__.components.items():
                if isinstance(item, fields.Field):
                    metadata.components[comp_name] = item

            # Copy the dict of field validators for the parent struct, making a separate
            # copy of the validator list for this class. This is so that child classes
            # can add validators for fields defined in the parent class without
            # affecting the parent class.
            metadata.field_validators = {
                f_name: list(v_list)
                for f_name, v_list in base.__binobj_struct__.field_validators.items()
            }

            # Similarly, make a copy of the struct validators of the parent class.
            metadata.struct_validators = list(base.__binobj_struct__.struct_validators)

            # Start the byte offset at the end of the base class. We won't be able to do
            # this if the base class has variable-length fields.
            byte_offset = base.get_size()
        else:
            # Else: This struct doesn't inherit from another struct, so we're starting
            # at offset 0. There are no field or struct validators to copy.
            byte_offset = 0

        # Every field declared here starts with an empty validator list.
        metadata.field_validators.update(
            {
                name: []
                for name, obj in namespace.items()
                if isinstance(obj, fields.Field)
            }
        )

        # Load any construction options the caller may have defined.
        if "Meta" in namespace:
            metadata.load_meta_options(namespace["Meta"])

        # Enumerate the declared fields and bind them to this struct.
        metadata.num_own_fields = collect_assigned_fields(
            class_name, namespace, metadata, byte_offset
        )
        bind_validators_to_struct(namespace, metadata)
        namespace["__binobj_struct__"] = metadata

        struct_class = super().__new__(mcs, class_name, bases, namespace)

        # Set __objclass__ on all fields to aid type introspection. The `inspect` module
        # uses this as an aid.
        for field in metadata.components.values():
            # TODO (dargueta): Declare __objclass__ once we drop support for 3.5
            # We need PEP-526 for that unfortunately.
            field.__objclass__ = struct_class  # type: ignore[attr-defined]

        # TODO (dargueta): Figure out how metaclasses are supposed to work with MyPy
        return struct_class  # type: ignore[return-value]
@overload
def recursive_to_dicts(item: "Struct") -> MutableMapping[str, Any]:
    ...


@overload
def recursive_to_dicts(item: Mapping[K, V]) -> MutableMapping[K, V]:
    ...


@overload
def recursive_to_dicts(item: Sequence[T]) -> List[T]:
    ...


def recursive_to_dicts(item):
    """Ensure that any nested structures are also converted to dictionaries.

    This is used when a :class:`Struct` is converted to a dictionary.

    :param item:
        Anything. If it's an unsupported type it'll get returned as is.
    """
    # Structs know how to serialize themselves.
    if isinstance(item, Struct):
        return item.to_dict()
    # Mappings: convert both keys and values, preserving order.
    if isinstance(item, collections.abc.Mapping):
        converted_pairs = (
            (recursive_to_dicts(key), recursive_to_dicts(value))
            for key, value in item.items()
        )
        return collections.OrderedDict(converted_pairs)
    # Non-string sequences get converted element-wise; string-like objects
    # are scalars and fall through.
    if isinstance(item, collections.abc.Sequence) and not isinstance(
        item, (str, bytes, bytearray)
    ):
        return [recursive_to_dicts(element) for element in item]
    return item
class Struct(metaclass=StructMeta):
"""An ordered collection of fields and other structures.
.. attribute:: __binobj_struct__
A class attribute defining features of the struct, such as its fields,
validators, default values, etc. It's only of use for code that inspects struct
definitions.
:type: binobj.structures.StructMetadata
.. versionchanged:: 0.5.0
A Struct will compare equal to :data:`~binobj.fields.base.UNDEFINED` if and only
if all of its fields are also undefined.
.. versionchanged:: 0.7.1
Removed the private-ish ``__components__`` and ``__validators__`` attributes.
Field definitions, validators, and other metadata can be found in the new
``__binobj_struct__`` class attribute. However, it should be considered an
implementation detail and is subject to change.
.. versionchanged:: 0.10.0
The ``__objclass__`` attribute is set on all fields.
"""
__binobj_struct__ = StructMetadata(name=typing.cast(str, None))
def __init__(self, **values: Any):
    """Store the given field values, rejecting names that aren't declared fields.

    :raise ~binobj.errors.UnexpectedValueError:
        A keyword argument doesn't correspond to any field of this struct.
    """
    declared = self.__binobj_struct__.components.keys()
    unrecognized = set(values.keys() - declared)
    if unrecognized:
        raise errors.UnexpectedValueError(struct=self, name=unrecognized)
    self.__values__ = values
def validate_contents(self) -> None:
    """Validate the stored values in this struct.

    :raise ~binobj.errors.ValidationError: Validation failed.

    .. versionadded:: 0.4.0
    """
    for f_name, validators in self.__binobj_struct__.field_validators.items():
        f_obj = self.__binobj_struct__.components[f_name]
        # NOTE(review): item access (``self[f_name]``) is defined outside this
        # view; presumably it returns the field's current value — confirm.
        value = self[f_name]

        # First, invoke the validators defined on the field object.
        for instance_validator in f_obj.validators:
            instance_validator(value)

        # Second, invoke the validator methods for the field defined on this
        # Struct.
        for method_validator in validators:
            method_validator(self, f_obj, value)

    # Validate the entirety of the struct.
    for struct_validator in self.__binobj_struct__.struct_validators:
        struct_validator(self, typing.cast(StrDict, self))
def to_stream(self, stream: BinaryIO, context: Any = None) -> None:
    """Convert the given data into bytes and write it to ``stream``.

    :param BinaryIO stream:
        The stream to write the serialized data into.
    :param context:
        Additional data to pass to this method. Subclasses must ignore anything they
        don't recognize.
    """
    # Fail fast on invalid field values before touching the stream.
    self.validate_contents()

    # We can't pass `self` to all_fields because Structs can't be used with
    # dictionary expansion (e.g. **kwargs). It'd be a nasty surprise for
    # fields expecting a dictionary.
    all_fields = self.to_dict()

    for field in self.__binobj_struct__.components.values():
        value = field.compute_value_for_dump(all_fields)
        # Fields reporting NOT_PRESENT are skipped rather than serialized.
        if value is not fields.NOT_PRESENT:
            field.to_stream(stream, value, context=context, all_fields=all_fields)
def to_bytes(self, context: Any = None) -> bytes:
    """Convert the given data into bytes.

    :param context:
        Additional data to pass to this method. Subclasses must ignore anything they
        don't recognize.

    :return: The serialized data.
    :rtype: bytes
    """
    # Serialize into an in-memory stream and hand back its contents.
    buffer = io.BytesIO()
    self.to_stream(buffer, context)
    return buffer.getvalue()
def to_dict(self, keep_discardable: bool = False) -> MutableMapping[str, Any]:
    """Convert this struct into an ordered dictionary.

    The primary use for this method is converting a loaded :class:`Struct` into
    native Python types. As such, validation is *not* performed since that was done
    while loading.

    :param bool keep_discardable:
        If True, don't exclude fields marked with ``discard=True`` from the result.

    :rtype: collections.OrderedDict

    :raise MissingRequiredValueError:
        One or more fields don't have assigned values.

    .. versionchanged:: 0.3.0
        This now recursively calls :meth:`.to_dict` on all nested structs and arrays
        so that the returned dictionary is completely converted, not just the first
        level.

    .. versionchanged:: 0.6.0
        Fields with ``discard`` set are not included in the returned dict by
        default.

    .. versionchanged:: 0.6.1
        The ``keep_discardable`` argument was added.
    """
    # Values are computed via the fields (not just read) so that derived
    # values serialize correctly; discardable fields are filtered here.
    dct = collections.OrderedDict(
        (field.name, field.compute_value_for_dump(typing.cast(StrDict, self)))
        for field in self.__binobj_struct__.components.values()
        if keep_discardable or not field.discard
    )
    # Convert any nested Structs/mappings/sequences all the way down.
    return recursive_to_dicts(dct)
@classmethod
def from_stream(
    cls: Type[TStruct],
    stream: BinaryIO,
    context: Any = None,
    init_kwargs: Optional[StrDict] = None,
) -> TStruct:
    """Load a struct from the given stream.

    :param BinaryIO stream:
        The stream to load data from.
    :param context:
        Additional data to pass to the components'
        :meth:`~binobj.fields.base.Field.from_stream` methods. Subclasses must
        ignore anything they don't recognize.
    :param dict init_kwargs:
        Additional keyword arguments to pass to the struct's constructor, for
        subclasses that take additional arguments beyond the fields that comprise
        the struct. You can also use this to *override* field values; anything given
        in here takes precedence over loaded values.

    :return: The loaded struct.

    .. versionadded:: 0.7.0
        The ``init_kwargs`` argument.
    """
    if init_kwargs:
        # Deep-copy so that mutating the results can't alter the caller's dict.
        results = typing.cast(MutableStrDict, copy.deepcopy(init_kwargs))
    else:
        results = {}

    for name, field in cls.__binobj_struct__.components.items():
        # We use setdefault() so we don't overwrite anything the caller may have
        # passed to us in `init_kwargs`.
        results.setdefault(name, field.from_stream(stream, context, results))

    instance = cls(**results)
    instance.validate_contents()

    # Discardable fields participate in loading and validation above but are
    # removed from the final instance.
    for field in cls.__binobj_struct__.components.values():
        if field.discard:
            del instance[field.name]
    return instance
@classmethod
def from_bytes(
    cls: Type[TStruct],
    data: bytes,
    context: Any = None,
    exact: bool = True,
    init_kwargs: Optional[StrDict] = None,
) -> TStruct:
    """Load a struct from the given byte string.

    :param bytes data:
        A bytes-like object to get the data from.
    :param context:
        Additional data to pass to this method. Subclasses must ignore anything they
        don't recognize.
    :param bool exact:
        ``data`` must contain exactly the number of bytes required. If not all the
        bytes in ``data`` were used when reading the struct, throw an exception.
    :param dict init_kwargs:
        Additional keyword arguments to pass to the struct's constructor, for
        subclasses that take additional arguments beyond the fields that comprise
        the struct. You can also use this to *override* field values; anything given
        in here takes precedence over loaded values.

    :return: The loaded struct.
    :raise ExtraneousDataError:
        ``exact`` is True and there's data left over at the end of the byte string.

    .. versionadded:: 0.7.0
        The ``init_kwargs`` argument.
    """
    stream = io.BytesIO(data)
    loaded_data = cls.from_stream(stream, context, init_kwargs)

    # ``stream.tell()`` is the number of bytes consumed. BUGFIX: the old
    # comparison ``stream.tell() < len(data) - 1`` silently accepted exactly
    # one byte of trailing garbage, and the message over-reported the bytes
    # read by one. Require *all* bytes to have been consumed.
    if exact and stream.tell() < len(data):
        raise errors.ExtraneousDataError(
            "Read %d bytes, but there are %d in the input data."
            % (stream.tell(), len(data)),
            offset=stream.tell(),
        )
    return loaded_data
@classmethod
def partial_load(
    cls: Type[TStruct],
    stream: BinaryIO,
    last_field: str = None,
    context: Any = None,
) -> TStruct:
    """Partially load this object, either until EOF or the named field.

    All fields up to and including the field named in ``last_field`` will be loaded
    from ``stream``.

    If ``last_field`` isn't given, as many complete fields as possible will be
    loaded from ``stream``. Any partially loaded fields will be discarded and the
    stream pointer will be reset to the end of the last complete field read.

    .. note::
        Because the struct is only partially loaded, struct-level validators are
        *not* executed. Individual fields still are.

    :param BinaryIO stream:
        The stream to load from.
    :param str last_field:
        The name of the last field to load in the object. If given, enough bytes for
        this and all previous fields *must* be present in the stream.
    :param context:
        Any object containing extra information to pass to the fields'
        :meth:`~binobj.fields.base.Field.from_stream` method.

    :return: The loaded struct.
    """  # noqa: D401
    # Reject unknown field names up front, before consuming any input.
    if (
        last_field is not None
        and last_field not in cls.__binobj_struct__.components
    ):
        raise ValueError(
            "%s doesn't have a field named %r." % (cls.__name__, last_field)
        )

    result = {}  # type: MutableStrDict

    for field in cls.__binobj_struct__.components.values():
        # Remember where this field starts so we can rewind on a partial read.
        offset = stream.tell()
        try:
            value = field.from_stream(stream, context=context, loaded_fields=result)
        except errors.UnexpectedEOFError:
            if last_field is not None:
                # Hit EOF before we read all the fields we were supposed to.
                raise

            # Hit EOF in the middle of reading a field. Since the caller didn't
            # specify how far we should read, this isn't an error. Go back to the
            # beginning of this field and return.
            stream.seek(offset)
            break

        if not field.discard:
            result[field.name] = value
        if field.name == last_field:
            break
    return cls(**result)
@classmethod
def get_field(cls, stream: BinaryIO, name: str, context: Any = None) -> Any:
    """Return the value of a single field.

    If the field is at a fixed byte offset from the beginning of the struct, it'll
    be read directly. Otherwise (e.g. a variable-length field occurs before it),
    the entire struct up to and including this field must be read, which means
    unrelated validation errors can be thrown if earlier fields have problems.

    :param BinaryIO stream:
        The stream to read from. It's assumed that the stream pointer is positioned
        at the start of a struct. The stream pointer is returned to its original
        position even if an exception occurred.
    :param str name:
        The name of the field to retrieve.
    :param context:
        Optional. Any object containing extra information to pass to the
        :meth:`~binobj.fields.base.Field.from_stream` method of the field (or of
        *each* field read, when the target is at a variable offset).
    :return: The value of the field in the struct data.
    :raise UnexpectedEOFError:
        The end of the stream was reached before the requested field could be
        completely read.
    """
    components = cls.__binobj_struct__.components
    if name not in components:
        raise ValueError("%s doesn't have a field named %r." % (cls.__name__, name))
    target = components[name]
    start_position = stream.tell()
    if target.offset is not None:
        # Fixed offset: jump straight to the field and read only it, restoring
        # the stream position no matter what happens.
        try:
            stream.seek(start_position + target.offset)
            return target.from_stream(stream, context, {})
        finally:
            stream.seek(start_position)
    # Variable offset: read everything up to and including the target field.
    try:
        partial_struct = cls.partial_load(stream, name, context)
    finally:
        stream.seek(start_position)
    return partial_struct[name]
def partial_dump(
    self, stream: BinaryIO, last_field: Optional[str] = None, context: Any = None
) -> None:
    """Partially dump the object, up to and including the last named field.

    All fields up to and including the field named in ``last_field`` will be
    serialized.

    If ``last_field`` isn't given, as many fields will be serialized as possible up
    to the first missing one.

    :param BinaryIO stream:
        The stream to dump into.
    :param str last_field:
        The name of the last field in the object to dump.
    :param context:
        Any object containing extra information to pass to the fields'
        :meth:`~binobj.fields.base.Field.to_stream` methods.
    """  # noqa: D401
    data = self.__values__
    for field in self.__binobj_struct__.components.values():
        value = data.get(field.name, field.default)
        if value is fields.UNDEFINED:
            # Field is missing from the dump data. If the caller wants us to dump
            # only the fields that're defined, we can bail out now.
            if last_field is None:
                return
            if field.required:
                # Caller wants us to dump up to and including ``last_field`` so we
                # need to crash.
                raise errors.MissingRequiredValueError(field=field)
        # NOTE(review): a missing but non-required field falls through here and
        # is handed UNDEFINED — presumably to_stream computes/defaults it.
        # Confirm against Field.to_stream.
        field.to_stream(stream, value, context)
        if field.name == last_field:
            return
@classmethod
def get_size(cls) -> Optional[int]:
    """Return the size of this struct in bytes, if possible.

    If there are variable-sized fields that can't be resolved, this function
    returns ``None`` instead.

    Do *not* use this on instances; use ``len(instance)`` instead.

    :return: The struct's size, in bytes, or ``None``.
    :rtype: int

    .. versionadded:: 0.3.0
    """
    try:
        total = 0
        for component in cls.__binobj_struct__.components.values():
            total += component.get_expected_size({})
        return total
    except (errors.UndefinedSizeError, errors.MissingRequiredValueError):
        # At least one field's size can't be determined statically.
        return None
# Container methods
def __getitem__(self, field_name: str) -> Any:
    """Return the value of the named field (dict-style access)."""
    if field_name in self.__binobj_struct__.components:
        return getattr(self, field_name)
    raise KeyError(
        "Struct %r has no field named %r." % (type(self).__name__, field_name)
    )
def __setitem__(self, field_name: str, value: Any) -> None:
    """Assign ``value`` to the named field (dict-style access)."""
    if field_name in self.__binobj_struct__.components:
        setattr(self, field_name, value)
        return
    raise KeyError(
        "Struct %r has no field named %r." % (type(self).__name__, field_name)
    )
def __delitem__(self, field_name: str) -> None:
    """Mark the named field as undefined by removing its stored value.

    Deleting a never-assigned field is a no-op (``pop`` with a default), but
    an unknown field name raises :class:`KeyError` like the other item
    methods.

    Note: the return annotation was corrected from ``Any`` to ``None`` —
    the method never returned a value.
    """
    if field_name not in self.__binobj_struct__.components:
        raise KeyError(
            "Struct %r has no field named %r." % (type(self).__name__, field_name)
        )
    self.__values__.pop(field_name, None)
def __iter__(self) -> Iterator[str]:
    """Yield the names of all fields whose values are defined."""
    for name in self.__values__:
        if self.__values__[name] is not fields.UNDEFINED:
            yield name
def __len__(self) -> int:
    """Return the serialized size of this struct instance, in bytes."""
    size = 0
    for field in self.__binobj_struct__.components.values():
        if field.has_fixed_size:
            size += typing.cast(int, field.size)
        else:
            # Variable-length field: the only way to know its size is to
            # actually serialize its value and measure the result.
            field_value = field.compute_value_for_dump(typing.cast(StrDict, self))
            size += len(field.to_bytes(field_value))
    return size
def __eq__(self, other: Any) -> bool:
    """Compare this struct's *defined* fields to another struct or mapping."""
    # Allow comparison to UNDEFINED. The result is True if all fields in this
    # struct are undefined, False otherwise.
    if other is fields.UNDEFINED:
        warnings.warn(
            "Comparing a struct to UNDEFINED to see if all its fields are undefined"
            " is deprecated. Starting version 1.0 this will always return False.",
            DeprecationWarning,
        )
        return all(v is fields.UNDEFINED for v in self.__values__.values())
    # Compare only defined values by using __iter__ to get the keys that are
    # defined.
    self_values = recursive_to_dicts({n: self[n] for n in list(self)})
    if not isinstance(other, (Struct, collections.abc.Mapping)):
        # Anything that isn't a struct or mapping can never compare equal.
        return False
    other_values = recursive_to_dicts({n: other[n] for n in list(other)})
    return other_values == self_values
def __bytes__(self) -> bytes:
    """Return the serialized form of this struct (enables ``bytes(instance)``)."""
    return self.to_bytes()
def __repr__(self) -> str:
    """Return a constructor-style debugging representation of this struct."""
    field_reprs = ", ".join(
        "%s=%r" % item for item in self.__values__.items()
    )
    return "%s(%s)" % (type(self).__qualname__, field_reprs)
|
{
"content_hash": "79fc0455596ee82ede53513e189eba66",
"timestamp": "",
"source": "github",
"line_count": 766,
"max_line_length": 88,
"avg_line_length": 37.40208877284595,
"alnum_prop": 0.6154275741710297,
"repo_name": "dargueta/binobj",
"id": "fc82737b9a045747aefa5cf148d057e6c0cd8734",
"size": "28650",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "binobj/structures.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1754"
},
{
"name": "Python",
"bytes": "229794"
}
],
"symlink_target": ""
}
|
'''
Created on Nov 3, 2016
@author: Admin
'''
from WidgetFactory import *
def create_window(factory):
    """Build a 30x7 diagram window through the given abstract widget factory."""
    return factory.make_diagram(30, 7)
# Abstract Factory demo: build the same diagram through two concrete
# factories, each producing a platform-specific widget behind one interface.
ms_window = create_window(MSWindowWidgetFactory())
macOSX_window = create_window(MacOSXWindowWidgetFactory())
|
{
"content_hash": "261ec2c11aa1c9454b1d97b588a8ac3f",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 60,
"avg_line_length": 19.857142857142858,
"alnum_prop": 0.7266187050359713,
"repo_name": "lucafon/DesignPatterns",
"id": "1f25b7cb8acfe6be7385ceb9a4fe37303766eb46",
"size": "278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lucafontanili.designpatterns.python/src/creational/abstractfactory/AbstractFactoryClient.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "68472"
},
{
"name": "Python",
"bytes": "30085"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import reverse
from django import http
from mox import IsA # noqa
from openstack_dashboard.api import cinder
from openstack_dashboard.test import helpers as test
# Admin volumes index page; used as the redirect target when detail lookups fail.
INDEX_URL = reverse('horizon:admin:volumes:index')
class VolumeSnapshotsViewTests(test.BaseAdminViewTests):
    """Admin-panel view tests for volume snapshots (mox-stubbed cinder API)."""

    @test.create_stubs({cinder: ('volume_snapshot_reset_state',
                                 'volume_snapshot_get')})
    def test_update_snapshot_status(self):
        # Reset a snapshot's status to 'error' via the update-status form and
        # verify the POST completes without form errors.
        snapshot = self.cinder_volume_snapshots.first()
        state = 'error'
        cinder.volume_snapshot_get(IsA(http.HttpRequest), snapshot.id) \
            .AndReturn(snapshot)
        cinder.volume_snapshot_reset_state(IsA(http.HttpRequest),
                                           snapshot.id,
                                           state)
        self.mox.ReplayAll()
        formData = {'status': state}
        url = reverse('horizon:admin:volumes:snapshots:update_status',
                      args=(snapshot.id,))
        res = self.client.post(url, formData)
        self.assertNoFormErrors(res)

    @test.create_stubs({cinder: ('volume_snapshot_get',
                                 'volume_get')})
    def test_get_volume_snapshot_details(self):
        # Detail page renders the snapshot's name, id, description and status.
        volume = self.cinder_volumes.first()
        snapshot = self.cinder_volume_snapshots.first()
        cinder.volume_get(IsA(http.HttpRequest), volume.id). \
            AndReturn(volume)
        cinder.volume_snapshot_get(IsA(http.HttpRequest), snapshot.id). \
            AndReturn(snapshot)
        self.mox.ReplayAll()
        url = reverse('horizon:admin:volumes:snapshots:detail',
                      args=[snapshot.id])
        res = self.client.get(url)
        self.assertContains(res,
                            "<h2>Volume Snapshot Details: %s</h2>" %
                            snapshot.name,
                            1, 200)
        self.assertContains(res, "<dd>test snapshot</dd>", 1, 200)
        self.assertContains(res, "<dd>%s</dd>" % snapshot.id, 1, 200)
        self.assertContains(res, "<dd>Available</dd>", 1, 200)

    @test.create_stubs({cinder: ('volume_snapshot_get',
                                 'volume_get')})
    def test_get_volume_snapshot_details_with_snapshot_exception(self):
        # Test to verify redirect if get volume snapshot fails
        snapshot = self.cinder_volume_snapshots.first()
        cinder.volume_snapshot_get(IsA(http.HttpRequest), snapshot.id).\
            AndRaise(self.exceptions.cinder)
        self.mox.ReplayAll()
        url = reverse('horizon:admin:volumes:snapshots:detail',
                      args=[snapshot.id])
        res = self.client.get(url)
        self.assertNoFormErrors(res)
        self.assertMessageCount(error=1)
        self.assertRedirectsNoFollow(res, INDEX_URL)

    @test.create_stubs({cinder: ('volume_snapshot_get',
                                 'volume_get')})
    def test_get_volume_snapshot_details_with_volume_exception(self):
        # Test to verify redirect if get volume fails
        volume = self.cinder_volumes.first()
        snapshot = self.cinder_volume_snapshots.first()
        cinder.volume_get(IsA(http.HttpRequest), volume.id). \
            AndRaise(self.exceptions.cinder)
        cinder.volume_snapshot_get(IsA(http.HttpRequest), snapshot.id). \
            AndReturn(snapshot)
        self.mox.ReplayAll()
        url = reverse('horizon:admin:volumes:snapshots:detail',
                      args=[snapshot.id])
        res = self.client.get(url)
        self.assertNoFormErrors(res)
        self.assertMessageCount(error=1)
        self.assertRedirectsNoFollow(res, INDEX_URL)
|
{
"content_hash": "9af90b1f2399cdea6a701fd3c120b615",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 73,
"avg_line_length": 38.260416666666664,
"alnum_prop": 0.5973318812959434,
"repo_name": "jumpstarter-io/horizon",
"id": "84a5abc11171a1e75d408b4c81e78f182fb9ca59",
"size": "4219",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/admin/volumes/snapshots/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""
SSH Agent interface
"""
import os
import socket
import struct
import sys
import threading
import time
import tempfile
import stat
from select import select
from paramiko.common import asbytes, io_sleep
from paramiko.py3compat import byte_chr
from paramiko.ssh_exception import SSHException, AuthenticationException
from paramiko.message import Message
from paramiko.pkey import PKey
from paramiko.util import retry_on_signal
# SSH agent protocol message numbers. Client requests (``c`` prefix) are sent
# as single bytes; server reply types are compared as plain ints.
cSSH2_AGENTC_REQUEST_IDENTITIES = byte_chr(11)
SSH2_AGENT_IDENTITIES_ANSWER = 12
cSSH2_AGENTC_SIGN_REQUEST = byte_chr(13)
SSH2_AGENT_SIGN_RESPONSE = 14
class AgentSSH(object):
    """Base class implementing the client side of the SSH agent protocol."""

    def __init__(self):
        # _conn is the transport to the agent (socket or channel-like object);
        # _keys caches the identities fetched from the agent.
        self._conn = None
        self._keys = ()

    def get_keys(self):
        """
        Return the list of keys available through the SSH agent, if any. If
        no SSH agent was running (or it couldn't be contacted), an empty list
        will be returned.

        :return:
            a tuple of `.AgentKey` objects representing keys available on the
            SSH agent
        """
        return self._keys

    def _connect(self, conn):
        """Attach to ``conn`` and fetch the agent's identity list."""
        self._conn = conn
        ptype, result = self._send_message(cSSH2_AGENTC_REQUEST_IDENTITIES)
        if ptype != SSH2_AGENT_IDENTITIES_ANSWER:
            raise SSHException('could not get keys from ssh-agent')
        keys = []
        for i in range(result.get_int()):
            keys.append(AgentKey(self, result.get_binary()))
            # The comment string that follows each key blob is discarded.
            result.get_string()
        self._keys = tuple(keys)

    def _close(self):
        """Close the agent connection and drop the cached key list."""
        if self._conn is not None:
            self._conn.close()
        self._conn = None
        self._keys = ()

    def _send_message(self, msg):
        """Send one length-prefixed message and return (reply type, reply)."""
        msg = asbytes(msg)
        self._conn.send(struct.pack('>I', len(msg)) + msg)
        l = self._read_all(4)
        msg = Message(self._read_all(struct.unpack('>I', l)[0]))
        return ord(msg.get_byte()), msg

    def _read_all(self, wanted):
        """Read exactly ``wanted`` bytes; raise SSHException on a closed agent."""
        result = self._conn.recv(wanted)
        while len(result) < wanted:
            if len(result) == 0:
                raise SSHException('lost ssh-agent')
            extra = self._conn.recv(wanted - len(result))
            if len(extra) == 0:
                raise SSHException('lost ssh-agent')
            result += extra
        return result
class AgentProxyThread(threading.Thread):
    """
    Class in charge of communication between two channels.

    Subclasses supply ``get_connection()`` returning the local endpoint to
    shuttle data between and its peer address.
    """

    def __init__(self, agent):
        threading.Thread.__init__(self, target=self.run)
        self._agent = agent
        self._exit = False

    def run(self):
        # The original wrapped this body in a no-op ``try: ... except: raise``;
        # that wrapper added nothing and has been removed.
        (r, addr) = self.get_connection()
        # r should be either a socket from the socket library or None
        self.__inr = r
        self.__addr = addr  # peer address (string) or None
        self._agent.connect()
        if not isinstance(self._agent, int) and (self._agent._conn is None or not hasattr(self._agent._conn, 'fileno')):
            raise AuthenticationException("Unable to connect to SSH agent")
        self._communicate()

    def _communicate(self):
        """Pump bytes between the agent connection and the local endpoint."""
        import fcntl
        # Make the local endpoint non-blocking so recv() never stalls the pump.
        oldflags = fcntl.fcntl(self.__inr, fcntl.F_GETFL)
        fcntl.fcntl(self.__inr, fcntl.F_SETFL, oldflags | os.O_NONBLOCK)
        while not self._exit:
            events = select([self._agent._conn, self.__inr], [], [], 0.5)
            for fd in events[0]:
                if self._agent._conn == fd:
                    data = self._agent._conn.recv(512)
                    if len(data) != 0:
                        self.__inr.send(data)
                    else:
                        # Zero-byte read means the peer closed; shut down.
                        self._close()
                        break
                elif self.__inr == fd:
                    data = self.__inr.recv(512)
                    if len(data) != 0:
                        self._agent._conn.send(data)
                    else:
                        self._close()
                        break
            time.sleep(io_sleep)

    def _close(self):
        """Stop the pump loop and close both endpoints."""
        self._exit = True
        self.__inr.close()
        self._agent._conn.close()
class AgentLocalProxy(AgentProxyThread):
    """
    Class to be used when wanting to ask a local SSH Agent being
    asked from a remote fake agent (so use a unix socket for ex.)
    """

    def __init__(self, agent):
        AgentProxyThread.__init__(self, agent)

    def get_connection(self):
        """
        Return a pair of socket object and string address.

        May block!

        The original wrapped the body in a ``try: ... except: raise`` that
        re-raised everything unchanged; the redundant wrapper is removed.
        """
        conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        conn.bind(self._agent._get_filename())
        conn.listen(1)
        (r, addr) = conn.accept()
        return r, addr
class AgentRemoteProxy(AgentProxyThread):
    """
    Class to be used when wanting to ask a remote SSH Agent
    """

    def __init__(self, agent, chan):
        AgentProxyThread.__init__(self, agent)
        # The SSH channel plays the role of the local endpoint in the pump.
        self.__chan = chan

    def get_connection(self):
        """Return the channel as the endpoint; there is no peer address."""
        return self.__chan, None
class AgentClientProxy(object):
    """
    Class proxying request as a client:

    #. client ask for a request_forward_agent()
    #. server creates a proxy and a fake SSH Agent
    #. server ask for establishing a connection when needed,
       calling the forward_agent_handler at client side.
    #. the forward_agent_handler launch a thread for connecting
       the remote fake agent and the local agent
    #. Communication occurs ...
    """

    def __init__(self, chanRemote):
        self._conn = None
        self.__chanR = chanRemote
        # The proxy thread starts pumping immediately.
        self.thread = AgentRemoteProxy(self, chanRemote)
        self.thread.start()

    def __del__(self):
        self.close()

    def connect(self):
        """
        Method automatically called by ``AgentProxyThread.run``.
        """
        if ('SSH_AUTH_SOCK' in os.environ) and (sys.platform != 'win32'):
            conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            try:
                retry_on_signal(lambda: conn.connect(os.environ['SSH_AUTH_SOCK']))
            # NOTE(review): bare except swallows *all* errors here, not just
            # socket failures — consider narrowing to socket.error.
            except:
                # probably a dangling env var: the ssh agent is gone
                return
        elif sys.platform == 'win32':
            import paramiko.win_pageant as win_pageant
            if win_pageant.can_talk_to_agent():
                conn = win_pageant.PageantConnection()
            else:
                return
        else:
            # no agent support
            return
        self._conn = conn

    def close(self):
        """
        Close the current connection and terminate the agent
        Should be called manually
        """
        if hasattr(self, "thread"):
            self.thread._exit = True
            self.thread.join(1000)
        if self._conn is not None:
            self._conn.close()
class AgentServerProxy(AgentSSH):
    """
    :param .Transport t: Transport used for SSH Agent communication forwarding

    :raises SSHException: mostly if we lost the agent
    """

    def __init__(self, t):
        AgentSSH.__init__(self)
        self.__t = t
        # Unix socket for the fake agent lives in a private temp directory.
        self._dir = tempfile.mkdtemp('sshproxy')
        os.chmod(self._dir, stat.S_IRWXU)
        self._file = self._dir + '/sshproxy.ssh'
        self.thread = AgentLocalProxy(self)
        self.thread.start()

    def __del__(self):
        self.close()

    def connect(self):
        """Open the agent-forwarding channel on the transport and attach to it."""
        conn_sock = self.__t.open_forward_agent_channel()
        if conn_sock is None:
            raise SSHException('lost ssh-agent')
        conn_sock.set_name('auth-agent')
        self._connect(conn_sock)

    def close(self):
        """
        Terminate the agent, clean the files, close connections
        Should be called manually

        NOTE(review): not idempotent — os.remove/os.rmdir raise if close()
        runs twice or the socket file was never created; confirm callers
        only invoke this once.
        """
        os.remove(self._file)
        os.rmdir(self._dir)
        self.thread._exit = True
        self.thread.join(1000)
        self._close()

    def get_env(self):
        """
        Helper for the environnement under unix

        :return:
            a dict containing the ``SSH_AUTH_SOCK`` environnement variables
        """
        return {'SSH_AUTH_SOCK': self._get_filename()}

    def _get_filename(self):
        """Return the path of the proxy's Unix socket file."""
        return self._file
class AgentRequestHandler(object):
    """Set up agent forwarding on a client channel and track spawned proxies."""

    def __init__(self, chanClient):
        self._conn = None
        self.__chanC = chanClient
        # Register ourselves so each remote forwarding request spawns a proxy.
        chanClient.request_forward_agent(self._forward_agent_handler)
        self.__clientProxys = []

    def _forward_agent_handler(self, chanRemote):
        # One AgentClientProxy (with its pump thread) per forwarded channel.
        self.__clientProxys.append(AgentClientProxy(chanRemote))

    def __del__(self):
        self.close()

    def close(self):
        """Close every proxy spawned for this handler."""
        for p in self.__clientProxys:
            p.close()
class Agent(AgentSSH):
    """
    Client interface for using private keys from an SSH agent running on the
    local machine. If an SSH agent is running, this class can be used to
    connect to it and retreive `.PKey` objects which can be used when
    attempting to authenticate to remote SSH servers.

    Upon initialization, a session with the local machine's SSH agent is
    opened, if one is running. If no agent is running, initialization will
    succeed, but `get_keys` will return an empty tuple.

    :raises SSHException:
        if an SSH agent is found, but speaks an incompatible protocol
    """

    def __init__(self):
        AgentSSH.__init__(self)
        # Unix: talk to the agent over the socket named by SSH_AUTH_SOCK.
        if ('SSH_AUTH_SOCK' in os.environ) and (sys.platform != 'win32'):
            conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            try:
                conn.connect(os.environ['SSH_AUTH_SOCK'])
            # NOTE(review): bare except — any failure (not just socket errors)
            # silently leaves the agent unconnected.
            except:
                # probably a dangling env var: the ssh agent is gone
                return
        elif sys.platform == 'win32':
            # Windows: use Pageant if it is running.
            from . import win_pageant
            if win_pageant.can_talk_to_agent():
                conn = win_pageant.PageantConnection()
            else:
                return
        else:
            # no agent support
            return
        self._connect(conn)

    def close(self):
        """
        Close the SSH agent connection.
        """
        self._close()
class AgentKey(PKey):
    """
    Private key held in a local SSH agent. This type of key can be used for
    authenticating to a remote server (signing). Most other key operations
    work as expected.
    """

    def __init__(self, agent, blob):
        self.agent = agent
        self.blob = blob
        # The key type name (e.g. "ssh-rsa") is the first string in the blob.
        self.name = Message(blob).get_text()

    def asbytes(self):
        """Return the raw public-key blob as provided by the agent."""
        return self.blob

    def __str__(self):
        # NOTE(review): asbytes() returns bytes, so under Python 3 this makes
        # str(key) invalid (__str__ must return str); presumably written for
        # Python 2 — verify against PKey's expectations.
        return self.asbytes()

    def get_name(self):
        """Return the key type name extracted from the blob."""
        return self.name

    def sign_ssh_data(self, data):
        """Ask the agent to sign ``data`` with this key; return the signature."""
        msg = Message()
        msg.add_byte(cSSH2_AGENTC_SIGN_REQUEST)
        msg.add_string(self.blob)
        msg.add_string(data)
        msg.add_int(0)  # flags: none
        ptype, result = self.agent._send_message(msg)
        if ptype != SSH2_AGENT_SIGN_RESPONSE:
            raise SSHException('key cannot be used for signing')
        return result.get_binary()
|
{
"content_hash": "03420724378afa065d93f9470a49fee6",
"timestamp": "",
"source": "github",
"line_count": 363,
"max_line_length": 124,
"avg_line_length": 30.53443526170799,
"alnum_prop": 0.5701912666907254,
"repo_name": "frankyrumple/smc",
"id": "f928881e2124c9046db8db55b6e16204ad497d2f",
"size": "11898",
"binary": false,
"copies": "26",
"ref": "refs/heads/master",
"path": "modules/paramiko/agent.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "263"
},
{
"name": "C",
"bytes": "631"
},
{
"name": "CSS",
"bytes": "48816"
},
{
"name": "HTML",
"bytes": "155252"
},
{
"name": "JavaScript",
"bytes": "339188"
},
{
"name": "Python",
"bytes": "2976944"
}
],
"symlink_target": ""
}
|
"""Unit tests for coders that must be consistent across all Beam SDKs.
"""
from __future__ import absolute_import
from __future__ import print_function
import json
import logging
import os.path
import sys
import unittest
from builtins import map
import yaml
from apache_beam.coders import coder_impl
from apache_beam.coders import coders
from apache_beam.transforms import window
from apache_beam.transforms.window import IntervalWindow
from apache_beam.utils import windowed_value
from apache_beam.utils.timestamp import Timestamp
# Location of the cross-SDK coder test specifications, relative to this file.
STANDARD_CODERS_YAML = os.path.join(
    os.path.dirname(__file__), '..', 'testing', 'data', 'standard_coders.yaml')
def _load_test_cases(test_yaml):
"""Load test data from yaml file and return an iterable of test cases.
See ``standard_coders.yaml`` for more details.
"""
if not os.path.exists(test_yaml):
raise ValueError('Could not find the test spec: %s' % test_yaml)
for ix, spec in enumerate(yaml.load_all(open(test_yaml))):
spec['index'] = ix
name = spec.get('name', spec['coder']['urn'].split(':')[-2])
yield [name, spec]
class StandardCodersTest(unittest.TestCase):
  """Checks the Python SDK coders against the cross-SDK YAML test specs."""

  # Maps coder URN -> factory for the Python coder; parameterized coders
  # receive their component coders as positional arguments.
  _urn_to_coder_class = {
      'beam:coder:bytes:v1': coders.BytesCoder,
      'beam:coder:varint:v1': coders.VarIntCoder,
      'beam:coder:kv:v1': lambda k, v: coders.TupleCoder((k, v)),
      'beam:coder:interval_window:v1': coders.IntervalWindowCoder,
      'beam:coder:iterable:v1': lambda t: coders.IterableCoder(t),
      'beam:coder:global_window:v1': coders.GlobalWindowCoder,
      'beam:coder:windowed_value:v1':
          lambda v, w: coders.WindowedValueCoder(v, w)
  }

  # Maps coder URN -> parser turning the spec's JSON-ish example value into
  # the Python object the coder encodes; component parsers are passed through.
  _urn_to_json_value_parser = {
      'beam:coder:bytes:v1': lambda x: x,
      'beam:coder:varint:v1': lambda x: x,
      'beam:coder:kv:v1':
          lambda x, key_parser, value_parser: (key_parser(x['key']),
                                               value_parser(x['value'])),
      'beam:coder:interval_window:v1':
          lambda x: IntervalWindow(
              start=Timestamp(micros=(x['end'] - x['span']) * 1000),
              end=Timestamp(micros=x['end'] * 1000)),
      'beam:coder:iterable:v1': lambda x, parser: list(map(parser, x)),
      'beam:coder:global_window:v1': lambda x: window.GlobalWindow(),
      'beam:coder:windowed_value:v1':
          lambda x, value_parser, window_parser: windowed_value.create(
              value_parser(x['value']), x['timestamp'] * 1000,
              tuple([window_parser(w) for w in x['windows']]))
  }

  def test_standard_coders(self):
    # One logical sub-test per YAML document in the spec file.
    for name, spec in _load_test_cases(STANDARD_CODERS_YAML):
      logging.info('Executing %s test.', name)
      self._run_standard_coder(name, spec)

  def _run_standard_coder(self, name, spec):
    # Round-trips each example through the coder; in --fix mode mismatching
    # encodings are recorded (see tearDownClass) instead of failing.
    coder = self.parse_coder(spec['coder'])
    parse_value = self.json_value_parser(spec['coder'])
    nested_list = [spec['nested']] if 'nested' in spec else [True, False]
    for nested in nested_list:
      for expected_encoded, json_value in spec['examples'].items():
        value = parse_value(json_value)
        expected_encoded = expected_encoded.encode('latin1')
        if not spec['coder'].get('non_deterministic', False):
          actual_encoded = encode_nested(coder, value, nested)
          if self.fix and actual_encoded != expected_encoded:
            self.to_fix[spec['index'], expected_encoded] = actual_encoded
          else:
            self.assertEqual(expected_encoded, actual_encoded)
            self.assertEqual(decode_nested(coder, expected_encoded, nested),
                             value)
        else:
          # Only verify decoding for a non-deterministic coder
          self.assertEqual(decode_nested(coder, expected_encoded, nested),
                           value)

  def parse_coder(self, spec):
    """Recursively build a coder instance from a spec's urn + components."""
    return self._urn_to_coder_class[spec['urn']](
        *[self.parse_coder(c) for c in spec.get('components', ())])

  def json_value_parser(self, coder_spec):
    """Recursively build a value parser matching the coder's structure."""
    component_parsers = [
        self.json_value_parser(c) for c in coder_spec.get('components', ())]
    return lambda x: self._urn_to_json_value_parser[coder_spec['urn']](
        x, *component_parsers)

  # Used when --fix is passed.
  fix = False
  to_fix = {}  # (doc index, expected bytes) -> actual bytes

  @classmethod
  def tearDownClass(cls):
    # In --fix mode, rewrite the YAML spec file replacing each stale expected
    # encoding with the encoding this SDK actually produced.
    # NOTE(review): files opened here are never explicitly closed.
    if cls.fix and cls.to_fix:
      print("FIXING", len(cls.to_fix), "TESTS")
      doc_sep = '\n---\n'
      docs = open(STANDARD_CODERS_YAML).read().split(doc_sep)

      def quote(s):
        # Spec keys are latin1-escaped JSON strings with literal \0 for NUL.
        return json.dumps(s.decode('latin1')).replace(r'\u0000', r'\0')
      for (doc_ix, expected_encoded), actual_encoded in cls.to_fix.items():
        print(quote(expected_encoded), "->", quote(actual_encoded))
        docs[doc_ix] = docs[doc_ix].replace(
            quote(expected_encoded) + ':', quote(actual_encoded) + ':')
      open(STANDARD_CODERS_YAML, 'w').write(doc_sep.join(docs))
def encode_nested(coder, value, nested=True):
  """Encode ``value`` with ``coder`` and return the raw encoded bytes."""
  out_stream = coder_impl.create_OutputStream()
  coder.get_impl().encode_to_stream(value, out_stream, nested)
  return out_stream.get()
def decode_nested(coder, encoded, nested=True):
  """Decode ``encoded`` bytes back into a value using ``coder``."""
  in_stream = coder_impl.create_InputStream(encoded)
  return coder.get_impl().decode_from_stream(in_stream, nested)
if __name__ == '__main__':
  # ``--fix`` rewrites standard_coders.yaml with the actual encodings instead
  # of failing; strip it so unittest doesn't reject the unknown flag.
  if '--fix' in sys.argv:
    StandardCodersTest.fix = True
    sys.argv.remove('--fix')
  unittest.main()
|
{
"content_hash": "4b862b3b6cebc97711525f76bfc10948",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 79,
"avg_line_length": 37.16197183098591,
"alnum_prop": 0.638809929884404,
"repo_name": "tgroh/incubator-beam",
"id": "f704c490c9753e311455f361ab6740be9fccc6f7",
"size": "6062",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/coders/standard_coders_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "22449"
},
{
"name": "Java",
"bytes": "9720078"
},
{
"name": "Protocol Buffer",
"bytes": "1407"
},
{
"name": "Shell",
"bytes": "10104"
}
],
"symlink_target": ""
}
|
class RevisionDocumentNotFound(RuntimeError):
    """Raised when a revision's document cannot be retrieved."""
    # The redundant ``pass`` was removed: a docstring/method body already
    # makes the class body non-empty.

    def __str__(self):
        # Render as the bare class name, e.g. "RevisionDocumentNotFound".
        return self.__class__.__name__
|
{
"content_hash": "e1fd726d2d6093aa6ab6d864b9c28eed",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 45,
"avg_line_length": 23.6,
"alnum_prop": 0.6271186440677966,
"repo_name": "eranroz/revscoring",
"id": "594d6f057449217f41ead9d097cf48f0dd8a668d",
"size": "118",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "revscoring/errors.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "249956"
}
],
"symlink_target": ""
}
|
class NagiosProperties(object):
    """Column/field names used when parsing Nagios status data."""
    # Number of columns expected in a Nagios status table row.
    NUM_COLUMNS = 7
    RESOURCE_TYPE = 'resource_type'
    RESOURCE_NAME = 'resource_name'
    SERVICE = 'service'
    STATUS = 'status'
    LAST_CHECK = 'last_check'
    DURATION = 'duration'
    ATTEMPT = 'attempt'
    STATUS_INFO = 'status_info'
    # Datasource identifier.
    NAGIOS = 'nagios'
class NagiosTestStatus(object):
    """The standard Nagios check states."""
    OK = 'OK'
    WARNING = 'WARNING'
    CRITICAL = 'CRITICAL'
    UNKNOWN = 'UNKNOWN'
|
{
"content_hash": "14a50e65c1ac969201d8789e6fe22314",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 35,
"avg_line_length": 23.666666666666668,
"alnum_prop": 0.6267605633802817,
"repo_name": "openstack/vitrage",
"id": "e901c6e8ef73de03927029e0e8e94ca2405d78c1",
"size": "1000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vitrage/datasources/nagios/properties.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "26541"
},
{
"name": "Mako",
"bytes": "896"
},
{
"name": "Python",
"bytes": "2074427"
},
{
"name": "Shell",
"bytes": "17668"
}
],
"symlink_target": ""
}
|
"""Command-line skeleton application for the I-Go calendar API.
Usage:
$ python i-go.py
You can also get help on all the command-line flags the program understands
by running:
$ python i-go.py --help
"""
import argparse
import httplib2
import os
import sys
import re
from datetime import datetime as dt
from urllib import quote
from indicoAPI import *
from configobj import ConfigObj
from hashlib import sha256 as sha
from apiclient import discovery
from oauth2client import file
from oauth2client import client
from oauth2client import tools
# Parser for command-line arguments.
parser = argparse.ArgumentParser(
    description=__doc__,
    formatter_class=argparse.RawDescriptionHelpFormatter,
    parents=[tools.argparser])
parser.add_argument('-c', '--category',
    type=int,
    help='Consider only event belonging to the specified category.')
parser.add_argument('-f', '--force-past-update',
    action='store_true',
    default=False,
    help='Try to force events that are in the past with respect to the runtime.')
# OAuth2 client secrets file shipped next to this script; FLOW drives the
# authorization dance for the two Calendar scopes below.
CLIENT_SECRETS = os.path.join(os.path.dirname(__file__), 'client_secrets.json')
FLOW = client.flow_from_clientsecrets(CLIENT_SECRETS,
    scope=[
        'https://www.googleapis.com/auth/calendar',
        'https://www.googleapis.com/auth/calendar.readonly',
    ],
    message=tools.message_if_missing(CLIENT_SECRETS))
def check_if_updated(indico_event, google_event):
    """Copy changed fields from *indico_event* into *google_event*.

    Keys in ``blacklist`` are skipped: reminders may be ordered differently
    between the two services and descriptions are left alone.

    :param indico_event: dict of event fields built from Indico data.
    :param google_event: dict of the corresponding Google Calendar event;
        mutated in place with any differing values. Every non-blacklisted
        key of *indico_event* must already exist in *google_event*.
    :return: tuple ``(updated, google_event)`` where ``updated`` is True iff
        at least one field was copied over.
    """
    blacklist = ('reminders', 'description')
    updated = False
    # Iterate items directly and use != instead of ``not a == b``.
    for key, new_value in indico_event.items():
        # Skip reminders since they could be ordered differently
        if key in blacklist:
            continue
        if new_value != google_event[key]:
            updated = True
            google_event[key] = new_value
    return (updated, google_event)
def main(argv):
    """Sync the user's selected Indico events into a Google Calendar (Python 2)."""
    # Parse the command-line flags.
    flags = parser.parse_args(argv[1:])
    config = ConfigObj('i-go.cfg')
    # If the credentials don't exist or are invalid run through the native client
    # flow. The Storage object will ensure that if successful the good
    # credentials will get written back to the file.
    storage = file.Storage('sample.dat')
    credentials = storage.get()
    if credentials is None or credentials.invalid:
        credentials = tools.run_flow(FLOW, storage, flags)
    # Create an httplib2.Http object to handle our HTTP requests and authorize it
    # with our good Credentials.
    http = httplib2.Http()
    http = credentials.authorize(http)
    # Construct the service object for the interacting with the Calendar API.
    service = discovery.build('calendar', 'v3', http=http)
    cfg = config.dict()
    # The sha of the calendar id is used to 'localize' each event to a
    # specific calendar, so that the user can add the same events to
    # multiple calendars avoid conflicts in the unique event's id.
    google_calendar_id = cfg['GoogleCalendar']['id']
    google_calendar_sha = sha(google_calendar_id).hexdigest()
    indico_cfg = cfg['IndicoCalendars']
    # Get events already present in Google Calendar.
    # NOTE(review): each loop iteration *overwrites* google_events, so only the
    # last page's 'items' survive for the comparison below — looks like a
    # pagination bug; verify against the Calendar API list() semantics.
    google_events = None
    page_token = None
    try:
        while True:
            google_events = service.events().list(calendarId = google_calendar_id,
                timeMin = dt.now().strftime('%Y-%m-%dT00:00:01Z'),
                showDeleted = True,
                pageToken = page_token).execute()
            page_token = google_events.get('nextPageToken')
            if not page_token:
                break
    except client.AccessTokenRefreshError:
        print ("The credentials have been revoked or expired, please re-run"
            "the application to re-authorize")
    for c in indico_cfg.keys():
        # Honor -c/--category: process only the requested category, if given.
        if flags.category and flags.category != int(indico_cfg[c]['cat']):
            print("Ignoring category %s" % indico_cfg[c]['cat'])
            continue
        indico_events = []
        print "Querying %s [cat_id %s]" % (indico_cfg[c]['name'], indico_cfg[c]['cat'])
        # Get user's selected events from indico
        indico_api = IndicoAPI(indico_cfg[c], 'json')
        events = indico_api.request()
        if events['count'] > 0:
            print 'Found %d event(s)' % events['count']
        for ev in events['results']:
            print 'Studying ... %s' % ev['title']
            if indico_cfg[c]['params'].get('detail', None):
                if len(ev['contributions']) > 0:
                    # Indico currently does not apply the 'from' filtering to
                    # the contribution list, so that we can happen to receive
                    # events in the past if they belong to a contribution that
                    # extends in the future. Do a hard-coded check here to
                    # discard such events.
                    for contr in ev['contributions']:
                        try:
                            if dt.strptime('%sT%s' % (contr['startDate']['date'],
                                contr['startDate']['time']),
                                '%Y-%m-%dT%H:%M:%S') < dt.now() and not flags.force_past_update:
                                print 'Skipping past contribution ... "%s"' % contr['title']
                                continue
                            indico_events.append(
                                indico_api.indico_to_google_format(contr,
                                    google_calendar_sha,
                                    ev['id'], contr['id']))
                        # NOTE(review): bare except hides any error in the
                        # conversion, not only malformed events.
                        except:
                            print "skipping malformed event: ", contr
            else:
                indico_events.append(
                    indico_api.indico_to_google_format(ev,
                        google_calendar_sha,
                        ev['id']))
        # Insert new events; update existing ones only when a field changed.
        for indico_e in indico_events:
            new_event = True
            for google_e in google_events['items']:
                if google_e['iCalUID'] == indico_e['iCalUID']:
                    new_event = False
                    (updated, new_google_e) = check_if_updated(indico_e, google_e)
                    if updated:
                        # Bump the sequence number as required for updates.
                        new_google_e['sequence'] = google_e['sequence'] + 1
                        print 'Event "%s" already existing ... updating' % new_google_e['summary']
                        imported_event = service.events().update(calendarId = google_calendar_id,
                            eventId = new_google_e['id'],
                            body = new_google_e).execute()
                    else:
                        print "Event NOT updated"
                    break;
            if new_event:
                print 'New Event, creating...'
                created_event = service.events().insert(calendarId = google_calendar_id,
                    body=indico_e).execute()
# For more information on the Calendar API you can visit:
#
# https://developers.google.com/google-apps/calendar/firstapp
#
# For more information on the Calendar API Python library surface you
# can visit:
#
# https://developers.google.com/resources/api-libraries/documentation/calendar/v3/python/latest/
#
# For information on the Python Client Library visit:
#
# https://developers.google.com/api-client-library/python/start/get_started
# Entry point: pass the full argv so main() can parse its own flags.
if __name__ == '__main__':
    main(sys.argv)
|
{
"content_hash": "376e77f5be94681a0e72e3a9fd13fd5d",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 98,
"avg_line_length": 39.31666666666667,
"alnum_prop": 0.6053412462908012,
"repo_name": "rovere/I-Go",
"id": "8d6bf46f69f4fdd9b76e7931fdc9fd1901b45761",
"size": "7790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "i-go.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "12763"
}
],
"symlink_target": ""
}
|
import MySQLdb
class DB_MYSQL:
    """Thin wrapper around a MySQLdb connection/cursor pair."""

    def __init__(self):
        self.conn = None
        self.cur = None

    def connect(self, host, port, user, passwd, db, charset='utf8'):
        """Open a connection and create a cursor.

        BUG FIX: the ``charset`` parameter was previously ignored
        (the call hard-coded charset='utf8'); it is now forwarded.
        Returns True on success; MySQLdb raises on failure.
        """
        self.conn = MySQLdb.connect(host, user, passwd, db, port,
                                    charset=charset)
        self.cur = self.conn.cursor()
        return True

    def execute(self, sql, args=None):
        """Execute a statement.

        ``args`` enables driver-side parameter binding (use '%s'
        placeholders) instead of string-built SQL, preventing SQL
        injection. The default ``None`` keeps old one-argument calls
        working unchanged.
        """
        return self.cur.execute(sql, args)

    def commit(self):
        """Commit the current transaction."""
        return self.conn.commit()

    def close(self):
        """Close the cursor, then the connection."""
        self.cur.close()
        self.conn.close()
class DB_CONFIG:
    """Static connection settings (placeholder template values).

    BUG FIX: the original read ``host = ip`` and ``port = port``,
    referencing names that are never defined, so importing this module
    raised NameError. Literal placeholder values are used instead;
    replace them with your real deployment settings.
    """
    host = '127.0.0.1'
    port = 3306  # default MySQL port
    user = 'user'
    password = 'password'
    db = 'db_name'
|
{
"content_hash": "29f6001fc1f786450535d3c0aada8266",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 81,
"avg_line_length": 23.424242424242426,
"alnum_prop": 0.46183699870633893,
"repo_name": "guoyuyanggyy/distributed-transcoding",
"id": "6c0338f3bf1868b5fb94f033baa23b288dcdd95c",
"size": "788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22647"
}
],
"symlink_target": ""
}
|
"""Class to hold all cover accessories."""
import logging
from pyhap.const import CATEGORY_GARAGE_DOOR_OPENER, CATEGORY_WINDOW_COVERING
from homeassistant.components.cover import (
ATTR_CURRENT_POSITION, ATTR_POSITION, DOMAIN, SUPPORT_STOP)
from homeassistant.const import (
ATTR_ENTITY_ID, ATTR_SUPPORTED_FEATURES, SERVICE_CLOSE_COVER,
SERVICE_OPEN_COVER, SERVICE_SET_COVER_POSITION, SERVICE_STOP_COVER,
STATE_CLOSED, STATE_OPEN)
from . import TYPES
from .accessories import HomeAccessory, debounce
from .const import (
CHAR_CURRENT_DOOR_STATE, CHAR_CURRENT_POSITION, CHAR_POSITION_STATE,
CHAR_TARGET_DOOR_STATE, CHAR_TARGET_POSITION, SERV_GARAGE_DOOR_OPENER,
SERV_WINDOW_COVERING)
_LOGGER = logging.getLogger(__name__)
@TYPES.register('GarageDoorOpener')
class GarageDoorOpener(HomeAccessory):
    """Garage Door Opener accessory for a cover entity.

    The cover entity must be in the 'garage' device class
    and support no more than open, close, and stop.
    """

    def __init__(self, *args):
        """Set up the garage-door service and its two characteristics."""
        super().__init__(*args, category=CATEGORY_GARAGE_DOOR_OPENER)
        # True while a state change was initiated from HomeKit itself.
        self._flag_state = False

        service = self.add_preload_service(SERV_GARAGE_DOOR_OPENER)
        self.char_current_state = service.configure_char(
            CHAR_CURRENT_DOOR_STATE, value=0)
        self.char_target_state = service.configure_char(
            CHAR_TARGET_DOOR_STATE, value=0, setter_callback=self.set_state)

    def set_state(self, value):
        """Change garage state if call came from HomeKit."""
        _LOGGER.debug('%s: Set state to %d', self.entity_id, value)
        self._flag_state = True
        params = {ATTR_ENTITY_ID: self.entity_id}
        if value == 0:
            # Report an intermediate door-state value (3) while opening;
            # numeric meaning comes from the HAP door-state enum — confirm.
            if self.char_current_state.value != value:
                self.char_current_state.set_value(3)
            self.call_service(DOMAIN, SERVICE_OPEN_COVER, params)
        elif value == 1:
            # Intermediate door-state value (2) while closing — confirm.
            if self.char_current_state.value != value:
                self.char_current_state.set_value(2)
            self.call_service(DOMAIN, SERVICE_CLOSE_COVER, params)

    def update_state(self, new_state):
        """Update cover state after state changed."""
        if new_state.state not in (STATE_OPEN, STATE_CLOSED):
            return
        current = 0 if new_state.state == STATE_OPEN else 1
        self.char_current_state.set_value(current)
        # Only mirror into the target when HomeKit did not start the move.
        if not self._flag_state:
            self.char_target_state.set_value(current)
        self._flag_state = False
@TYPES.register('WindowCovering')
class WindowCovering(HomeAccessory):
    """Window accessory for a cover entity.

    The cover entity must support: set_cover_position.
    """

    def __init__(self, *args):
        """Set up the window-covering service and its characteristics."""
        super().__init__(*args, category=CATEGORY_WINDOW_COVERING)
        # Position last requested via HomeKit, or None.
        self._homekit_target = None

        service = self.add_preload_service(SERV_WINDOW_COVERING)
        self.char_current_position = service.configure_char(
            CHAR_CURRENT_POSITION, value=0)
        self.char_target_position = service.configure_char(
            CHAR_TARGET_POSITION, value=0, setter_callback=self.move_cover)

    @debounce
    def move_cover(self, value):
        """Move cover to value if call came from HomeKit."""
        _LOGGER.debug('%s: Set position to %d', self.entity_id, value)
        self._homekit_target = value
        self.call_service(
            DOMAIN, SERVICE_SET_COVER_POSITION,
            {ATTR_ENTITY_ID: self.entity_id, ATTR_POSITION: value}, value)

    def update_state(self, new_state):
        """Update cover position after state changed."""
        position = new_state.attributes.get(ATTR_CURRENT_POSITION)
        if not isinstance(position, int):
            return
        self.char_current_position.set_value(position)
        # Update the target only when the move was not HomeKit-initiated,
        # or once we are within 6% of the requested target.
        if (self._homekit_target is None
                or abs(position - self._homekit_target) < 6):
            self.char_target_position.set_value(position)
            self._homekit_target = None
@TYPES.register('WindowCoveringBasic')
class WindowCoveringBasic(HomeAccessory):
    """Window accessory for a cover entity.

    The cover entity must support: open_cover, close_cover,
    stop_cover (optional).
    """

    def __init__(self, *args):
        """Set up the window-covering service and its characteristics."""
        super().__init__(*args, category=CATEGORY_WINDOW_COVERING)
        features = self.hass.states.get(self.entity_id) \
            .attributes.get(ATTR_SUPPORTED_FEATURES)
        self._supports_stop = features & SUPPORT_STOP

        service = self.add_preload_service(SERV_WINDOW_COVERING)
        self.char_current_position = service.configure_char(
            CHAR_CURRENT_POSITION, value=0)
        self.char_target_position = service.configure_char(
            CHAR_TARGET_POSITION, value=0, setter_callback=self.move_cover)
        self.char_position_state = service.configure_char(
            CHAR_POSITION_STATE, value=2)

    @debounce
    def move_cover(self, value):
        """Move cover to value if call came from HomeKit."""
        _LOGGER.debug('%s: Set position to %d', self.entity_id, value)
        if self._supports_stop:
            # Three-way mapping: high -> open, low -> close, middle -> stop.
            if value > 70:
                service, position = SERVICE_OPEN_COVER, 100
            elif value < 30:
                service, position = SERVICE_CLOSE_COVER, 0
            else:
                service, position = SERVICE_STOP_COVER, 50
        else:
            # Without stop support, snap to fully open or fully closed.
            if value >= 50:
                service, position = SERVICE_OPEN_COVER, 100
            else:
                service, position = SERVICE_CLOSE_COVER, 0
        self.call_service(DOMAIN, service, {ATTR_ENTITY_ID: self.entity_id})

        # Snap the current/target position to the expected final position.
        self.char_current_position.set_value(position)
        self.char_target_position.set_value(position)
        self.char_position_state.set_value(2)

    def update_state(self, new_state):
        """Update cover position after state changed."""
        hk_position = {STATE_OPEN: 100, STATE_CLOSED: 0}.get(new_state.state)
        if hk_position is None:
            return
        self.char_current_position.set_value(hk_position)
        self.char_target_position.set_value(hk_position)
        self.char_position_state.set_value(2)
|
{
"content_hash": "ed83b306e6090ebcfbbaa148921adfdc",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 77,
"avg_line_length": 40.851851851851855,
"alnum_prop": 0.6441523118766999,
"repo_name": "jamespcole/home-assistant",
"id": "5273480b6cef00d8a09f1ee48429bf518e03d90a",
"size": "6618",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "homeassistant/components/homekit/type_covers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "826"
},
{
"name": "Python",
"bytes": "14822074"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
}
|
import os
from tautils import debug_output, error_output
from tapalette import palette_names
from taconstants import ICON_SIZE, CATEGORY_LAYER, TAB_LAYER
from tasprite_factory import SVG, svg_from_file, svg_str_to_pixbuf
from sprites import Sprite
def create_toolbar_background(sprite_list, width):
    ''' Build and return the toolbar background sprite for the selectors. '''
    pixbuf = svg_str_to_pixbuf(SVG().toolbar(2 * width, ICON_SIZE))
    toolbar_spr = Sprite(sprite_list, 0, 0, pixbuf)
    toolbar_spr.type = 'toolbar'
    toolbar_spr.set_layer(CATEGORY_LAYER)
    return toolbar_spr
class Selector():
    ''' Selector class abstraction '''

    def __init__(self, turtle_window, n):
        '''This class handles the display of palette selectors (Only relevant
        to GNOME version and very old versions of Sugar).
        '''
        self.shapes = []
        self.spr = None
        self._turtle_window = turtle_window
        self._index = n
        if not n < len(palette_names):
            # Shouldn't happen, but hey...
            debug_output('palette index %d is out of range' % n,
                         self._turtle_window.running_sugar)
            self._name = 'extras'
        else:
            self._name = palette_names[n]
        # The off/on lookups were previously two near-identical 15-line
        # copies of the same search; factored into _load_shape().
        off_shape = self._load_shape('off')
        on_shape = self._load_shape('on')
        self.shapes.append(off_shape)
        self.shapes.append(on_shape)
        x = int(ICON_SIZE * self._index)
        self.spr = Sprite(self._turtle_window.sprite_list, x, 0, off_shape)
        self.spr.type = 'selector'
        self.spr.name = self._name
        self.set_layer()

    def _load_shape(self, suffix):
        ''' Load the '<name><suffix>.svg' icon from the icon paths,
        falling back to the generic 'extras' icon when not found. '''
        filename = '%s%s.svg' % (self._name, suffix)
        for path in self._turtle_window.icon_paths:
            candidate = os.path.join(path, filename)
            if os.path.exists(candidate):
                return svg_str_to_pixbuf(svg_from_file(candidate))
        # Fall back to the bundled 'extras' icon and report the failure.
        shape = svg_str_to_pixbuf(svg_from_file(os.path.join(
            self._turtle_window.icon_paths[0], 'extras%s.svg' % suffix)))
        error_output('Unable to open %s' % filename,
                     self._turtle_window.running_sugar)
        return shape

    def set_shape(self, i):
        ''' Show the off (0) or on (1) variant of the selector. '''
        if self.spr is not None and i in [0, 1]:
            self.spr.set_shape(self.shapes[i])

    def set_layer(self, layer=TAB_LAYER):
        ''' Place the selector sprite on the given layer. '''
        if self.spr is not None:
            self.spr.set_layer(layer)

    def hide(self):
        ''' Hide the selector sprite. '''
        if self.spr is not None:
            self.spr.hide()
|
{
"content_hash": "b2a7179fb38ca40e8bd1e39ee66704a9",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 78,
"avg_line_length": 35.46590909090909,
"alnum_prop": 0.5776994553027875,
"repo_name": "walterbender/turtleconfusion",
"id": "87707060a73985c1fc0b0439c5f0bc077b0a2d93",
"size": "4236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TurtleArt/taselector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1298645"
}
],
"symlink_target": ""
}
|
from .visualize import *
from .pp import pp
from .inspect import *
from .expr import *
from .exploration import *
from .util import *
|
{
"content_hash": "1892fe1447d56f2c0362f89e5f60d3c0",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 26,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.7388059701492538,
"repo_name": "axt/angr-utils",
"id": "6dc45e14c7eeb32fa22b9919ff2c2bd2e743d1a7",
"size": "134",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angrutils/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "15374"
}
],
"symlink_target": ""
}
|
from robot.errors import DataError
from robot.utils import (get_error_details, is_string,
split_args_from_name_or_path, type_name, Importer)
from .visitor import SuiteVisitor
class ModelModifier(SuiteVisitor):
    """Applies user-supplied model-modifier visitors to a test suite."""

    def __init__(self, visitors, empty_suite_ok, logger):
        self._log_error = logger.error
        self._empty_suite_ok = empty_suite_ok
        self._visitors = list(self._yield_visitors(visitors))

    def visit_suite(self, suite):
        """Run every modifier over the suite; a broken modifier is logged,
        not fatal. Raises DataError if no tests remain afterwards."""
        for visitor in self._visitors:
            try:
                suite.visit(visitor)
            except:
                # Deliberately broad: details (including non-Exception
                # failures) are recovered via get_error_details().
                message, details = get_error_details()
                self._log_error("Executing model modifier '%s' failed: %s\n%s"
                                % (type_name(visitor), message, details))
        if not (suite.test_count or self._empty_suite_ok):
            raise DataError("Suite '%s' contains no tests after model "
                            "modifiers." % suite.name)

    def _yield_visitors(self, visitors):
        # Strings are import paths (optionally with arguments); anything
        # else is assumed to already be a visitor instance.
        importer = Importer('model modifier')
        for visitor in visitors:
            try:
                if is_string(visitor):
                    name, args = split_args_from_name_or_path(visitor)
                    yield importer.import_class_or_module(name, args)
                else:
                    yield visitor
            except DataError as err:
                self._log_error(err.message)
|
{
"content_hash": "db75e0abf8f37666f2bc3feab44a487e",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 78,
"avg_line_length": 39.08108108108108,
"alnum_prop": 0.5601659751037344,
"repo_name": "jaloren/robotframework",
"id": "9ee8bf5e57ebdb32e4435474e7d3b44c2888e8f7",
"size": "2090",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "src/robot/model/modifier.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "245"
},
{
"name": "CSS",
"bytes": "23490"
},
{
"name": "HTML",
"bytes": "140926"
},
{
"name": "Java",
"bytes": "58264"
},
{
"name": "JavaScript",
"bytes": "160797"
},
{
"name": "Python",
"bytes": "2241544"
},
{
"name": "RobotFramework",
"bytes": "2074646"
},
{
"name": "Shell",
"bytes": "281"
}
],
"symlink_target": ""
}
|
from __future__ import division, unicode_literals
"""
This module implements Compatibility corrections for mixing runs of different
functionals.
"""
import six
from six.moves import filter, map
__author__ = "Shyue Ping Ong, Anubhav Jain, Stephen Dacek, Sai Jayaraman"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__date__ = "Mar 19, 2012"
import os
from collections import defaultdict
from monty.design_patterns import cached_class
from monty.serialization import loadfn
from pymatgen.io.vasp.sets import MITVaspInputSet, MPVaspInputSet
from pymatgen.core.periodic_table import Element
from pymatgen.analysis.structure_analyzer import oxide_type
import abc
class CompatibilityError(Exception):
    """
    Exception class for Compatibility. Raised by attempting correction
    on an incompatible calculation.
    """

    def __init__(self, msg):
        # BUG FIX: pass msg to Exception.__init__ so that .args, repr()
        # and pickling behave like a normal exception; keep the .msg
        # attribute for backward compatibility with existing callers.
        super(CompatibilityError, self).__init__(msg)
        self.msg = msg

    def __str__(self):
        return self.msg
class Correction(six.with_metaclass(abc.ABCMeta, object)):
    """
    A Correction class is a pre-defined scheme for correcting a computed
    entry based on the type and chemistry of the structure and the
    calculation parameters. All Correction classes must implement a
    get_correction method; correct_entry is provided by this base class.
    """
    @abc.abstractmethod
    def get_correction(self, entry):
        """
        Returns correction for a single entry.
        Args:
            entry: A ComputedEntry object.
        Returns:
            The energy correction to be applied.
        Raises:
            CompatibilityError if entry is not compatible.
        """
        return
    def correct_entry(self, entry):
        """
        Corrects a single entry in place (accumulates onto
        entry.correction) and returns it.
        Args:
            entry: A ComputedEntry object.
        Returns:
            The processed entry (same object, mutated).
        Raises:
            CompatibilityError if entry is not compatible.
        """
        # Note: += means repeated calls keep accumulating corrections.
        entry.correction += self.get_correction(entry)
        return entry
class PotcarCorrection(Correction):
    """
    Checks that POTCARs are valid within a pre-defined input set. This
    ensures that calculations performed using different InputSets are not
    compared against each other.
    Entry.parameters must contain a "potcar_symbols" key that is a list of
    all POTCARs used in the run. Again, using the example of an Fe2O3 run
    using Materials Project parameters, this would look like
    entry.parameters["potcar_symbols"] = ['PAW_PBE Fe_pv 06Sep2000',
    'PAW_PBE O 08Apr2002'].
    Args:
        input_set: InputSet object used to generate the runs (used to check
            for correct potcar symbols)
        check_hash (bool): If true, uses the potcar hash to check for valid
            potcars. If false, uses the potcar symbol (Less reliable).
            Defaults to False.
    Raises:
        ValueError if entry does not contain "potcar_symbols" key.
        CompatibilityError if wrong potcar symbols
    """
    def __init__(self, input_set, check_hash=False):
        # Newer input sets store dicts ({"hash": ..., "symbol": ...}) as
        # potcar_settings values; older ones store plain symbol strings.
        # Probe the last value to decide which shape we have.
        if isinstance(list(input_set.potcar_settings.values())[-1], dict):
            if check_hash:
                self.valid_potcars = {k: d["hash"] for k, d in
                                      input_set.potcar_settings.items()}
            else:
                self.valid_potcars = {k: d["symbol"] for k, d in
                                      input_set.potcar_settings.items()}
        else:
            # Plain-string settings carry no hashes, so hash checking is
            # impossible with this input set.
            if check_hash:
                raise ValueError('Cannot check hashes of potcars,'
                                 ' hashes are not set')
            else:
                self.valid_potcars = {k: d for k, d in
                                      input_set.potcar_settings.items()}
        self.input_set = input_set
        self.check_hash = check_hash
    def get_correction(self, entry):
        # Build the set of identifiers actually used in the run, then
        # compare against the expected identifiers for the composition.
        if self.check_hash:
            if entry.parameters.get("potcar_spec"):
                psp_settings = set([d.get("hash")
                                    for d in entry.parameters[
                                        "potcar_spec"] if d])
            else:
                raise ValueError('Cannot check hash '
                                 'without potcar_spec field')
        else:
            # Prefer potcar_spec when present; fall back to the legacy
            # "potcar_symbols" list of 'PAW_PBE Fe_pv 06Sep2000' strings,
            # keeping only the symbol token (index 1).
            if entry.parameters.get("potcar_spec"):
                psp_settings = set([d.get("titel").split()[1]
                                    for d in entry.parameters[
                                        "potcar_spec"] if d])
            else:
                psp_settings = set([sym.split()[1]
                                    for sym in entry.parameters[
                                        "potcar_symbols"] if sym])
        if {self.valid_potcars[str(el)] for el in
                entry.composition.elements} != psp_settings:
            raise CompatibilityError('Incompatible potcar')
        # This check applies no energy correction; it only validates.
        return 0
    def __str__(self):
        return "{} Potcar Correction".format(self.input_set.name)
@cached_class
class GasCorrection(Correction):
    """
    Correct gas energies to obtain the right formation energies. Note that
    this depends on calculations being run within the same input set.
    Args:
        config_file: Path to the selected compatibility.yaml config file.
        correct_peroxide: Specify whether peroxide/superoxide/ozonide
            corrections are to be applied or not.
    """
    def __init__(self, config_file, correct_peroxide=True):
        c = loadfn(config_file)
        self.cpd_energies = c['Advanced']['CompoundEnergies']
        self.oxide_correction = c['OxideCorrections']
        self.name = c['Name']
        self.correct_peroxide = correct_peroxide
    def get_correction(self, entry):
        comp = entry.composition
        rform = entry.composition.reduced_formula
        # Compounds with a fixed reference energy: replace the computed
        # energy outright (returned delta makes total equal the reference).
        if rform in self.cpd_energies:
            return self.cpd_energies[rform] * comp.num_atoms \
                - entry.uncorrected_energy
        correction = 0
        #Check for oxide, peroxide, superoxide, and ozonide corrections.
        if self.correct_peroxide:
            if len(comp) >= 2 and Element("O") in comp:
                if entry.data.get("oxide_type"):
                    # Oxide type was precomputed and stored on the entry.
                    if entry.data["oxide_type"] in self.oxide_correction:
                        ox_corr = self.oxide_correction[
                            entry.data["oxide_type"]]
                        correction += ox_corr * comp["O"]
                    # Hydroxides additionally receive the plain oxide
                    # correction on top of any type-specific one.
                    if entry.data["oxide_type"] == "hydroxide":
                        ox_corr = self.oxide_correction["oxide"]
                        correction += ox_corr * comp["O"]
                elif hasattr(entry, "structure"):
                    # Classify the oxide from the structure itself; the
                    # per-bond correction uses the O-O bond count.
                    ox_type, nbonds = oxide_type(entry.structure, 1.05,
                                                 return_nbonds=True)
                    if ox_type in self.oxide_correction:
                        correction += self.oxide_correction[ox_type] * \
                            nbonds
                    elif ox_type == "hydroxide":
                        correction += self.oxide_correction["oxide"] * comp["O"]
                else:
                    # No structure available: fall back to known formula
                    # lists to guess peroxide/superoxide/ozonide character.
                    if rform in UCorrection.common_peroxides:
                        correction += self.oxide_correction["peroxide"] * \
                            comp["O"]
                    elif rform in UCorrection.common_superoxides:
                        correction += self.oxide_correction["superoxide"] * \
                            comp["O"]
                    elif rform in UCorrection.ozonides:
                        correction += self.oxide_correction["ozonide"] * \
                            comp["O"]
            elif Element("O") in comp.elements and len(comp.elements)\
                    > 1:
                correction += self.oxide_correction['oxide'] * comp["O"]
        else:
            # Peroxide handling disabled: treat all O as plain oxide.
            correction += self.oxide_correction['oxide'] * comp["O"]
        return correction
    def __str__(self):
        return "{} Gas Correction".format(self.name)
@cached_class
class AqueousCorrection(Correction):
    """
    This class implements aqueous phase compound corrections for elements
    and H2O.

    Args:
        config_file: Path to the selected compatibility.yaml config file.
    """

    def __init__(self, config_file):
        config = loadfn(config_file)
        self.cpd_energies = config['AqueousCompoundEnergies']
        self.name = config["Name"]

    def get_correction(self, entry):
        composition = entry.composition
        formula = composition.reduced_formula
        correction = 0
        if formula in self.cpd_energies:
            if formula in ["H2", "H2O"]:
                # Pin these compounds to their reference energies.
                correction = (self.cpd_energies[formula]
                              * composition.num_atoms
                              - entry.uncorrected_energy
                              - entry.correction)
            else:
                correction += (self.cpd_energies[formula]
                               * composition.num_atoms)
        if not formula == "H2O":
            # Hydration-style adjustment scaled by available H2O units.
            correction += 0.5 * 2.46 * min(composition["H"] / 2.0,
                                           composition["O"])
        return correction

    def __str__(self):
        return "{} Aqueous Correction".format(self.name)
@cached_class
class UCorrection(Correction):
    """
    This class implements the GGA/GGA+U mixing scheme, which allows mixing of
    entries. Entry.parameters must contain a "hubbards" key which is a dict
    of all non-zero Hubbard U values used in the calculation. For example,
    if you ran a Fe2O3 calculation with Materials Project parameters,
    this would look like entry.parameters["hubbards"] = {"Fe": 5.3}
    If the "hubbards" key is missing, a GGA run is assumed.
    It should be noted that ComputedEntries assimilated using the
    pymatgen.apps.borg package and obtained via the MaterialsProject REST
    interface using the pymatgen.matproj.rest package will automatically have
    these fields populated.
    Args:
        config_file: Path to the selected compatibility.yaml config file.
        input_set: InputSet object (to check for the +U settings)
        compat_type: Two options, GGA or Advanced. GGA means all GGA+U
            entries are excluded. Advanced means mixing scheme is
            implemented to make entries compatible with each other,
            but entries which are supposed to be done in GGA+U will have the
            equivalent GGA entries excluded. For example, Fe oxides should
            have a U value under the Advanced scheme. A GGA Fe oxide run
            will therefore be excluded under the scheme.
    """
    # Formula lists used by GasCorrection's structure-less fallback to
    # classify oxygen-containing compounds.
    common_peroxides = ["Li2O2", "Na2O2", "K2O2", "Cs2O2", "Rb2O2", "BeO2",
                        "MgO2", "CaO2", "SrO2", "BaO2"]
    common_superoxides = ["LiO2", "NaO2", "KO2", "RbO2", "CsO2"]
    ozonides = ["LiO3", "NaO3", "KO3", "NaO5"]
    def __init__(self, config_file, input_set, compat_type):
        if compat_type not in ['GGA', 'Advanced']:
            raise CompatibilityError("Invalid compat_type {}"
                                     .format(compat_type))
        c = loadfn(config_file)
        self.input_set = input_set
        if compat_type == 'Advanced':
            self.u_settings = self.input_set.incar_settings["LDAUU"]
            self.u_corrections = c["Advanced"]["UCorrections"]
        else:
            # Pure-GGA scheme: no U settings means any entry with Hubbard
            # U values will fail the check in get_correction.
            self.u_settings = {}
            self.u_corrections = {}
        self.name = c["Name"]
        self.compat_type = compat_type
    def get_correction(self, entry):
        if entry.parameters.get("run_type", "GGA") == "HF":
            raise CompatibilityError('Invalid run type')
        calc_u = entry.parameters.get("hubbards", None)
        # Missing "hubbards" means a GGA run: all U values default to 0.
        calc_u = defaultdict(int) if calc_u is None else calc_u
        comp = entry.composition
        # Corrections and U settings are keyed by the most electronegative
        # element present (el.X is electronegativity).
        elements = sorted([el for el in comp.elements if comp[el] > 0],
                          key=lambda el: el.X)
        most_electroneg = elements[-1].symbol
        correction = 0
        ucorr = self.u_corrections.get(most_electroneg, {})
        usettings = self.u_settings.get(most_electroneg, {})
        for el in comp.elements:
            sym = el.symbol
            #Check for bad U values
            if calc_u.get(sym, 0) != usettings.get(sym, 0):
                raise CompatibilityError('Invalid U value on {}'.format(sym))
            if sym in ucorr:
                correction += float(ucorr[sym]) * comp[el]
        return correction
    def __str__(self):
        return "{} {} Correction".format(self.name, self.compat_type)
class Compatibility(object):
    """
    The Compatibility class combines a list of corrections to be applied to
    an entry or a set of entries. Note that some of the Corrections have
    interdependencies. For example, PotcarCorrection must always be used
    before any other compatibility. Also, GasCorrection("MP") must be used
    with PotcarCorrection("MP") (similarly with "MIT"). Typically,
    you should use the specific MaterialsProjectCompatibility and
    MITCompatibility subclasses instead.

    Args:
        corrections: List of corrections to apply.
    """

    def __init__(self, corrections):
        self.corrections = corrections

    def process_entry(self, entry):
        """
        Process a single entry with the chosen Corrections.

        Args:
            entry: A ComputedEntry object.
        Returns:
            The adjusted entry (entry.correction is overwritten) if it is
            compatible, otherwise None.
        """
        try:
            corrections = self.get_corrections_dict(entry)
        except CompatibilityError:
            return None
        entry.correction = sum(corrections.values())
        return entry

    def get_corrections_dict(self, entry):
        """
        Returns the corrections applied to a particular entry.

        Args:
            entry: A ComputedEntry object.
        Returns:
            ({correction_name: value}) with zero-valued corrections omitted.
        """
        applied = {}
        for correction in self.corrections:
            value = correction.get_correction(entry)
            if value != 0:
                applied[str(correction)] = value
        return applied

    def process_entries(self, entries):
        """
        Process a sequence of entries with the chosen Compatibility scheme.

        Args:
            entries: A sequence of entries.
        Returns:
            A list of adjusted entries; incompatible entries are dropped.
        """
        processed = (self.process_entry(e) for e in entries)
        return [e for e in processed if e]

    def get_explanation_dict(self, entry):
        """
        Provides an explanation dict of the corrections that are being
        applied for a given compatibility scheme. Inspired by the
        "explain" methods in many database methodologies.

        Args:
            entry: A ComputedEntry.
        Returns:
            (dict) of the form
            {"compatibility": "string",
             "uncorrected_energy": float,
             "corrected_energy": float or None,
             "corrections": [{"name": ..., "description": ..., "value": ...}]}
        """
        centry = self.process_entry(entry)
        if centry is None:
            uncorrected_energy = entry.uncorrected_energy
            corrected_energy = None
        else:
            uncorrected_energy = centry.uncorrected_energy
            corrected_energy = centry.energy
        corr_dict = self.get_corrections_dict(entry)
        # One explanation row per configured correction, in order, even
        # when its applied value is zero.
        corrections = [{"name": str(c),
                        "description": c.__doc__.split("Args")[0].strip(),
                        "value": corr_dict.get(str(c), 0)}
                       for c in self.corrections]
        return {"compatibility": self.__class__.__name__,
                "uncorrected_energy": uncorrected_energy,
                "corrected_energy": corrected_energy,
                "corrections": corrections}

    def explain(self, entry):
        """
        Prints an explanation of the corrections that are being applied for
        a given compatibility scheme. Inspired by the "explain" methods in
        many database methodologies.

        Args:
            entry: A ComputedEntry.
        """
        d = self.get_explanation_dict(entry)
        print("The uncorrected value of the energy of %s is %f eV" % (
            entry.composition, d["uncorrected_energy"]))
        print("The following corrections / screening are applied for %s:\n" %
              d["compatibility"])
        for c in d["corrections"]:
            print("%s correction: %s\n" % (c["name"],
                                           c["description"]))
            print("For the entry, this correction has the value %f eV." % c[
                "value"])
            print("-" * 30)
        print("The final energy after corrections is %f" % d[
            "corrected_energy"])
class MaterialsProjectCompatibility(Compatibility):
    """
    This class implements the GGA/GGA+U mixing scheme, which allows mixing of
    entries. Note that this should only be used for VASP calculations using the
    MaterialsProject parameters (see pymatgen.io.vaspio_set.MPVaspInputSet).
    Using this compatibility scheme on runs with different parameters is not
    valid.

    Args:
        compat_type: Two options, GGA or Advanced. GGA means all GGA+U
            entries are excluded. Advanced means mixing scheme is
            implemented to make entries compatible with each other,
            but entries which are supposed to be done in GGA+U will have the
            equivalent GGA entries excluded. For example, Fe oxides should
            have a U value under the Advanced scheme. A GGA Fe oxide run
            will therefore be excluded under the scheme.
        correct_peroxide: Specify whether peroxide/superoxide/ozonide
            corrections are to be applied or not.
        check_potcar_hash (bool): Use potcar hash to verify potcars are correct.
    """

    def __init__(self, compat_type="Advanced", correct_peroxide=True,
                 check_potcar_hash=False):
        """Assemble the MP correction chain (POTCAR check first)."""
        config = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              "MPCompatibility.yaml")
        input_set = MPVaspInputSet()
        corrections = [
            PotcarCorrection(input_set, check_hash=check_potcar_hash),
            GasCorrection(config, correct_peroxide=correct_peroxide),
            UCorrection(config, input_set, compat_type),
        ]
        super(MaterialsProjectCompatibility, self).__init__(corrections)
class MITCompatibility(Compatibility):
    """
    This class implements the GGA/GGA+U mixing scheme, which allows mixing of
    entries. Note that this should only be used for VASP calculations using the
    MIT parameters (see pymatgen.io.vaspio_set MITVaspInputSet). Using
    this compatibility scheme on runs with different parameters is not valid.

    Args:
        compat_type: Two options, GGA or Advanced. GGA means all GGA+U
            entries are excluded. Advanced means mixing scheme is
            implemented to make entries compatible with each other,
            but entries which are supposed to be done in GGA+U will have the
            equivalent GGA entries excluded. For example, Fe oxides should
            have a U value under the Advanced scheme. A GGA Fe oxide run
            will therefore be excluded under the scheme.
        correct_peroxide: Specify whether peroxide/superoxide/ozonide
            corrections are to be applied or not.
        check_potcar_hash (bool): Use potcar hash to verify potcars are correct.
    """

    def __init__(self, compat_type="Advanced", correct_peroxide=True,
                 check_potcar_hash=False):
        """Assemble the MIT correction chain (POTCAR check first)."""
        config = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              "MITCompatibility.yaml")
        input_set = MITVaspInputSet()
        corrections = [
            PotcarCorrection(input_set, check_hash=check_potcar_hash),
            GasCorrection(config, correct_peroxide=correct_peroxide),
            UCorrection(config, input_set, compat_type),
        ]
        super(MITCompatibility, self).__init__(corrections)
class MITAqueousCompatibility(Compatibility):
    """
    This class implements the GGA/GGA+U mixing scheme, which allows mixing of
    entries. Note that this should only be used for VASP calculations using the
    MIT parameters (see pymatgen.io.vaspio_set MITVaspInputSet). Using
    this compatibility scheme on runs with different parameters is not valid.

    Args:
        compat_type: Two options, GGA or Advanced. GGA means all GGA+U
            entries are excluded. Advanced means mixing scheme is
            implemented to make entries compatible with each other,
            but entries which are supposed to be done in GGA+U will have the
            equivalent GGA entries excluded. For example, Fe oxides should
            have a U value under the Advanced scheme. A GGA Fe oxide run
            will therefore be excluded under the scheme.
        correct_peroxide: Specify whether peroxide/superoxide/ozonide
            corrections are to be applied or not.
        check_potcar_hash (bool): Use potcar hash to verify potcars are correct.
    """

    def __init__(self, compat_type="Advanced", correct_peroxide=True,
                 check_potcar_hash=False):
        """Assemble the MIT chain plus the aqueous-phase correction."""
        config = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              "MITCompatibility.yaml")
        input_set = MITVaspInputSet()
        corrections = [
            PotcarCorrection(input_set, check_hash=check_potcar_hash),
            GasCorrection(config, correct_peroxide=correct_peroxide),
            UCorrection(config, input_set, compat_type),
            AqueousCorrection(config),
        ]
        super(MITAqueousCompatibility, self).__init__(corrections)
class MaterialsProjectAqueousCompatibility(Compatibility):
    """
    This class implements the GGA/GGA+U mixing scheme, which allows mixing of
    entries. Note that this should only be used for VASP calculations using the
    MaterialsProject parameters (see pymatgen.io.vaspio_set.MPVaspInputSet).
    Using this compatibility scheme on runs with different parameters is not
    valid.

    Args:
        compat_type: Two options, GGA or Advanced. GGA means all GGA+U
            entries are excluded. Advanced means mixing scheme is
            implemented to make entries compatible with each other,
            but entries which are supposed to be done in GGA+U will have the
            equivalent GGA entries excluded. For example, Fe oxides should
            have a U value under the Advanced scheme. A GGA Fe oxide run
            will therefore be excluded under the scheme.
        correct_peroxide: Specify whether peroxide/superoxide/ozonide
            corrections are to be applied or not.
        check_potcar_hash (bool): Use potcar hash to verify potcars are correct.
    """

    def __init__(self, compat_type="Advanced", correct_peroxide=True,
                 check_potcar_hash=False):
        """Assemble the MP chain plus the aqueous-phase correction."""
        config = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                              "MPCompatibility.yaml")
        input_set = MPVaspInputSet()
        corrections = [
            PotcarCorrection(input_set, check_hash=check_potcar_hash),
            GasCorrection(config, correct_peroxide=correct_peroxide),
            UCorrection(config, input_set, compat_type),
            AqueousCorrection(config),
        ]
        super(MaterialsProjectAqueousCompatibility, self).__init__(corrections)
|
{
"content_hash": "3efd26d8baf68b3c44d00a2ef1e81a0c",
"timestamp": "",
"source": "github",
"line_count": 589,
"max_line_length": 80,
"avg_line_length": 39.36332767402377,
"alnum_prop": 0.6041406081518222,
"repo_name": "sonium0/pymatgen",
"id": "c1b47a19d3bd86e692315bb743cfee54919e9123",
"size": "23295",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pymatgen/entries/compatibility.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Common Lisp",
"bytes": "3029065"
},
{
"name": "Groff",
"bytes": "868"
},
{
"name": "Perl",
"bytes": "229104"
},
{
"name": "Propeller Spin",
"bytes": "4026362"
},
{
"name": "Python",
"bytes": "3590333"
}
],
"symlink_target": ""
}
|
import os
# the blockdevice information about a target
# can be found in directory'/sys/class/iscsi_session/'
#
# iscsi_session
# |
# -----------------------------
# | | ..... |
# session1 session2 ..... sessionN
# |----------
# | |
# device targetname #here check the targetname
# |
# targetNo:0:0
# |
# No:0:0:LunNo
# |
# block
# |
# blockdeviceName
def check_targetname(targetfile_path, target):
    """Return True if any line of the file at *targetfile_path* contains *target*.

    Used to match an iSCSI target name against a sysfs 'targetname' file.
    """
    # open() + context manager instead of the removed/deprecated file()
    # builtin; also guarantees the descriptor is closed.
    with open(targetfile_path) as targetfile:
        for line in targetfile:
            if target in line:
                return True
    return False
def get_blockdev_by_targetname(target):
    """Return the '/dev/<name>' block device backing the iSCSI *target*.

    Walks /sys/class/iscsi_session/ (see the diagram above), finds the
    session whose 'targetname' file mentions *target*, then descends
    device/target*/<host:0:0:LUN>/block/ to read the device name.
    Returns None (after printing a notice) when no session matches.
    """
    sessions_root = '/sys/class/iscsi_session/'
    for session in os.listdir(sessions_root):
        if 'session' not in session:
            continue
        session_path = sessions_root + session + '/'
        if not check_targetname(session_path + 'targetname', target):
            continue
        device_path = session_path + 'device/'
        for entry in os.listdir(device_path):
            if 'target' not in entry:
                continue
            target_path = device_path + entry + '/'
            for lun in os.listdir(target_path):
                # Match host:0:0:LUN entries but skip LUN 0.
                # BUGFIX: the original used "is -1", an identity comparison
                # that only worked via CPython's small-int caching.
                if lun.find(':0:0:') > -1 and lun.find(':0:0:0') == -1:
                    block_path = target_path + lun + '/block/'
                    # sysfs may not have populated block/ yet; wait for it.
                    # NOTE(review): this spins forever if the directory
                    # never appears — consider a timeout.
                    while not os.path.isdir(block_path):
                        pass
                    devices = os.listdir(block_path)
                    return '/dev/' + devices[0]
    print('target not found')
    return None
|
{
"content_hash": "b2ce5f66b3075abe3fbee97b72ad5f11",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 71,
"avg_line_length": 29.958333333333332,
"alnum_prop": 0.5959666203059806,
"repo_name": "lihuiba/SoftSAN",
"id": "92ab004db7aa5ac3181392cf8205b8ed31dd7bd6",
"size": "1438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scandev.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "21187"
},
{
"name": "Python",
"bytes": "113482"
},
{
"name": "Shell",
"bytes": "184"
}
],
"symlink_target": ""
}
|
import os
import sys
from inspect import ismethod
from time import sleep
from OSEncryptionState import *
from distutils.version import LooseVersion
class EncryptBlockDeviceState(OSEncryptionState):
    """State machine step that LUKS-formats and opens the OS block device,
    then copies the root filesystem into the encrypted mapping.

    Relies on attributes provided by OSEncryptionState (context,
    command_executor, bek_util, encryption_config, rootfs_block_device —
    presumably set by the base class; confirm there).
    """
    def __init__(self, context):
        super(EncryptBlockDeviceState, self).__init__('EncryptBlockDeviceState', context)
    def should_enter(self):
        """Return True when the base-class checks allow entering this state."""
        self.context.logger.log("Verifying if machine should enter encrypt_block_device state")
        if not super(EncryptBlockDeviceState, self).should_enter():
            return False
        self.context.logger.log("Performing enter checks for encrypt_block_device state")
        return True
    def enter(self):
        """Format/open the LUKS container and dd the root device into it."""
        if not self.should_enter():
            return
        self.context.logger.log("Entering encrypt_block_device state")
        # /boot holds the detached LUKS header; mount failure is tolerated
        # (second arg False: do not raise on failure).
        self.command_executor.Execute('mount /boot', False)
        # self._find_bek_and_execute_action('_dump_passphrase')
        self._find_bek_and_execute_action('_luks_format')
        self._find_bek_and_execute_action('_luks_open')
        self.context.hutil.do_status_report(operation='EnableEncryptionDataVolumes',
                                            status=CommonVariables.extension_success_status,
                                            status_code=str(CommonVariables.success),
                                            message='OS disk encryption started')
        # Enable used space encryption on RHEL 7.3 and above
        # (conv=sparse skips zero blocks; older dd lacks reliable support).
        distro_info = self.context.distro_patcher.distro_info
        if LooseVersion(distro_info[1]) >= LooseVersion('7.3'):
            self.command_executor.Execute('dd if={0} of=/dev/mapper/osencrypt conv=sparse bs=64K'.format(self.rootfs_block_device), True)
        else:
            self.command_executor.Execute('dd if={0} of=/dev/mapper/osencrypt bs=52428800'.format(self.rootfs_block_device), True)
    def should_exit(self):
        """Re-open the mapping if needed and sanity-mount it before exiting."""
        self.context.logger.log("Verifying if machine should exit encrypt_block_device state")
        if not os.path.exists('/dev/mapper/osencrypt'):
            self._find_bek_and_execute_action('_luks_open')
        # Mount/umount round-trip verifies the encrypted filesystem is usable.
        self.command_executor.Execute('mount /dev/mapper/osencrypt /oldroot', True)
        self.command_executor.Execute('umount /oldroot', True)
        return super(EncryptBlockDeviceState, self).should_exit()
    def _luks_format(self, bek_path):
        """Create a 32 MiB detached LUKS header and luksFormat the root device
        using the passphrase file at *bek_path*."""
        self.command_executor.Execute('mkdir /boot/luks', True)
        self.command_executor.Execute('dd if=/dev/zero of=/boot/luks/osluksheader bs=33554432 count=1', True)
        self.command_executor.Execute('cryptsetup luksFormat --header /boot/luks/osluksheader -d {0} {1} -q'.format(bek_path,
                                                                                                                   self.rootfs_block_device),
                                      raise_exception_on_failure=True)
    def _luks_open(self, bek_path):
        """Open the root device as /dev/mapper/osencrypt via the detached header."""
        self.command_executor.Execute('cryptsetup luksOpen --header /boot/luks/osluksheader {0} osencrypt -d {1}'.format(self.rootfs_block_device,
                                                                                                                        bek_path),
                                      raise_exception_on_failure=True)
    def _dump_passphrase(self, bek_path):
        """Debug helper: log an octal dump of the passphrase file (disabled above)."""
        proc_comm = ProcessCommunicator()
        self.command_executor.Execute(command_to_execute="od -c {0}".format(bek_path),
                                      raise_exception_on_failure=True,
                                      communicator=proc_comm)
        self.context.logger.log("Passphrase:")
        self.context.logger.log(proc_comm.stdout)
    def _find_bek_and_execute_action(self, callback_method_name):
        """Resolve the BEK passphrase file and invoke the named method with it."""
        callback_method = getattr(self, callback_method_name)
        if not ismethod(callback_method):
            raise Exception("{0} is not a method".format(callback_method_name))
        bek_path = self.bek_util.get_bek_passphrase_file(self.encryption_config)
        callback_method(bek_path)
|
{
"content_hash": "16263bc6198813b588adaee2b200c0a0",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 146,
"avg_line_length": 48.392857142857146,
"alnum_prop": 0.601230012300123,
"repo_name": "Azure/azure-linux-extensions",
"id": "362066d511370b6c77a050289f64eba9892f325c",
"size": "4726",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "VMEncryption/main/oscrypto/rhel_72/encryptstates/EncryptBlockDeviceState.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "81542"
},
{
"name": "C++",
"bytes": "1038973"
},
{
"name": "CMake",
"bytes": "11642"
},
{
"name": "Dockerfile",
"bytes": "1539"
},
{
"name": "Go",
"bytes": "136483"
},
{
"name": "HTML",
"bytes": "32736"
},
{
"name": "JavaScript",
"bytes": "22883"
},
{
"name": "Makefile",
"bytes": "11405"
},
{
"name": "PowerShell",
"bytes": "22400"
},
{
"name": "Python",
"bytes": "5124041"
},
{
"name": "Roff",
"bytes": "3827"
},
{
"name": "Shell",
"bytes": "66718"
}
],
"symlink_target": ""
}
|
"""
Update message catalogs for the given doc project.
(currently only available for sphinx projects).
Requires sphinx-intl.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
import qisys.parsers
import qidoc.parsers
def configure_parser(parser):
    """Register worktree, project and doc-build options on *parser*."""
    option_installers = (
        qisys.parsers.worktree_parser,
        qisys.parsers.project_parser,
        qidoc.parsers.build_doc_parser,
    )
    for install_options in option_installers:
        install_options(parser)
def do(args):
    """Entry point: configure the doc builder, then refresh its message catalogs."""
    builder = qidoc.parsers.get_doc_builder(args)
    builder.configure()
    builder.intl_update()
|
{
"content_hash": "6245e8a7ca6592ab43a1a5ef53c2fd51",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 53,
"avg_line_length": 25.96,
"alnum_prop": 0.7288135593220338,
"repo_name": "aldebaran/qibuild",
"id": "38f94e4a9cf2325b1326c7bc16280ce8cce06975",
"size": "846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/qidoc/actions/intl_update.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "6892"
},
{
"name": "C++",
"bytes": "23130"
},
{
"name": "CMake",
"bytes": "292637"
},
{
"name": "Makefile",
"bytes": "755"
},
{
"name": "Nix",
"bytes": "563"
},
{
"name": "Python",
"bytes": "1581825"
},
{
"name": "SWIG",
"bytes": "306"
},
{
"name": "Shell",
"bytes": "888"
}
],
"symlink_target": ""
}
|
"""
Display and control a Pomodoro countdown.
Button 1 starts/pauses countdown.
Button 2 resets the timer.
Button 3 switches Pomodoro/Break.
Configuration parameters:
display_bar: display time in bars when True, otherwise in seconds
format: define custom display format. See placeholders below
format_separator: separator between minutes:seconds
max_breaks: maximum number of breaks
num_progress_bars: number of progress bars
sound_break_end: break end sound (file path) (requires pyglet
or pygame)
sound_pomodoro_end: pomodoro end sound (file path) (requires pyglet
or pygame)
sound_pomodoro_start: pomodoro start sound (file path) (requires pyglet
or pygame)
timer_break: normal break time (seconds)
timer_long_break: long break time (seconds)
timer_pomodoro: pomodoro time (seconds)
Format of status string placeholders:
{bar} display time in bars
{ss} display time in total seconds (1500)
{mm} display time in total minutes (25)
{mmss} display time in (hh-)mm-ss (25:00)
i3status.conf example:
```
pomodoro {
format = "{mmss} {bar}"
}
```
@author Fandekasp (Adrien Lemaire), rixx, FedericoCeratto, schober-ch
"""
from math import ceil
from threading import Timer
from time import time
import os
try:
from pygame import mixer as pygame_mixer
except ImportError:
pygame_mixer = None
try:
import pyglet
except ImportError:
pyglet = None
class Player(object):
    """Best-effort sound player: uses pyglet when importable, else pygame,
    else a silent no-op backend."""

    # Name of the backend method dispatched by __call__.
    _default = '_silence'

    def __init__(self):
        if pyglet is not None:
            # Prefer the pulse driver, falling back to pyglet's silent one.
            pyglet.options['audio'] = ('pulse', 'silent')
            self._player = pyglet.media.Player()
            self._default = '_pyglet'
        elif pygame_mixer is not None:
            pygame_mixer.init()
            self._default = '_pygame'

    def _silence(self, sound_fname):
        pass

    def _pygame(self, sound_fname):
        music = pygame_mixer.music
        music.load(sound_fname)
        music.play()

    def _pyglet(self, sound_fname):
        res_dir, basename = os.path.split(sound_fname)
        if res_dir not in pyglet.resource.path:
            pyglet.resource.path = [res_dir]
            pyglet.resource.reindex()
        media = pyglet.resource.media(basename, streaming=False)
        self._player.queue(media)
        self._player.play()

    @property
    def available(self):
        """True when a real audio backend was detected."""
        return self._default != '_silence'

    def __call__(self, sound_fname):
        handler = getattr(self, self._default)
        handler(os.path.expanduser(sound_fname))
PROGRESS_BAR_ITEMS = u"▏▎▍▌▋▊▉"
class Py3status:
    """i3status module implementing a Pomodoro countdown.

    See the module docstring for configuration parameters and click
    behaviour. State lives in underscore attributes set by _init();
    a threading.Timer fires _time_up() when the current section ends.
    """
    # available configuration parameters
    display_bar = False
    format = u'{ss}'
    format_separator = u":"
    max_breaks = 4
    num_progress_bars = 5
    sound_break_end = None
    sound_pomodoro_end = None
    sound_pomodoro_start = None
    timer_break = 5 * 60
    timer_long_break = 15 * 60
    timer_pomodoro = 25 * 60
    def __init__(self):
        # Real setup is deferred to _init() so class-level config overrides
        # (applied by py3status after construction) are honoured.
        self._initialized = False
    def _init(self):
        """(Re)initialize all runtime state to a fresh, paused Pomodoro."""
        self._break_number = 0
        self._active = True
        self._running = False
        self._time_left = self.timer_pomodoro
        self._section_time = self.timer_pomodoro
        self._prefix = 'Pomodoro'
        self._timer = None
        self._end_time = None
        self._player = Player()
        # NOTE(review): _format is assigned here and in _advance() but
        # pomodoro() builds its own local format string — looks unused.
        self._format = 'Pomodoro {time}'
        self._alert = False
        if self.display_bar is True:
            self.format = u'{bar}'
        self._initialized = True
    def _time_up(self):
        """Timer callback: notify the user and move to the next section."""
        self.py3.notify_user('{} time is up !'.format(self._prefix))
        self._alert = True
        self._advance()
    def _advance(self, user_action=False):
        """Switch Pomodoro -> break (or break -> Pomodoro).

        Sounds are only played for automatic transitions, not when the
        user forced the switch via right click.
        """
        self._running = False
        if self._active:
            if not user_action:
                self._play_sound(self.sound_pomodoro_end)
            # start break
            self._time_left = self.timer_break
            self._section_time = self.timer_break
            self._break_number += 1
            self._format = 'Break #{} {{time}}'.format(self._break_number)
            self._prefix = 'Break #{}'.format(self._break_number)
            # Every max_breaks-th break becomes a long break and resets
            # the break counter.
            if self._break_number > self.max_breaks:
                self._time_left = self.timer_long_break
                self._section_time = self.timer_long_break
                self._break_number = 0
            self._active = False
        else:
            if not user_action:
                self._play_sound(self.sound_break_end)
            self._time_left = self.timer_pomodoro
            self._section_time = self.timer_pomodoro
            self._format = 'Pomodoro {time}'
            self._prefix = 'Pomodoro'
            self._active = True
    def kill(self):
        '''
        cancel any timer
        '''
        if self._timer:
            self._timer.cancel()
    def on_click(self, event):
        """
        Handles click events:
        - left click starts an inactive counter and pauses a running
          Pomodoro
        - middle click resets everything
        - right click starts (and ends, if needed) a break
        """
        if event['button'] == 1:
            if self._running:
                # Pause: freeze the remaining time and stop the timer.
                self._running = False
                self._time_left = self._end_time - time()
                if self._timer:
                    self._timer.cancel()
            else:
                # Start/resume: schedule _time_up at the deadline.
                self._running = True
                self._end_time = time() + self._time_left
                if self._timer:
                    self._timer.cancel()
                self._timer = Timer(self._time_left, self._time_up)
                self._timer.start()
                if self._active:
                    self._play_sound(self.sound_pomodoro_start)
        elif event['button'] == 2:
            # reset
            self._init()
            if self._timer:
                self._timer.cancel()
        elif event['button'] == 3:
            # advance
            self._advance(user_action=True)
            if self._timer:
                self._timer.cancel()
    def _setup_bar(self):
        """
        Render the remaining fraction of the section as num_progress_bars
        characters chosen from PROGRESS_BAR_ITEMS (space-padded on the right).
        """
        bar = u''
        items_cnt = len(PROGRESS_BAR_ITEMS)
        bar_val = float(self._time_left) / self._section_time * \
            self.num_progress_bars
        while bar_val > 0:
            # Fractional last cell picks a proportionally thin glyph.
            selector = int(bar_val * items_cnt)
            selector = min(selector, items_cnt - 1)
            bar += PROGRESS_BAR_ITEMS[selector]
            bar_val -= 1
        bar = bar.ljust(self.num_progress_bars)
        return bar
    def pomodoro(self, i3s_output_list, i3s_config):
        """
        Build the i3status response dict: formatted countdown text, cache
        hint (forever while paused), urgency on section end, and a color
        reflecting running/active state.
        """
        if not self._initialized:
            self._init()
        cached_until = 0
        if self._running:
            self._time_left = ceil(self._end_time - time())
            time_left = ceil(self._time_left)
        else:
            time_left = ceil(self._time_left)
        vals = {
            'ss': int(time_left),
            # NOTE(review): on Python 2, time_left / 60 floor-divides ints,
            # making this ceil a no-op — confirm intended rounding.
            'mm': int(ceil(time_left / 60)),
        }
        if '{mmss}' in self.format:
            hours, rest = divmod(time_left, 3600)
            mins, seconds = divmod(rest, 60)
            if hours:
                vals['mmss'] = u'%d%s%02d%s%02d' % (hours,
                                                    self.format_separator,
                                                    mins,
                                                    self.format_separator,
                                                    seconds)
            else:
                vals['mmss'] = u'%d%s%02d' % (mins,
                                              self.format_separator,
                                              seconds)
        if '{bar}' in self.format:
            vals['bar'] = self._setup_bar()
        # [..] marks a running countdown, (..) a paused one.
        if self._running:
            format = u'{{prefix}} [{}]'.format(self.format)
        else:
            format = u'{{prefix}} ({})'.format(self.format)
            cached_until = self.py3.CACHE_FOREVER
        response = {
            'full_text': format.format(prefix=self._prefix, **vals),
            'cached_until': cached_until,
        }
        if self._alert:
            # One-shot urgency after _time_up().
            response['urgent'] = True
            self._alert = False
        if not self._running:
            response['color'] = i3s_config['color_bad']
        else:
            if self._active:
                response['color'] = i3s_config['color_good']
            else:
                response['color'] = i3s_config['color_degraded']
        return response
    def _play_sound(self, sound_fname):
        """Play sound if required; silently skip when unset, no backend is
        available, or the backend raises.
        """
        if not sound_fname:
            return
        if not self._player.available:
            self.py3.log("pomodoro module: the pyglet or pygame "
                         "library are required to play sounds")
            return
        try:
            self._player(sound_fname)
        except Exception:
            return
if __name__ == "__main__":
    # Run this module through py3status's standalone test harness.
    from py3status.module_test import module_test
    module_test(Py3status)
|
{
"content_hash": "9a2a32a281cfd02bc65162e19df06d56",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 75,
"avg_line_length": 29.87828947368421,
"alnum_prop": 0.5286799515578553,
"repo_name": "Spirotot/py3status",
"id": "0e59cc7aefbf5ce9303f2827c786baa552a974e0",
"size": "9121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "py3status/modules/pomodoro.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "334150"
}
],
"symlink_target": ""
}
|
import cStringIO
# Token type tags shared by Scanner and Parser below. Plain strings rather
# than an enum so each tag doubles as a readable label in Token.__repr__.
TOKEN_NUMBER = "TOKEN_NUMBER"
TOKEN_IDENTIFIER = "TOKEN_IDENTIFIER"
TOKEN_STRING = "TOKEN_STRING"
TOKEN_EQUALS = "TOKEN_EQUALS"
TOKEN_COMMENT = "TOKEN_COMMENT"
TOKEN_COMA = "TOKEN_COMA"
TOKEN_SEMICOLON = "TOKEN_SEMICOLON"
TOKEN_OPEN_DICT = "TOKEN_OPEN_DICT"
TOKEN_OPEN_LIST = "TOKEN_OPEN_LIST"
TOKEN_CLOSE_DICT = "TOKEN_CLOSE_DICT"
TOKEN_CLOSE_LIST = "TOKEN_CLOSE_LIST"
TOKEN_ERROR = "TOKEN_ERROR"
TOKEN_END = "TOKEN_END"
def is_delimiter(ch):
    """Return True when *ch* ends an identifier: plist punctuation or whitespace."""
    if ch.isspace():
        return True
    return ch in ",/;(){}='\""
class Parser(object):
    """
    This class enables the parsing of an plist stream (in the old format) and returns
    a dictionary or list object that is at the root of the plist stream.
    To use the scanner simply do: ::
        obj = Parser().parse(<string_or_stream>)

    Recursive-descent parser over Scanner's token stream, with one token
    of lookahead (self.lookahead).
    """
    def parseFile(self, path):
        """Parse the plist file at *path*."""
        return self.parse(open(path))
    def parse(self, string_or_stream):
        """
        Parses the input stream and returns a dictionary or list at the root of the stream.
        >>> Parser().parse("3")
        3
        >>> Parser().parse("'abc'")
        'abc'
        >>> Parser().parse("()")
        []
        >>> Parser().parse("(1)")
        [1]
        >>> Parser().parse("(1;)")
        [1]
        >>> Parser().parse("(1; 2; 3)")
        [1, 2, 3]
        >>> Parser().parse("(1; (a; b; c;))")
        [1, [a, b, c]]
        >>> Parser().parse("{}")
        {}
        >>> Parser().parse("{'a' = 1;}")
        {'a': 1}
        >>> Parser().parse("{ident = (1; 2)}")
        {ident: [1, 2]}
        >>> Parser().parse("{ident1 = 1; ident2 = 2}")
        {ident2: 2, ident1: 1}
        >>> Parser().parse("{ident = ()}")
        {ident: []}
        >>> Parser().parse("{ident = hello/world}")
        {ident: hello/world}
        """
        self.scanner = Scanner(string_or_stream)
        self.lookahead = None
        self.tokenizer = self.scanner.tokenize()
        return self.parse_value()
    def next_token(self, peek=False):
        """Return the next token; with peek=True leave it in self.lookahead
        so the following call returns it again."""
        out = self.lookahead
        if out:
            if not peek:
                self.lookahead = None
        else:
            out = self.tokenizer.next()
            if peek:
                self.lookahead = out
        # if not peek: print out
        return out
    def parse_value(self):
        """Parse one value: a scalar Token, a (...) list or a {...} dict."""
        token = self.next_token()
        if token.toktype in (TOKEN_NUMBER, TOKEN_STRING, TOKEN_IDENTIFIER):
            return token
        elif token.toktype == TOKEN_OPEN_LIST:
            return self.parse_list()
        elif token.toktype == TOKEN_OPEN_DICT:
            return self.parse_dict()
        else:
            self.parse_exception("Invalid token found: %s", token)
    def parse_list(self):
        """Parse the remainder of a (...) list; ';' and ',' both separate items."""
        out = []
        token = self.next_token(peek=True)
        while token.toktype != TOKEN_END:
            if token.toktype == TOKEN_CLOSE_LIST:
                if self.lookahead: # consume the token if it is a lookahead one
                    self.next_token()
                break
            out.append(self.parse_value())
            token = self.next_token()
            if token.toktype not in (TOKEN_CLOSE_LIST, TOKEN_COMA, TOKEN_SEMICOLON):
                self.parse_exception("Expected ';', ',' or ')', Found: %s", token)
            elif token.toktype in (TOKEN_COMA, TOKEN_SEMICOLON):
                token = self.next_token(peek=True)
        return out
    def parse_dict(self):
        """Parse the remainder of a {key = value; ...} dict into a PlistDict."""
        out = PlistDict()
        token = self.next_token()
        while token.toktype != TOKEN_END:
            if token.toktype == TOKEN_CLOSE_DICT:
                break
            elif token.toktype not in (TOKEN_NUMBER, TOKEN_STRING, TOKEN_IDENTIFIER):
                self.parse_exception("Expected string or identifier, Found: %s", token)
            else:
                key = token
            token = self.next_token()
            if token.toktype != TOKEN_EQUALS:
                self.parse_exception("Expected '=', Found: %s", token)
            out[key] = self.parse_value()
            token = self.next_token()
            if token.toktype not in (TOKEN_CLOSE_DICT, TOKEN_COMA, TOKEN_SEMICOLON):
                self.parse_exception("Expected ',', ';', or '}', Found: %s", token)
            elif token.toktype in (TOKEN_COMA, TOKEN_SEMICOLON):
                token = self.next_token()
        return out
    def parse_exception(self, fmt, *args):
        """Raise ParseException annotated with the scanner's current position."""
        raise ParseException(self.scanner.line, self.scanner.column, fmt, *args)
class ParseException(Exception):
    """Parse error whose message carries the scanner's line/column position."""

    def __init__(self, line, column, fmt, *args):
        located = "Line: %d, Column: %d, %s" % (line, column, fmt)
        super(ParseException, self).__init__(located % args)
class Scanner(object):
    """
    This class enables the tokenization of the old plist formats which are essentially
    JSON with comments. To use the scanner simply do: ::
        scanner = Scanner(<string_or_stream>)
        for token in scanner.tokenize():
            user_token(token)
    Ideally this class will not have to be called, instead use the Parser
    for to return a parsed plist stream.
    """
    def __init__(self, string_or_stream):
        # Accept either a stream or a string; strings are wrapped in a
        # cStringIO buffer so next_char() can read uniformly.
        self.instream = string_or_stream
        if type(self.instream) in (str, unicode):
            self.instream = cStringIO.StringIO()
            self.instream.write(string_or_stream)
            self.instream.reset()
        # 0-based position, used for error reporting.
        self.line = 0
        self.column = 0
        # Tokenizer state: inside a /* */ comment / accumulating an identifier.
        self.incomment = False
        self.started_identifier = False
        self.currtoken = ""
    def next_char(self):
        """Read one character, keeping line/column counters up to date."""
        ch = self.instream.read(1)
        if ch == '\n':
            self.column = 0
            self.line += 1
        else:
            self.column += 1
        return ch
    def make_error(self, fmt, *args):
        """Reset identifier state and return a TOKEN_ERROR token."""
        self.started_identifier = False
        self.currtoken = ""
        return Token(TOKEN_ERROR, (fmt % args))
    def tokenize(self):
        """
        A scanner for a plist string or stream.
        >>> list(Scanner("3").tokenize())
        [3, END]
        >>> list(Scanner("'3'").tokenize())
        ['3', END]
        >>> list(Scanner("(1)").tokenize())
        [OPEN_LIST, 1, CLOSE_LIST, END]
        >>> list(Scanner("3 4 5").tokenize())
        [3, 4, 5, END]
        >>> list(Scanner("3/*asdfasdf*/5").tokenize())
        [3, 5, END]
        >>> list(Scanner("\\"3/4/5\\"").tokenize())
        ['3/4/5', END]
        >>> list(Scanner("\\\"3/4/5").tokenize())
        [ERROR('Missing "')]
        >>> list(Scanner("hello/world").tokenize())
        [hello/world, END]
        """
        parse_failed = False
        currchar = self.next_char()
        while currchar:
            if self.incomment:
                # Inside /* */: skip until the closing "*/".
                if currchar == '*':
                    nextchar = self.next_char()
                    if nextchar == '/':
                        self.incomment = False
            else:
                if is_delimiter(currchar):
                    if currchar == '/': # possible start of a comment
                        nextchar = self.next_char()
                        if nextchar not in "*/":
                            # not a comment so as we were
                            # ('/' is legal inside identifiers, e.g. hello/world)
                            self.currtoken += "/" + nextchar
                        else: # beginning of a comment
                            # Flush any identifier accumulated so far.
                            if self.currtoken and self.started_identifier:
                                self.started_identifier = False
                                yield self.make_value_token()
                            if nextchar == '/':
                                # skip till end of line
                                self.instream.readline()
                                self.column = 0
                                self.line += 1
                            elif nextchar == '*':
                                self.incomment = True
                    elif self.currtoken and self.started_identifier:
                        # Any other delimiter terminates a pending identifier.
                        self.started_identifier = False
                        yield self.make_value_token()
                    if currchar == ';':
                        yield Token(TOKEN_SEMICOLON)
                    elif currchar == ',':
                        yield Token(TOKEN_COMA)
                    elif currchar == '=':
                        yield Token(TOKEN_EQUALS)
                    elif currchar == '{':
                        yield Token(TOKEN_OPEN_DICT)
                    elif currchar == '(':
                        yield Token(TOKEN_OPEN_LIST)
                    elif currchar == '}':
                        yield Token(TOKEN_CLOSE_DICT)
                    elif currchar == ')':
                        yield Token(TOKEN_CLOSE_LIST)
                    elif currchar == '"' or currchar == "'": # start of quoted string
                        startchar = currchar
                        self.currtoken = currchar
                        currchar = self.next_char()
                        while currchar and currchar != startchar:
                            if currchar == "\\":
                                # Keep the backslash and the escaped char verbatim.
                                self.currtoken += currchar
                                currchar = self.next_char()
                            self.currtoken += currchar
                            currchar = self.next_char()
                        if currchar != startchar:
                            yield self.make_error("Missing %s" % startchar)
                            parse_failed = True
                        else:
                            self.currtoken += startchar
                            # SECURITY NOTE: the quoted literal is eval()'d to
                            # decode escapes — only feed trusted plist sources.
                            yield Token(TOKEN_STRING, eval(self.currtoken))
                    elif currchar.isspace():
                        # do nothing
                        pass
                else:
                    # Non-delimiter: accumulate into the current identifier.
                    if not self.started_identifier:
                        self.currtoken = ""
                        self.started_identifier = True
                    self.currtoken += currchar
            currchar = self.next_char()
        # EOF: flush a trailing identifier, then emit END unless we errored.
        if self.currtoken and self.started_identifier:
            self.started_identifier = False
            yield self.make_value_token()
        if not parse_failed:
            yield Token(TOKEN_END)
    def make_value_token(self):
        """Wrap the accumulated text in a Token.

        NOTE(review): the "if False and" guard makes the TOKEN_NUMBER branch
        dead code, so digit runs are emitted as identifiers — apparently a
        deliberate disable; confirm before re-enabling.
        """
        if False and self.currtoken.isdigit():
            return Token(TOKEN_NUMBER, int(self.currtoken))
        else:
            return Token(TOKEN_IDENTIFIER, self.currtoken)
class Token(object):
    """A lexical token: a *toktype* tag plus an optional literal *value*."""

    def __init__(self, toktype, value=None):
        self.toktype = toktype
        self.value = value

    def __eq__(self, other):
        # Tokens compare by (toktype, value); a bare string compares
        # against the value alone (used by the parser's doctests).
        other_type = type(other)
        if other_type == Token:
            return self.toktype == other.toktype and self.value == other.value
        elif other_type in (str, unicode):
            return self.value == other

    def __hash__(self):
        if self.value:
            return hash(self.toktype + ":" + self.value)
        return hash(self.toktype)

    def __repr__(self):
        if not self.value:
            return "%s" % (self.toktype.replace("TOKEN_", ""))
        if self.toktype in (TOKEN_NUMBER, TOKEN_STRING):
            return "'%s'" % self.value
        if self.toktype == TOKEN_ERROR:
            return "ERROR('%s')" % self.value
        return "%s" % self.value

    def startswith(self, prefix):
        return self.value.startswith(prefix)

    def endswith(self, suffix):
        return self.value.endswith(suffix)
class PlistDict(dict):
    """
    dict subclass whose lookups treat identifier Tokens and their string
    values as interchangeable keys.
    """

    def __getitem__(self, key):
        if not super(PlistDict, self).__contains__(key):
            # Retry with the "other" representation of the key.
            key_type = type(key)
            if key_type is Token:
                key = key.value
            elif key_type in (str, unicode):
                key = Token(TOKEN_IDENTIFIER, key)
        return super(PlistDict, self).__getitem__(key)

    def __contains__(self, key):
        keytype = type(key)
        if super(PlistDict, self).__contains__(key):
            return True
        # Not found directly: try the alternate key representation.
        if keytype is Token:
            return super(PlistDict, self).__contains__(key.value)
        if keytype in (str, unicode):
            return super(PlistDict, self).__contains__(Token(TOKEN_IDENTIFIER, key))
        return False

    def strkeys(self):
        """Return the underlying string value of every Token key."""
        return [key.value for key in self.keys()]
|
{
"content_hash": "98f5c6db1d4badb7f0d8937336398045",
"timestamp": "",
"source": "github",
"line_count": 364,
"max_line_length": 103,
"avg_line_length": 34.107142857142854,
"alnum_prop": 0.49617398308497784,
"repo_name": "panyam/plists",
"id": "dd5b583585c0d98f6715e051bd0ea4b2c8320c7e",
"size": "12415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plists/v1parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22533"
}
],
"symlink_target": ""
}
|
import jasy.js.parse.Node as Node
import jasy.core.Console as Console
__all__ = ["optimize", "Error"]
#
# Public API
#
class Error(Exception):
    """Raised when declaration combining fails; remembers the source line.

    The original stored the line in a name-mangled attribute and never
    called Exception.__init__, so str(err) was empty and the line was
    unreachable — fixed by passing a message up and exposing a property.
    """
    def __init__(self, line):
        super(Error, self).__init__("Could not combine declarations (line %s)" % line)
        self.__line = line

    @property
    def line(self):
        """Source line where the failure happened."""
        return self.__line
def optimize(node):
    """Public entry point: combine var declarations below *node*,
    wrapped in indented debug logging."""
    Console.debug("Combining declarations...")
    Console.indent()
    outcome = __optimize(node)
    Console.outdent()
    return outcome
def __optimize(node):
    """Recursive worker: optimize children first, then combine on this node."""
    is_container = node.type in ("script", "block")
    # Iterate over a snapshot when this node may be modified below.
    children = list(node) if is_container else node
    for child in children:
        # None children are allowed sometimes e.g. during array_init like [1,2,,,7,8]
        if child != None:
            __optimize(child)
    if is_container:
        __combineSiblings(node)
    if node.type == "script":
        __combineVarStatements(node)
#
# Merge direct variable siblings
#
def __combineSiblings(node):
    """Backwards processing and insertion into previous sibling if both are declarations"""
    length = len(node)
    pos = length-1
    # Walk right-to-left so removals don't disturb unvisited positions.
    while pos > 0:
        child = node[pos]
        prevChild = node[pos-1]
        # Special FOR loop optimization, emulate faked VAR:
        # pull a for-loop's own "var" setup out so it can merge with the
        # preceding var statement below.
        if child.type == "for" and prevChild.type == "var":
            setup = getattr(child, "setup", None)
            if setup and setup.type == "var":
                Console.debug("Removing for-loop setup section at line %s" % setup.line)
                child.remove(setup)
                child = setup
        # Combine declarations of VAR statements
        if child.type == "var" and prevChild.type == "var":
            # debug("Combining var statement at line %s" % child.line)
            # Fix loop through casting node to list()
            for variable in list(child):
                prevChild.append(variable)
            # The extracted for-setup was already detached, hence the check.
            if child in node:
                node.remove(child)
        pos -= 1
#
# Merge var statements, convert in-place to assignments in other locations (quite complex)
#
def __combineVarStatements(node):
    """Top level method called to optimize a script node: funnel every
    var declaration into one leading var statement."""
    # Nothing declared in this scope: nothing to combine.
    if len(node.scope.declared) == 0:
        return
    firstVar = __findFirstVarStatement(node)
    # Special case, when a node has variables, but no valid "var" block to hold them
    # This happens in cases where there is a for-loop which contains a "var", but
    # there are no other variable declarations anywhere. In this case we are not able
    # to optimize the code further and just exit at this point
    # Only size-saving when there are multiple for-in loops, but no other var statement or first
    # "free" var declaration is after for-loops.
    if not firstVar:
        # Create a fresh collector var statement at the top of the script.
        firstVar = Node.Node(None, "var")
        node.insert(0, firstVar)
    __patchVarStatements(node, firstVar)
    __cleanFirst(firstVar)
    # Remove unused "var"
    if len(firstVar) == 0:
        firstVar.parent.remove(firstVar)
    else:
        # When there is a classical for loop immediately after our
        # first var statement, then we try to move the var declaration
        # into there as a setup expression
        firstVarParent = firstVar.parent
        firstVarPos = firstVarParent.index(firstVar)
        if len(firstVarParent) > firstVarPos+1:
            possibleForStatement = firstVarParent[firstVarPos+1]
            if possibleForStatement.type == "for" and not hasattr(possibleForStatement, "setup"):
                possibleForStatement.append(firstVar, "setup")
def __findFirstVarStatement(node):
    """Return the first usable "var" statement under *node*.

    Tri-state result: the node itself when found; False when the first
    "var" encountered is a for-in iterator (the caller must then create a
    fresh collector); None when no "var" exists. Inner functions own their
    scope and are not descended into.
    """
    if node.type == "var":
        # Ignore variable blocks which are used as an iterator in for-in loops
        # In this case we return False, so that a new collector "var" is being created
        return False if getattr(node, "rel", None) == "iterator" else node
    for child in node:
        if child.type == "function":
            continue
        found = __findFirstVarStatement(child)
        if found:
            return found
        elif found is False:
            return False
    return None
def __cleanFirst(first):
    """
    Drop re-declarations without initializer from the first var statement,
    e.g. ``var s=3,s,s,t,s;`` becomes ``var s=3,t;``.
    """
    seen = set()
    # Pass 1: names carrying an initializer always win.
    for decl in first:
        if hasattr(decl, "initializer"):
            declared_name = getattr(decl, "name", None)
            if declared_name != None:
                seen.add(declared_name)
            else:
                # JS 1.7 destructuring declaration: register every bound name.
                for ident in decl.names:
                    seen.add(ident.value)
    # Pass 2: keep each remaining plain declaration only once. Duplicates
    # are dropped even when their assignments appear later in the node.
    # (Snapshot via list() keeps the loop stable while removing.)
    for decl in list(first):
        # JS 1.7 destructuring declarations always have an initializer,
        # so only plain named declarations can show up here.
        if not hasattr(decl, "initializer"):
            if decl.name in seen:
                first.remove(decl)
            else:
                seen.add(decl.name)
def __createSimpleAssignment(identifier, valueNode):
    """Build an ``identifier = value`` assign node."""
    target = Node.Node(None, "identifier")
    target.value = identifier
    assignment = Node.Node(None, "assign")
    assignment.append(target)
    assignment.append(valueNode)
    return assignment
def __createMultiAssignment(names, valueNode):
    """Build an assign node for a JS 1.7 destructuring target list."""
    assignment = Node.Node(None, "assign")
    for part in (names, valueNode):
        assignment.append(part)
    return assignment
def __createDeclaration(name):
    """Build a plain (writable) declaration node for *name*."""
    declaration = Node.Node(None, "declaration")
    declaration.name = name
    declaration.readOnly = False
    return declaration
def __createIdentifier(value):
    """Build an identifier node wrapping *value*."""
    ident = Node.Node(None, "identifier")
    ident.value = value
    return ident
def __patchVarStatements(node, firstVarStatement):
    """Recursively replace every "var" statement (except the collector)
    with assignments, moving the declarations into *firstVarStatement*."""
    if node is firstVarStatement:
        # The collector itself must survive untouched.
        return
    if node.type == "function":
        # Inner functions own their scope; leave them alone.
        return
    if node.type == "var":
        __rebuildAsAssignment(node, firstVarStatement)
        return
    # Snapshot the children: rebuilding mutates the tree while we walk it.
    for child in list(node):
        __patchVarStatements(child, firstVarStatement)
def __rebuildAsAssignment(node, firstVarStatement):
    """Rebuilds the items of a var statement into a assignment list and moves declarations to the given var statement"""
    assignment = Node.Node(node.tokenizer, "semicolon")
    assignmentList = Node.Node(node.tokenizer, "comma")
    assignment.append(assignmentList, "expression")
    # Casting to list() creates a copy during the process (keeps loop stable)
    for child in list(node):
        if hasattr(child, "name"):
            # Cleanup initializer and move to assignment
            if hasattr(child, "initializer"):
                assign = __createSimpleAssignment(child.name, child.initializer)
                assignmentList.append(assign)
            firstVarStatement.append(child)
        else:
            # JS 1.7 Destructing Expression
            for identifier in child.names:
                firstVarStatement.append(__createDeclaration(identifier.value))
            if hasattr(child, "initializer"):
                assign = __createMultiAssignment(child.names, child.initializer)
                assignmentList.append(assign)
            node.remove(child)
    # Patch parent node to contain assignment instead of declaration
    if len(assignmentList) > 0:
        node.parent.replace(node, assignment)
    # Special process for "for-in" loops
    # It is OK to be second because of assignments are not allowed at
    # all in for-in loops and so the first if basically does nothing
    # for these kind of statements.
    # NOTE(review): the branches below read `child` AFTER the loop, i.e.
    # the last declaration processed — apparently relying on for-in vars
    # holding exactly one declaration; confirm before refactoring.
    elif getattr(node, "rel", None) == "iterator":
        if hasattr(child, "name"):
            node.parent.replace(node, __createIdentifier(child.name))
        else:
            # JS 1.7 Destructing Expressions
            node.parent.replace(node, child.names)
    # Edge case. Not yet found if this happen realistically
    else:
        if hasattr(node, "rel"):
            Console.warn("Remove related node (%s) from parent: %s" % (node.rel, node))
        node.parent.remove(node)
    # Minor post-cleanup. Remove useless comma statement when only one expression is the result
    if len(assignmentList) == 1:
        assignment.replace(assignmentList, assignmentList[0])
|
{
"content_hash": "4ee2595ef13fc6f0d043efa8da8bc00e",
"timestamp": "",
"source": "github",
"line_count": 280,
"max_line_length": 120,
"avg_line_length": 32.18928571428572,
"alnum_prop": 0.61322534117386,
"repo_name": "zynga/jasy",
"id": "582057f54d7b23c5bed9022fb77bb41091189939",
"size": "9082",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jasy/js/optimize/CombineDeclarations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "913"
},
{
"name": "Python",
"bytes": "771966"
},
{
"name": "Shell",
"bytes": "7530"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from niftynet.utilities.download import download
from niftynet.utilities.niftynet_global_config import NiftyNetGlobalConfig
from tests.niftynet_testcase import NiftyNetTestCase
# Root folder where NiftyNet keeps downloaded models and examples.
MODEL_HOME = NiftyNetGlobalConfig().get_niftynet_home_folder()
# A model-zoo entry ID and the folder its download is expected to create.
TEST_CASE_1 = 'dense_vnet_abdominal_ct_model_zoo'
TEST_CASE_1_TARGET = os.path.join(
    MODEL_HOME, 'models', 'dense_vnet_abdominal_ct')
# Example configuration IDs; each downloads into MODEL_HOME/examples/<id>.
TEST_CASE_2 = 'default'
TEST_CASE_2_TARGET = os.path.join(MODEL_HOME, 'examples', TEST_CASE_2)
TEST_CASE_3 = 'default_multimodal'
TEST_CASE_3_TARGET = os.path.join(MODEL_HOME, 'examples', TEST_CASE_3)
# An ID that should not resolve to any model-zoo entry.
TEST_WRONG_ID = '42'
class NetDownloadTest(NiftyNetTestCase):
    """Exercise niftynet.utilities.download.download with valid and bad IDs."""

    def test_download(self):
        # Non-interactive download of a known model-zoo entry.
        self.assertTrue(download(TEST_CASE_1, False))
        self.assertTrue(os.path.isdir(TEST_CASE_1_TARGET))
        # NOTE(review): the asserts above guarantee the folder exists, so the
        # else branch below is unreachable — confirm original intent.
        if os.path.isdir(TEST_CASE_1_TARGET):
            print('skipping tests: %s folder exists' % TEST_CASE_1_TARGET)
        else:
            self.assertTrue(download(TEST_CASE_1, True))
            self.assertTrue(os.path.isdir(TEST_CASE_1_TARGET))

    def test_wrong_ids(self):
        # Every malformed or unknown ID must be rejected, first without and
        # then with the interactive flag (same call order as before).
        for interactive in (False, True):
            for bad_id in ([], (), None):
                self.assertFalse(download(bad_id, interactive))
        self.assertFalse(download(TEST_WRONG_ID, True))
        self.assertFalse(download(TEST_WRONG_ID, False))

    def test_multiple_ids(self):
        # A single call may download several entries at once.
        self.assertTrue(
            download([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3], False))
        for target in (TEST_CASE_1_TARGET,
                       TEST_CASE_2_TARGET,
                       TEST_CASE_3_TARGET):
            self.assertTrue(os.path.isdir(target))
if __name__ == "__main__":
    # Run the test cases via TensorFlow's test runner.
    tf.test.main()
|
{
"content_hash": "6a360e54084969336b6f8be7fa55926f",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 74,
"avg_line_length": 35.089285714285715,
"alnum_prop": 0.6824427480916031,
"repo_name": "NifTK/NiftyNet",
"id": "5dca36af90fcc7e8a05afcd27e600239a7ce60ab",
"size": "1989",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "tests/download_test.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "381956"
},
{
"name": "C++",
"bytes": "182582"
},
{
"name": "CMake",
"bytes": "3500"
},
{
"name": "Cuda",
"bytes": "69664"
},
{
"name": "Python",
"bytes": "2340002"
},
{
"name": "Shell",
"bytes": "1792"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render
import urllib.request, json
from django.http import HttpResponse, JsonResponse
def _get_request(url):
    """GET *url* and return its body parsed as JSON.

    Fix: the urlopen response was never closed, leaking the connection
    until garbage collection; a context manager closes it deterministically.
    """
    req = urllib.request.Request(url)
    with urllib.request.urlopen(req) as resp:
        return json.loads(resp.read().decode('utf-8'))
def _success(stat, data):
    """Wrap *data* in a JsonResponse tagged with a 'stat' field.

    Keys in *data* override 'stat' if present (same as the ** merge).
    """
    payload = {'stat': stat}
    payload.update(data)
    return JsonResponse(payload)
def index(request):
    """Render the home page with teaser cards for articles 1..17."""
    headlines = []
    for article_id in range(1, 18):
        article = _get_request('http://models-api:8000/api/article?id=' + str(article_id))
        teaser = {
            'title': article['title'],
            'author': article['author'],
            'link': '/doc/' + str(article_id),
            'id': article_id % 5,
            'readingtime': article['num_sentences'] // 6,
        }
        # First non-empty image becomes the card thumbnail.
        for image in article['images']:
            if image:
                teaser['image'] = image['src']
                break
        # First paragraph, truncated to 60 characters, serves as the intro.
        summary = article['content'][0]
        if len(summary) > 60:
            summary = summary[:60] + '...'
        teaser['intro'] = summary
        headlines.append(teaser)
    return render(request, 'homePage.html', {'news': headlines})
def docs(request, doc_id):
    """Render a single article page with a teaser link to the next article."""
    link = "http://models-api:8000/api/article?id=" + str(doc_id)
    next_id = int(doc_id) + 1
    d = _get_request(link)
    # NOTE(review): 97 appears to be the highest valid article id — confirm
    # against the models API.
    if next_id > 97:
        next_link = None
        next_title = None
    else :
        nextlink = "http://models-api:8000/api/article?id=" + str(next_id)
        n = _get_request(nextlink)
        next_title = n['title']
        next_link = str(next_id)
    ret = {'title': d['title'], 'author': d['author'], 'next_link': next_link, 'next_title': next_title, 'body': []}
    num_comments=0
    # content / style / comments / images are parallel lists, one entry per
    # paragraph, all indexed by i.
    for i in range(len(d['content'])):
        if d['comments'][i]:
            # Truncate the comment at the first space after char 140.
            short_str = d['comments'][i]['content']
            pos = short_str.find(' ', 140)
            if pos > 0: short_str = short_str[:pos] + ' ...[more]'
            short_dict = {'content': short_str}
            num_comments+=1
        else:
            short_dict = None
        collide = False
        # An image "collides" when any comment exists within two paragraphs
        # of it.  Note the or-chain yields the first truthy comment object
        # (not a bool); the template relies only on its truthiness.
        if d['images'][i]:
            collide = (i >= 2 and d['comments'][i-2]) \
                      or (i >= 1 and d['comments'][i-1]) \
                      or d['comments'][i] \
                      or (i+1 < len(d['content']) and d['comments'][i+1]) \
                      or (i+2 < len(d['content']) and d['comments'][i+2])
        ret['body'].append({
            'collide':collide,
            'content': d['content'][i],
            'style': d['style'][i],
            'comments': d['comments'][i],
            'comments_short': short_dict,
            'images': d['images'][i],
            'pos': i+1,
        })
    ret["num_comment"]=num_comments
    return render(request, 'articlePage.html', ret)
|
{
"content_hash": "1fa841e19fd45e379cd1c3b57e7fee79",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 118,
"avg_line_length": 30.066666666666666,
"alnum_prop": 0.5070214338507022,
"repo_name": "shadowgamefly/news-Digest",
"id": "e0339af095122daf077129bdbaa097687d21880a",
"size": "2706",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/web/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "406944"
},
{
"name": "HTML",
"bytes": "82418"
},
{
"name": "Java",
"bytes": "2013199"
},
{
"name": "JavaScript",
"bytes": "2896870"
},
{
"name": "Python",
"bytes": "63940"
},
{
"name": "Shell",
"bytes": "1029"
}
],
"symlink_target": ""
}
|
import sys
from ged4py.parser import GedcomReader

# Open the GEDCOM file named on the command line and dump a summary of
# every individual (INDI) record it contains.
with GedcomReader(sys.argv[1]) as reader:
    for index, person in enumerate(reader.records0("INDI")):
        # One of many possible name representations.
        print(f"{index}: {person.name.format()}")
        dad = person.father
        if dad:
            print(f"    father: {dad.name.format()}")
        mom = person.mother
        if mom:
            print(f"    mother: {mom.name.format()}")
        # Values of the BIRT/DATE and BIRT/PLAC sub-tags, when present.
        for tag, caption in (("BIRT/DATE", "birth date"),
                             ("BIRT/PLAC", "birth place")):
            value = person.sub_tag_value(tag)
            if value:
                print(f"    {caption}: {value}")
|
{
"content_hash": "294978e2be6e01d085eddce341e2b29c",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 61,
"avg_line_length": 32.55555555555556,
"alnum_prop": 0.5813424345847554,
"repo_name": "andy-z/ged4py",
"id": "a103af8fa9e2ec733e6807cc3a1d6aeb637eccb5",
"size": "879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/example_code/example1.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2283"
},
{
"name": "Python",
"bytes": "118486"
}
],
"symlink_target": ""
}
|
import logging
import socket
import sys
try:
from urllib2 import Request, urlopen
except ImportError:
from urllib.request import Request, urlopen
from opbeat.conf import defaults
from opbeat.contrib.async_worker import AsyncWorker
from opbeat.transport.base import Transport, AsyncTransport, TransportException
from opbeat.utils import six
from opbeat.utils.compat import HTTPError
logger = logging.getLogger('opbeat')
class HTTPTransport(Transport):
    """Synchronous HTTP(S) transport for delivering payloads to Opbeat."""

    scheme = ['http', 'https']

    def __init__(self, parsed_url):
        self.check_scheme(parsed_url)
        self._parsed_url = parsed_url
        self._url = parsed_url.geturl()

    def send(self, data, headers, timeout=None):
        """
        Sends a request to a remote webserver using HTTP POST.
        """
        request = Request(self._url, headers=headers)
        if timeout is None:
            timeout = defaults.TIMEOUT
        try:
            try:
                response = urlopen(request, data, timeout)
            except TypeError:
                # Older urlopen signatures do not accept a timeout argument.
                response = urlopen(request, data)
            return response
        except Exception as exc:
            if isinstance(exc, socket.timeout):
                message = (
                    "Connection to Opbeat server timed out "
                    "(url: %s, timeout: %d seconds)" % (self._url, timeout)
                )
            elif isinstance(exc, HTTPError):
                body = exc.read()
                message = (
                    'Unable to reach Opbeat server: '
                    '%s (url: %s, body: %s)' % (exc, self._url, body)
                )
            else:
                message = 'Unable to reach Opbeat server: %s (url: %s)' % (
                    exc, self._url
                )
            raise TransportException(message, data)
class AsyncHTTPTransport(AsyncTransport, HTTPTransport):
    """HTTP transport that hands deliveries to a background worker thread."""

    scheme = ['http', 'https']
    async_mode = True

    def __init__(self, parsed_url):
        super(AsyncHTTPTransport, self).__init__(parsed_url)
        # Strip the "async+" marker so the stored URL is a plain http(s) URL.
        marker = 'async+'
        if self._url.startswith(marker):
            self._url = self._url[len(marker):]
        self._worker = None

    @property
    def worker(self):
        # Lazily (re)create the worker when missing or no longer alive.
        if not self._worker or not self._worker.is_alive():
            self._worker = AsyncWorker()
        return self._worker

    def send_sync(self, data=None, headers=None, success_callback=None,
                  fail_callback=None):
        # Any failure — in the send itself or in the success callback —
        # is routed to fail_callback when one is provided.
        try:
            response = HTTPTransport.send(self, data, headers)
            if callable(success_callback):
                success_callback(url=response.info().get('Location'))
        except Exception as exc:
            if callable(fail_callback):
                fail_callback(exception=exc)

    def send_async(self, data, headers, success_callback=None,
                   fail_callback=None):
        # Queue the synchronous send on the background worker.
        self.worker.queue(self.send_sync, {
            'data': data,
            'headers': headers,
            'success_callback': success_callback,
            'fail_callback': fail_callback,
        })

    def close(self):
        worker = self._worker
        if worker:
            worker.main_thread_terminated()
|
{
"content_hash": "3ff573f8ac8d33af9f97c6372db3bcc9",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 79,
"avg_line_length": 31.525252525252526,
"alnum_prop": 0.5629605895546299,
"repo_name": "daikeren/opbeat_python",
"id": "806a2bf3e7430fc69fab8c5d9c7d6cc3f2aba549",
"size": "3145",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "opbeat/transport/http.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "81877"
},
{
"name": "HTML",
"bytes": "284"
},
{
"name": "Makefile",
"bytes": "135"
},
{
"name": "Python",
"bytes": "407176"
}
],
"symlink_target": ""
}
|
"""
Images Pipeline
See documentation in topics/images.rst
"""
import os
import time
import hashlib
import urlparse
import rfc822
from cStringIO import StringIO
from collections import defaultdict
from twisted.internet import defer, threads
from PIL import Image
from scrapy import log
from scrapy.utils.misc import md5sum
from scrapy.http import Request
from scrapy.exceptions import DropItem, NotConfigured, IgnoreRequest
from scrapy.contrib.pipeline.media import MediaPipeline
class NoimagesDrop(DropItem):
    """Raised to drop a product item that yielded no images."""
class ImageException(Exception):
    """General image error exception (download, validation or processing)."""
class FSImagesStore(object):
    """Store downloaded images on the local filesystem under ``basedir``."""

    def __init__(self, basedir):
        # Accept URI-style values such as "file:///tmp/images".
        if '://' in basedir:
            basedir = basedir.split('://', 1)[1]
        self.basedir = basedir
        self._mkdir(self.basedir)
        # Directories already created, keyed by crawl domain, so repeated
        # os.path.exists checks can be skipped.
        self.created_directories = defaultdict(set)

    def persist_image(self, key, image, buf, info):
        """Save *image* under *key*, creating intermediate directories."""
        absolute_path = self._get_filesystem_path(key)
        self._mkdir(os.path.dirname(absolute_path), info)
        image.save(absolute_path)

    def stat_image(self, key, info):
        """Return mtime and checksum for *key*, or {} if not stored."""
        absolute_path = self._get_filesystem_path(key)
        try:
            last_modified = os.path.getmtime(absolute_path)
        except OSError:
            # Fix: was a bare `except:` (flagged FIXME) that swallowed
            # everything, including KeyboardInterrupt; os.path.getmtime
            # raises OSError when the file is missing or unreadable.
            return {}
        with open(absolute_path, 'rb') as imagefile:
            checksum = md5sum(imagefile)
        return {'last_modified': last_modified, 'checksum': checksum}

    def _get_filesystem_path(self, key):
        """Map a slash-separated key to a path under basedir."""
        path_comps = key.split('/')
        return os.path.join(self.basedir, *path_comps)

    def _mkdir(self, dirname, domain=None):
        # Per-domain memo of directories known to exist.
        seen = self.created_directories[domain] if domain else set()
        if dirname not in seen:
            if not os.path.exists(dirname):
                os.makedirs(dirname)
            seen.add(dirname)
class S3ImagesStore(object):
    """Store images in an S3 bucket via boto, off-thread with deferToThread."""

    AWS_ACCESS_KEY_ID = None
    AWS_SECRET_ACCESS_KEY = None
    POLICY = 'public-read'
    HEADERS = {
        'Cache-Control': 'max-age=172800',
        'Content-Type': 'image/jpeg',
    }

    def __init__(self, uri):
        assert uri.startswith('s3://')
        bucket_and_prefix = uri[5:]
        self.bucket, self.prefix = bucket_and_prefix.split('/', 1)

    def stat_image(self, key, info):
        """Return a deferred firing with checksum and mtime for *key*."""
        def _extract_stat(boto_key):
            parsed = rfc822.parsedate_tz(boto_key.last_modified)
            modified_stamp = int(rfc822.mktime_tz(parsed))
            return {'checksum': boto_key.etag.strip('"'),
                    'last_modified': modified_stamp}
        return self._get_boto_key(key).addCallback(_extract_stat)

    def _get_boto_bucket(self):
        from boto.s3.connection import S3Connection
        # disable ssl (is_secure=False) because of this python bug:
        # http://bugs.python.org/issue5103
        conn = S3Connection(self.AWS_ACCESS_KEY_ID,
                            self.AWS_SECRET_ACCESS_KEY,
                            is_secure=False)
        return conn.get_bucket(self.bucket, validate=False)

    def _get_boto_key(self, key):
        """Fetch the boto Key object for *key* in a worker thread."""
        bucket = self._get_boto_bucket()
        return threads.deferToThread(bucket.get_key,
                                     '%s%s' % (self.prefix, key))

    def persist_image(self, key, image, buf, info):
        """Upload image to S3 storage"""
        bucket = self._get_boto_bucket()
        s3_key = bucket.new_key('%s%s' % (self.prefix, key))
        width, height = image.size
        s3_key.set_metadata('width', str(width))
        s3_key.set_metadata('height', str(height))
        buf.seek(0)
        return threads.deferToThread(s3_key.set_contents_from_file, buf,
                                     headers=self.HEADERS, policy=self.POLICY)
class ImagesPipeline(MediaPipeline):
    """Abstract pipeline that implement the image downloading and thumbnail generation logic

    This pipeline tries to minimize network transfers and image processing,
    doing stat of the images and determining if image is new, uptodate or
    expired.

    `new` images are those that pipeline never processed and needs to be
    downloaded from supplier site the first time.

    `uptodate` images are the ones that the pipeline processed and are still
    valid images.

    `expired` images are those that pipeline already processed but the last
    modification was made long time ago, so a reprocessing is recommended to
    refresh it in case of change.
    """

    MEDIA_NAME = 'image'
    # Minimum accepted dimensions; 0 disables the check (see get_images).
    MIN_WIDTH = 0
    MIN_HEIGHT = 0
    # Age in days after which a stored image is re-downloaded.
    EXPIRES = 90
    # Mapping of thumb_id -> size used to generate thumbnails.
    THUMBS = {}
    # Store-URI scheme -> storage backend class.
    STORE_SCHEMES = {
        '': FSImagesStore,
        'file': FSImagesStore,
        's3': S3ImagesStore,
    }

    def __init__(self, store_uri, download_func=None):
        if not store_uri:
            raise NotConfigured
        self.store = self._get_store(store_uri)
        super(ImagesPipeline, self).__init__(download_func=download_func)

    @classmethod
    def from_settings(cls, settings):
        """Build the pipeline from crawler settings.

        NOTE: these assignments mutate class-level attributes, so the
        configuration is shared by all instances created afterwards.
        """
        cls.MIN_WIDTH = settings.getint('IMAGES_MIN_WIDTH', 0)
        cls.MIN_HEIGHT = settings.getint('IMAGES_MIN_HEIGHT', 0)
        cls.EXPIRES = settings.getint('IMAGES_EXPIRES', 90)
        cls.THUMBS = settings.get('IMAGES_THUMBS', {})
        # AWS credentials are injected into the S3 store class itself.
        s3store = cls.STORE_SCHEMES['s3']
        s3store.AWS_ACCESS_KEY_ID = settings['AWS_ACCESS_KEY_ID']
        s3store.AWS_SECRET_ACCESS_KEY = settings['AWS_SECRET_ACCESS_KEY']
        store_uri = settings['IMAGES_STORE']
        return cls(store_uri)

    def _get_store(self, uri):
        """Instantiate the storage backend matching *uri*'s scheme."""
        if os.path.isabs(uri):  # to support win32 paths like: C:\\some\dir
            scheme = 'file'
        else:
            scheme = urlparse.urlparse(uri).scheme
        store_cls = self.STORE_SCHEMES[scheme]
        return store_cls(uri)

    def media_downloaded(self, response, request, info):
        """Validate a downloaded response and persist the image.

        Returns {'url', 'path', 'checksum'}; raises ImageException on HTTP
        errors, empty bodies or processing failures.
        """
        referer = request.headers.get('Referer')

        if response.status != 200:
            log.msg(format='Image (code: %(status)s): Error downloading image from %(request)s referred in <%(referer)s>',
                    level=log.WARNING, spider=info.spider,
                    status=response.status, request=request, referer=referer)
            raise ImageException('download-error')

        if not response.body:
            log.msg(format='Image (empty-content): Empty image from %(request)s referred in <%(referer)s>: no-content',
                    level=log.WARNING, spider=info.spider,
                    request=request, referer=referer)
            raise ImageException('empty-content')

        status = 'cached' if 'cached' in response.flags else 'downloaded'
        log.msg(format='Image (%(status)s): Downloaded image from %(request)s referred in <%(referer)s>',
                level=log.DEBUG, spider=info.spider,
                status=status, request=request, referer=referer)
        self.inc_stats(info.spider, status)

        try:
            key = self.image_key(request.url)
            checksum = self.image_downloaded(response, request, info)
        except ImageException as exc:
            whyfmt = 'Image (error): Error processing image from %(request)s referred in <%(referer)s>: %(errormsg)s'
            log.msg(format=whyfmt, level=log.WARNING, spider=info.spider,
                    request=request, referer=referer, errormsg=str(exc))
            raise
        except Exception as exc:
            # Unexpected failures are logged with traceback, then re-raised
            # wrapped so callers see a media-level error.
            whyfmt = 'Image (unknown-error): Error processing image from %(request)s referred in <%(referer)s>'
            log.err(None, whyfmt % {'request': request, 'referer': referer}, spider=info.spider)
            raise ImageException(str(exc))

        return {'url': request.url, 'path': key, 'checksum': checksum}

    def media_failed(self, failure, request, info):
        """Log the download failure (unless IgnoreRequest) and raise."""
        if not isinstance(failure.value, IgnoreRequest):
            referer = request.headers.get('Referer')
            log.msg(format='Image (unknown-error): Error downloading '
                           '%(medianame)s from %(request)s referred in '
                           '<%(referer)s>: %(exception)s',
                    level=log.WARNING, spider=info.spider, exception=failure.value,
                    medianame=self.MEDIA_NAME, request=request, referer=referer)
        raise ImageException

    def media_to_download(self, request, info):
        """Return the cached result if the stored image is fresh, else None.

        Returning None tells the media pipeline to download the image.
        """
        def _onsuccess(result):
            if not result:
                return  # returning None force download

            last_modified = result.get('last_modified', None)
            if not last_modified:
                return  # returning None force download

            age_seconds = time.time() - last_modified
            age_days = age_seconds / 60 / 60 / 24
            if age_days > self.EXPIRES:
                return  # returning None force download

            referer = request.headers.get('Referer')
            log.msg(format='Image (uptodate): Downloaded %(medianame)s from %(request)s referred in <%(referer)s>',
                    level=log.DEBUG, spider=info.spider,
                    medianame=self.MEDIA_NAME, request=request, referer=referer)
            self.inc_stats(info.spider, 'uptodate')

            checksum = result.get('checksum', None)
            # `key` is the closure variable bound below, before the
            # deferred fires.
            return {'url': request.url, 'path': key, 'checksum': checksum}

        key = self.image_key(request.url)
        dfd = defer.maybeDeferred(self.store.stat_image, key, info)
        dfd.addCallbacks(_onsuccess, lambda _: None)
        dfd.addErrback(log.err, self.__class__.__name__ + '.store.stat_image')
        return dfd

    def image_downloaded(self, response, request, info):
        """Persist the image and all thumbnails; return the main checksum."""
        checksum = None
        for key, image, buf in self.get_images(response, request, info):
            if checksum is None:
                # First yielded item is the full-size image; its MD5 becomes
                # the item checksum.
                buf.seek(0)
                checksum = md5sum(buf)
            self.store.persist_image(key, image, buf, info)
        return checksum

    def get_images(self, response, request, info):
        """Yield (key, PIL image, JPEG buffer) for the image and each thumb.

        Raises ImageException when the source image is below the configured
        minimum dimensions.
        """
        key = self.image_key(request.url)
        orig_image = Image.open(StringIO(response.body))

        width, height = orig_image.size
        if width < self.MIN_WIDTH or height < self.MIN_HEIGHT:
            raise ImageException("Image too small (%dx%d < %dx%d)" %
                                 (width, height, self.MIN_WIDTH, self.MIN_HEIGHT))

        image, buf = self.convert_image(orig_image)
        yield key, image, buf

        for thumb_id, size in self.THUMBS.iteritems():
            thumb_key = self.thumb_key(request.url, thumb_id)
            thumb_image, thumb_buf = self.convert_image(image, size)
            yield thumb_key, thumb_image, thumb_buf

    def inc_stats(self, spider, status):
        """Bump the total and per-status image counters in crawler stats."""
        spider.crawler.stats.inc_value('image_count', spider=spider)
        spider.crawler.stats.inc_value('image_status_count/%s' % status, spider=spider)

    def convert_image(self, image, size=None):
        """Normalize *image* to an RGB JPEG, optionally thumbnailed to *size*.

        Returns (image, buffer) where buffer holds the encoded JPEG bytes.
        """
        if image.format == 'PNG' and image.mode == 'RGBA':
            # Flatten transparency onto a white background before JPEG.
            background = Image.new('RGBA', image.size, (255, 255, 255))
            background.paste(image, image)
            image = background.convert('RGB')
        elif image.mode != 'RGB':
            image = image.convert('RGB')

        if size:
            image = image.copy()
            image.thumbnail(size, Image.ANTIALIAS)

        buf = StringIO()
        image.save(buf, 'JPEG')
        return image, buf

    def image_key(self, url):
        """Storage path of the full-size image: full/<sha1(url)>.jpg."""
        image_guid = hashlib.sha1(url).hexdigest()
        return 'full/%s.jpg' % (image_guid)

    def thumb_key(self, url, thumb_id):
        """Storage path of a thumbnail: thumbs/<thumb_id>/<sha1(url)>.jpg."""
        image_guid = hashlib.sha1(url).hexdigest()
        return 'thumbs/%s/%s.jpg' % (thumb_id, image_guid)

    def get_media_requests(self, item, info):
        """Schedule one download Request per URL in the item's image_urls."""
        return [Request(x) for x in item.get('image_urls', [])]

    def item_completed(self, results, item, info):
        """Attach successful download results to the item's images field."""
        if 'images' in item.fields:
            item['images'] = [x for ok, x in results if ok]
        return item
|
{
"content_hash": "be3e2d63bbc676d33b8386919b34776e",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 122,
"avg_line_length": 37.73482428115016,
"alnum_prop": 0.6111252222504445,
"repo_name": "gnemoug/scrapy",
"id": "4ccc8e28241c1db467e730bbec2db43e238e5992",
"size": "11811",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "scrapy/contrib/pipeline/images.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "841071"
},
{
"name": "Shell",
"bytes": "1733"
}
],
"symlink_target": ""
}
|
import pkgutil
def test_imports():
    """
    This just imports all modules in astropy, making sure they don't have any
    dependencies that sneak through
    """
    def swallow_warnings(name):
        # Re-raise any legitimate error from the walk, but let Warning
        # subclasses (caught because of our pytest settings, e.g.
        # DeprecationWarning) pass silently.
        try:
            raise
        except Warning:
            pass

    walker = pkgutil.walk_packages(['astropy'], 'astropy.',
                                   onerror=swallow_warnings)
    for finder, module_name, is_pkg in walker:
        finder.find_spec(module_name)
def test_toplevel_namespace():
    """Check astropy's top level exposes its API without leaking stdlib names."""
    import astropy
    names = dir(astropy)
    # Same checks, same order as before: leaked stdlib modules absent,
    # public entry points present.
    for name, should_be_present in (('os', False),
                                    ('log', True),
                                    ('test', True),
                                    ('sys', False)):
        assert (name in names) == should_be_present
|
{
"content_hash": "ef29bb1e37fb94192763534f3f05d5fd",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 77,
"avg_line_length": 27.275862068965516,
"alnum_prop": 0.5828065739570164,
"repo_name": "lpsinger/astropy",
"id": "d13c956f0f94ee98a0423e1d98653d8ae27e47a2",
"size": "856",
"binary": false,
"copies": "4",
"ref": "refs/heads/main",
"path": "astropy/tests/tests/test_imports.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "11040074"
},
{
"name": "C++",
"bytes": "47001"
},
{
"name": "Cython",
"bytes": "78755"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Lex",
"bytes": "183333"
},
{
"name": "M4",
"bytes": "18757"
},
{
"name": "Makefile",
"bytes": "52508"
},
{
"name": "Python",
"bytes": "12323563"
},
{
"name": "Shell",
"bytes": "17024"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
"""Abstract Base Classes (ABCs) according to PEP 3119."""
import types
from _weakrefset import WeakSet
# Instance of old-style class.  In Python 2 every old-style (classic)
# instance reports this same type, which __instancecheck__ uses to detect
# them and fall back to the instance's __class__.
class _C: pass
_InstanceType = type(_C())
def abstractmethod(funcobj):
    """A decorator indicating abstract methods.

    Requires that the metaclass is ABCMeta or derived from it.  A class
    whose metaclass derives from ABCMeta cannot be instantiated until all
    of its abstract methods are overridden; the abstract methods remain
    callable through the usual 'super' mechanisms.

    Usage:

        class C:
            __metaclass__ = ABCMeta
            @abstractmethod
            def my_abstract_method(self, ...):
                ...
    """
    # ABCMeta.__new__ collects names carrying this marker attribute.
    setattr(funcobj, '__isabstractmethod__', True)
    return funcobj
class abstractproperty(property):
    """A decorator indicating abstract properties.

    Requires that the metaclass is ABCMeta or derived from it. A
    class that has a metaclass derived from ABCMeta cannot be
    instantiated unless all of its abstract properties are overridden.
    The abstract properties can be called using any of the normal
    'super' call mechanisms.

    Usage:

        class C:
            __metaclass__ = ABCMeta
            @abstractproperty
            def my_abstract_property(self):
                ...

    This defines a read-only property; you can also define a read-write
    abstract property using the 'long' form of property declaration:

        class C:
            __metaclass__ = ABCMeta
            def getx(self): ...
            def setx(self, value): ...
            x = abstractproperty(getx, setx)
    """
    # Marks the property so ABCMeta.__new__ counts it among
    # __abstractmethods__, blocking instantiation until overridden.
    __isabstractmethod__ = True
class ABCMeta(type):
    """Metaclass for defining Abstract Base Classes (ABCs).

    Use this metaclass to create an ABC.  An ABC can be subclassed
    directly, and then acts as a mix-in class.  You can also register
    unrelated concrete classes (even built-in classes) and unrelated
    ABCs as 'virtual subclasses' -- these and their descendants will
    be considered subclasses of the registering ABC by the built-in
    issubclass() function, but the registering ABC won't show up in
    their MRO (Method Resolution Order) nor will method
    implementations defined by the registering ABC be callable (not
    even via super()).
    """

    # A global counter that is incremented each time a class is
    # registered as a virtual subclass of anything.  It forces the
    # negative cache to be cleared before its next use.
    _abc_invalidation_counter = 0

    def __new__(mcls, name, bases, namespace):
        """Create the class and record its frozenset of abstract names."""
        cls = super(ABCMeta, mcls).__new__(mcls, name, bases, namespace)
        # Compute set of abstract method names
        abstracts = set(name
                     for name, value in namespace.items()
                     if getattr(value, "__isabstractmethod__", False))
        # Inherited abstract names stay abstract unless this class
        # overrides them with a concrete attribute.
        for base in bases:
            for name in getattr(base, "__abstractmethods__", set()):
                value = getattr(cls, name, None)
                if getattr(value, "__isabstractmethod__", False):
                    abstracts.add(name)
        cls.__abstractmethods__ = frozenset(abstracts)
        # Set up inheritance registry
        cls._abc_registry = WeakSet()
        cls._abc_cache = WeakSet()
        cls._abc_negative_cache = WeakSet()
        cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        return cls

    def register(cls, subclass):
        """Register a virtual subclass of an ABC."""
        if not isinstance(subclass, (type, types.ClassType)):
            raise TypeError("Can only register classes")
        if issubclass(subclass, cls):
            return  # Already a subclass
        # Subtle: test for cycles *after* testing for "already a subclass";
        # this means we allow X.register(X) and interpret it as a no-op.
        if issubclass(cls, subclass):
            # This would create a cycle, which is bad for the algorithm below
            raise RuntimeError("Refusing to create an inheritance cycle")
        cls._abc_registry.add(subclass)
        ABCMeta._abc_invalidation_counter += 1  # Invalidate negative cache

    def _dump_registry(cls, file=None):
        """Debug helper to print the ABC registry."""
        print >> file, "Class: %s.%s" % (cls.__module__, cls.__name__)
        print >> file, "Inv.counter: %s" % ABCMeta._abc_invalidation_counter
        for name in sorted(cls.__dict__.keys()):
            if name.startswith("_abc_"):
                value = getattr(cls, name)
                print >> file, "%s: %r" % (name, value)

    def __instancecheck__(cls, instance):
        """Override for isinstance(instance, cls)."""
        # Inline the cache checking when it's simple.
        subclass = getattr(instance, '__class__', None)
        if subclass is not None and subclass in cls._abc_cache:
            return True
        subtype = type(instance)
        # Old-style instances
        if subtype is _InstanceType:
            subtype = subclass
        if subtype is subclass or subclass is None:
            # __class__ and type() agree: one negative-cache lookup (only
            # valid while the cache version is current) may short-circuit.
            if (cls._abc_negative_cache_version ==
                ABCMeta._abc_invalidation_counter and
                subtype in cls._abc_negative_cache):
                return False
            # Fall back to the subclass check.
            return cls.__subclasscheck__(subtype)
        # __class__ differs from type(instance): accept if either passes.
        return (cls.__subclasscheck__(subclass) or
                cls.__subclasscheck__(subtype))

    def __subclasscheck__(cls, subclass):
        """Override for issubclass(subclass, cls)."""
        # Check cache
        if subclass in cls._abc_cache:
            return True
        # Check negative cache; may have to invalidate
        if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter:
            # Invalidate the negative cache
            cls._abc_negative_cache = WeakSet()
            cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter
        elif subclass in cls._abc_negative_cache:
            return False
        # Check the subclass hook
        ok = cls.__subclasshook__(subclass)
        if ok is not NotImplemented:
            assert isinstance(ok, bool)
            if ok:
                cls._abc_cache.add(subclass)
            else:
                cls._abc_negative_cache.add(subclass)
            return ok
        # Check if it's a direct subclass
        if cls in getattr(subclass, '__mro__', ()):
            cls._abc_cache.add(subclass)
            return True
        # Check if it's a subclass of a registered class (recursive)
        for rcls in cls._abc_registry:
            if issubclass(subclass, rcls):
                cls._abc_cache.add(subclass)
                return True
        # Check if it's a subclass of a subclass (recursive)
        for scls in cls.__subclasses__():
            if issubclass(subclass, scls):
                cls._abc_cache.add(subclass)
                return True
        # No dice; update negative cache
        cls._abc_negative_cache.add(subclass)
        return False
|
{
"content_hash": "662597b24a4460777ee28ead9e762b6c",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 79,
"avg_line_length": 38.595628415300546,
"alnum_prop": 0.6073906272122328,
"repo_name": "ppyordanov/HCI_4_Future_Cities",
"id": "255e214e14d94bbb32cad8d04de88d20dbfa5105",
"size": "7164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Server/src/virtualenv/Lib/abc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "427445"
},
{
"name": "C++",
"bytes": "21783"
},
{
"name": "CSS",
"bytes": "280650"
},
{
"name": "D",
"bytes": "9679"
},
{
"name": "HTML",
"bytes": "37335"
},
{
"name": "Java",
"bytes": "740594"
},
{
"name": "JavaScript",
"bytes": "1801741"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "2631176"
},
{
"name": "Shell",
"bytes": "12283"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import reverse
from pyquery import PyQuery as pq
from olympia.abuse.models import AbuseReport
from olympia.addons.models import Addon
from olympia.amo.tests import TestCase, user_factory
from olympia.users.models import UserProfile
class TestAbuse(TestCase):
    """Admin changelist for abuse reports: listing and type filtering."""
    fixtures = ['base/addon_3615', 'base/user_999']

    def test_list(self):
        addon = Addon.objects.get(pk=3615)
        user = UserProfile.objects.get(pk=999)

        # Seed reports: two against a known add-on, one against a guid with
        # no matching add-on row, and one against a user.
        AbuseReport.objects.create(addon=addon, message='Foo')
        AbuseReport.objects.create(
            addon=addon, ip_address='1.2.3.4', reporter=user_factory(),
            message='Bar')
        # This is a report for an addon not in the database
        AbuseReport.objects.create(guid='@guid', message='Foo')
        AbuseReport.objects.create(user=user_factory(), message='Eheheheh')

        url = reverse('admin:abuse_abusereport_changelist')
        self.grant_permission(user, '*:*')
        self.client.login(email=user.email)

        # Unfiltered list first, then filtered by report type.
        for params, expected_rows in ((None, 4),
                                      ({'type': 'addon'}, 3),
                                      ({'type': 'user'}, 1)):
            response = self.client.get(url, params)
            assert response.status_code == 200
            doc = pq(response.content)
            assert doc('#result_list tbody tr').length == expected_rows
|
{
"content_hash": "0464af194dbe520a5043f00a212fe1ce",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 75,
"avg_line_length": 37.348837209302324,
"alnum_prop": 0.6519302615193027,
"repo_name": "lavish205/olympia",
"id": "8f433f202d3a1d80c7dfcb2338d31e24c8db52c4",
"size": "1630",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/olympia/abuse/tests/test_admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "808053"
},
{
"name": "HTML",
"bytes": "614229"
},
{
"name": "JavaScript",
"bytes": "1075018"
},
{
"name": "Makefile",
"bytes": "820"
},
{
"name": "PLSQL",
"bytes": "1074"
},
{
"name": "PLpgSQL",
"bytes": "2381"
},
{
"name": "Python",
"bytes": "5064850"
},
{
"name": "SQLPL",
"bytes": "559"
},
{
"name": "Shell",
"bytes": "11467"
},
{
"name": "Smarty",
"bytes": "1758"
}
],
"symlink_target": ""
}
|
# Emit the classic greeting on stdout.
greeting = "hello world!!!"
print(greeting)
|
{
"content_hash": "bc50e181ce88fcc06d8ca05279345c65",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 23,
"avg_line_length": 24,
"alnum_prop": 0.625,
"repo_name": "WebClub-NITK/Hacktoberfest-2k17",
"id": "81304866b07981492374b7501b016c6b904bbf1b",
"size": "24",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/hello.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "41"
},
{
"name": "C",
"bytes": "111323"
},
{
"name": "C#",
"bytes": "845"
},
{
"name": "C++",
"bytes": "25563"
},
{
"name": "CSS",
"bytes": "1069"
},
{
"name": "Go",
"bytes": "359"
},
{
"name": "HTML",
"bytes": "32484"
},
{
"name": "Java",
"bytes": "20074"
},
{
"name": "JavaScript",
"bytes": "2713"
},
{
"name": "Lua",
"bytes": "394"
},
{
"name": "PHP",
"bytes": "1042"
},
{
"name": "Pascal",
"bytes": "235"
},
{
"name": "Perl",
"bytes": "579"
},
{
"name": "Python",
"bytes": "32114"
},
{
"name": "Ruby",
"bytes": "60"
},
{
"name": "VHDL",
"bytes": "1542"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/item/component/shared_item_electronics_gp_module.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
{
"content_hash": "b1d1bf82efb743e29f0ac9ebe0473c38",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 96,
"avg_line_length": 25,
"alnum_prop": 0.7046153846153846,
"repo_name": "obi-two/Rebelion",
"id": "4ec51a2db1bef85775da9e9071c004dedb9192ac",
"size": "470",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/draft_schematic/item/component/shared_item_electronics_gp_module.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
}
|
"""Tests for liveness module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.platform import test
class LivenessTest(test.TestCase):
  """Checks liveness annotations produced by the static analysis.

  Each test defines a small `test_fn`, runs the full pyct analysis pipeline
  on it, then asserts which variables are live going into / out of specific
  top-level statements of the function body. The inner `test_fn` sources are
  analyzed verbatim, so their exact shape is significant.
  """
  def _parse_and_analyze(self, test_fn):
    """Parses `test_fn` and runs qual_names, activity, CFG and liveness.

    Returns the annotated AST node for the function.
    """
    node, source = parser.parse_entity(test_fn, future_features=())
    entity_info = transformer.EntityInfo(
        source_code=source, source_file=None, future_features=(), namespace={})
    node = qual_names.resolve(node)
    ctx = transformer.Context(entity_info)
    node = activity.resolve(node, ctx)
    graphs = cfg.build(node)
    liveness.resolve(node, ctx, graphs)
    return node
  def assertHasLiveOut(self, node, expected):
    """Asserts the set of variables live *after* `node` equals `expected`.

    `expected` may be falsy (no live variables), a single name, or a tuple
    of names.
    """
    live_out = anno.getanno(node, anno.Static.LIVE_VARS_OUT)
    live_out_strs = set(str(v) for v in live_out)
    if not expected:
      expected = ()
    if not isinstance(expected, tuple):
      expected = (expected,)
    self.assertSetEqual(live_out_strs, set(expected))
  def assertHasLiveIn(self, node, expected):
    """Asserts the set of variables live *before* `node` equals `expected`."""
    live_in = anno.getanno(node, anno.Static.LIVE_VARS_IN)
    live_in_strs = set(str(v) for v in live_in)
    if not expected:
      expected = ()
    if not isinstance(expected, tuple):
      expected = (expected,)
    self.assertSetEqual(live_in_strs, set(expected))
  def test_live_out_stacked_if(self):
    def test_fn(x, a):
      if a > 0:
        x = 0
      if a > 1:
        x = 1
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveOut(fn_body[0], ('a', 'x'))
    self.assertHasLiveOut(fn_body[1], 'x')
  def test_live_out_stacked_if_else(self):
    def test_fn(x, a):
      if a > 0:
        x = 0
      if a > 1:
        x = 1
      else:
        x = 2
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveOut(fn_body[0], 'a')
    self.assertHasLiveOut(fn_body[1], 'x')
  def test_live_out_for_basic(self):
    def test_fn(x, a):
      for i in range(a):
        x += i
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveOut(fn_body[0], 'x')
  def test_live_out_for_iterate(self):
    def test_fn(x, a):
      for i in range(a):
        x += i
      return x, i  # pylint:disable=undefined-loop-variable
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveOut(fn_body[0], ('x', 'i'))
  def test_live_out_attributes(self):
    def test_fn(x, a):
      if a > 0:
        x.y = 0
      return x.y
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveOut(fn_body[0], ('x.y', 'x'))
  def test_live_out_nested_functions(self):
    def test_fn(a, b):
      if b:
        a = []
      def foo():
        return a
      foo()
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveOut(fn_body[0], 'a')
  def test_live_out_nested_functions_isolation(self):
    def test_fn(b):
      if b:
        a = 0  # pylint:disable=unused-variable
      def child():
        max(a)  # pylint:disable=used-before-assignment
        a = 1
        return a
      child()
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveOut(fn_body[0], 'max')
  def test_live_out_deletion(self):
    def test_fn(x, y, a):
      for _ in a:
        if x:
          del y
        else:
          y = 0
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveOut(fn_body[0], ())
  def test_live_in_stacked_if(self):
    def test_fn(x, a, b, c):
      if a > 0:
        x = b
      if c > 1:
        x = 0
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveIn(fn_body[0], ('a', 'b', 'c', 'x'))
    self.assertHasLiveIn(fn_body[1], ('c', 'x'))
  def test_live_in_stacked_if_else(self):
    def test_fn(x, a, b, c, d):
      if a > 1:
        x = b
      else:
        x = c
      if d > 0:
        x = 0
      return x
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveIn(fn_body[0], ('a', 'b', 'c', 'd'))
    self.assertHasLiveIn(fn_body[1], ('d', 'x'))
  def test_live_in_for_basic(self):
    def test_fn(x, y, a):
      for i in a:
        x = i
        y += x
        z = 0
      return y, z
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveIn(fn_body[0], ('a', 'y', 'z'))
  def test_live_in_for_nested(self):
    def test_fn(x, y, a):
      for i in a:
        for j in i:
          x = i
          y += x
          z = j
      return y, z
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveIn(fn_body[0], ('a', 'y', 'z'))
  def test_live_in_deletion(self):
    def test_fn(x, y, a):
      for _ in a:
        if x:
          del y
        else:
          y = 0
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    self.assertHasLiveIn(fn_body[0], ('a', 'x', 'y'))
  def test_live_in_generator_comprehension(self):
    def test_fn(y):
      if all(x for x in y):
        return
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    # In Python 2 the comprehension variable leaks into the enclosing scope.
    if six.PY2:
      self.assertHasLiveIn(fn_body[0], ('all', 'x', 'y'))
    else:
      self.assertHasLiveIn(fn_body[0], ('all', 'y'))
  def test_live_in_list_comprehension(self):
    def test_fn(y):
      if [x for x in y]:
        return
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    if six.PY2:
      self.assertHasLiveIn(fn_body[0], ('x', 'y'))
    else:
      self.assertHasLiveIn(fn_body[0], ('y',))
  def test_live_in_set_comprehension(self):
    def test_fn(y):
      if {x for x in y}:
        return
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    if six.PY2:
      self.assertHasLiveIn(fn_body[0], ('x', 'y'))
    else:
      self.assertHasLiveIn(fn_body[0], ('y',))
  def test_live_in_dict_comprehension(self):
    def test_fn(y):
      if {k: v for k, v in y}:
        return
    node = self._parse_and_analyze(test_fn)
    fn_body = node.body
    if six.PY2:
      self.assertHasLiveIn(fn_body[0], ('k', 'v', 'y'))
    else:
      self.assertHasLiveIn(fn_body[0], ('y',))
if __name__ == '__main__':
  # Run the test suite when executed directly.
  test.main()
|
{
"content_hash": "95d2bde000eb6d35d4c1e3d68b8efeef",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 79,
"avg_line_length": 22.581939799331103,
"alnum_prop": 0.5793838862559242,
"repo_name": "kevin-coder/tensorflow-fork",
"id": "5cbbe4a7951a785945ab48692d68325426638329",
"size": "7441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/autograph/pyct/static_analysis/liveness_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9117"
},
{
"name": "C",
"bytes": "340300"
},
{
"name": "C++",
"bytes": "39383425"
},
{
"name": "CMake",
"bytes": "194940"
},
{
"name": "Go",
"bytes": "1046987"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "567239"
},
{
"name": "Jupyter Notebook",
"bytes": "1940883"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "33617202"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "425910"
}
],
"symlink_target": ""
}
|
"""This example gets all custom targeting keys and the values.
To create custom targeting keys and values, run
create_custom_targeting_keys_and_values.py."""
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
  """Prints every custom targeting key and its associated values.

  Pages through all custom targeting keys, then through all values belonging
  to those keys, groups the values by key id and prints the result.

  Args:
    client: An initialized dfp.DfpClient.
  """
  # Initialize appropriate service.
  custom_targeting_service = client.GetService(
      'CustomTargetingService', version='v201508')
  # Create statement to get all targeting keys.
  targeting_key_statement = dfp.FilterStatement()
  all_keys = []
  # Page through custom targeting keys until a page comes back empty.
  while True:
    response = custom_targeting_service.getCustomTargetingKeysByStatement(
        targeting_key_statement.ToStatement())
    if 'results' in response:
      all_keys.extend(response['results'])
      targeting_key_statement.offset += dfp.SUGGESTED_PAGE_LIMIT
    else:
      break
  if all_keys:
    # Map of custom targeting key id -> list of its values.
    key_value_map = {}
    # Restrict the value query to the keys we just fetched.
    query = ('WHERE customTargetingKeyId IN (%s)'
             % ', '.join([str(key['id']) for key in all_keys]))
    targeting_value_statement = dfp.FilterStatement(query)
    # Page through custom targeting values.
    while True:
      response = custom_targeting_service.getCustomTargetingValuesByStatement(
          targeting_value_statement.ToStatement())
      if 'results' in response:
        # Group each value under its key id directly; this replaces the
        # original O(keys * values) nested scan and is equivalent because the
        # query already limits values to the fetched key ids.
        for value in response['results']:
          key_value_map.setdefault(value['customTargetingKeyId'], []).append(
              value)
        targeting_value_statement.offset += dfp.SUGGESTED_PAGE_LIMIT
      else:
        break
    # Display results.
    for key in all_keys:
      print ('Custom targeting key with id \'%s\', name \'%s\', display name '
             '\'%s\', and type \'%s\' was found.'
             %(key['id'], key['name'], key['displayName'], key['type']))
      if key['id'] in key_value_map:
        for value in key_value_map[key['id']]:
          print ('\tCustom targeting value with id \'%s\', name \'%s\', and '
                 'display name \'%s\' was found.'
                 % (value['id'], value['name'], value['displayName']))
  else:
    # Parenthesized form behaves identically under Python 2 and also parses
    # under Python 3 (the original bare print statement did not).
    print ('No keys were found.')
if __name__ == '__main__':
  # Initialize client object.
  # NOTE(review): assumes googleads credentials/config are available to
  # LoadFromStorage — verify against the library's setup instructions.
  dfp_client = dfp.DfpClient.LoadFromStorage()
  main(dfp_client)
|
{
"content_hash": "5a8e59668633a431647a33620a5b618a",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 78,
"avg_line_length": 34.791666666666664,
"alnum_prop": 0.6315369261477046,
"repo_name": "richardfergie/googleads-python-lib",
"id": "32cc075e2206849e319d92f9ab2ef4746bc67011",
"size": "3123",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/dfp/v201508/custom_targeting_service/get_all_custom_targeting_keys_and_values.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168602"
}
],
"symlink_target": ""
}
|
"""
Celery functions to be processed in a non-blocking distributed manner.
"""
|
{
"content_hash": "6b4fc4265011a7e2d8726743921b6708",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 70,
"avg_line_length": 20,
"alnum_prop": 0.7375,
"repo_name": "vitorio/ocropodium",
"id": "a7085f7cd78ca29a2b4e8567acab231c470f34cb",
"size": "80",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ocradmin/projects/tasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "75173"
},
{
"name": "HTML",
"bytes": "88397"
},
{
"name": "JavaScript",
"bytes": "695068"
},
{
"name": "Python",
"bytes": "403080"
},
{
"name": "Shell",
"bytes": "2805"
},
{
"name": "XSLT",
"bytes": "2772"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters Customer.metal_decision so its integer
    # choices carry new human-readable labels; stored values stay 0/1/2 with
    # a default of 0.
    dependencies = [
        ('customer', '0004_auto_20170906_1506'),
    ]
    operations = [
        migrations.AlterField(
            model_name='customer',
            name='metal_decision',
            field=models.IntegerField(choices=[(0, 'None'), (1, 'I want the very best for top dollar. ($250+ per print)'), (2, 'No, but I still want a high quality metal')], default=0),
        ),
    ]
|
{
"content_hash": "5f39b765d2f776c4272155b5a5ca538f",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 185,
"avg_line_length": 29.11111111111111,
"alnum_prop": 0.6068702290076335,
"repo_name": "hqpr/findyour3d",
"id": "8d5314bc248c0e11b1abd2a396f237530199b2c3",
"size": "597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "findyour3d/customer/migrations/0005_auto_20170907_1012.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "679650"
},
{
"name": "CoffeeScript",
"bytes": "83631"
},
{
"name": "HTML",
"bytes": "197307"
},
{
"name": "JavaScript",
"bytes": "471026"
},
{
"name": "Python",
"bytes": "230763"
},
{
"name": "Shell",
"bytes": "4684"
}
],
"symlink_target": ""
}
|
import numpy as np
from bayespy.utils import misc
from .node import Node, Moments
class Constant(Node):
    r"""
    Node for presenting constant values.

    The node wraps arrays into proper node type.
    """
    def __init__(self, moments, x, **kwargs):
        # `moments` must be a Moments *instance*; passing the Moments class
        # itself is a likely caller mistake, so reject it explicitly.
        if not isinstance(moments, Moments) and issubclass(moments, Moments):
            raise ValueError("Give moments as an object instance instead of a class")
        self._moments = moments
        self.x = x
        # Compute moments
        self.u = self._moments.compute_fixed_moments(x)
        # Dimensions of the moments
        dims = self._moments.dims
        # Resolve plates: the trailing D axes of the first moment are the
        # variable dimensions; anything before them is plates.
        D = len(dims[0])
        if D > 0:
            plates = np.shape(self.u[0])[:-D]
        else:
            plates = np.shape(self.u[0])
        kwargs.setdefault('plates', plates)
        # Constant has no parents, hence no parent moments.
        self._parent_moments = ()
        # Parent constructor
        super().__init__(dims=dims, **kwargs)
    def _get_id_list(self):
        """
        Returns the stochastic ID list.

        This method is used to check that same stochastic nodes are not direct
        parents of a node several times. It is only valid if there are
        intermediate stochastic nodes.

        To put it another way: each ID corresponds to one factor q(..) in the
        posterior approximation. Different IDs mean different factors, thus they
        mean independence. The parents must have independent factors.

        Stochastic nodes should return their unique ID. Deterministic nodes
        should return the IDs of their parents. Constant nodes should return
        empty list of IDs.
        """
        return []
    def get_moments(self):
        # Return the precomputed fixed moments of the wrapped value.
        return self.u
    def set_value(self, x):
        """Replace the wrapped value, recomputing and shape-checking moments."""
        x = np.asanyarray(x)
        #shapes = [np.shape(ui) for ui in self.u]
        self.u = self._moments.compute_fixed_moments(x)
        # The new moments must fit within the plates/dims declared at
        # construction time (broadcasting subsets are allowed).
        for (i, dimsi) in enumerate(self.dims):
            correct_shape = tuple(self.plates) + tuple(dimsi)
            given_shape = np.shape(self.u[i])
            if not misc.is_shape_subset(given_shape, correct_shape):
                raise ValueError(
                    "Incorrect shape {0} for the array, expected {1}"
                    .format(given_shape, correct_shape)
                )
        return
    def lower_bound_contribution(self, gradient=False, **kwargs):
        # Deterministic functions are delta distributions so the lower bound
        # contribuion is zero.
        return 0
    def random(self):
        # A constant "sample" is just the wrapped value itself.
        return self.x
|
{
"content_hash": "32e5e10fc44d3de4d68be64e92034b22",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 85,
"avg_line_length": 32.10126582278481,
"alnum_prop": 0.6021293375394322,
"repo_name": "jluttine/bayespy",
"id": "ccc578cbb4bfbf9939b3e1816684de08c4315d20",
"size": "2796",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "bayespy/inference/vmp/nodes/constant.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1222619"
}
],
"symlink_target": ""
}
|
from ac_mediator.exceptions import ImproperlyConfiguredACService, ACException
from services.acservice.constants import *
import requests
import logging
requests_logger = logging.getLogger('requests_sent')
class BaseACService(object):
    """
    Base class for Audio Commons Service.

    An Audio Commons service should be a composition of BaseACService and
    a number of mixins from the classes below (those supported by the service
    api).
    """
    # Defaults overridden by concrete service subclasses.
    NAME = 'Service name'
    URL = 'http://example.com'
    API_BASE_URL = 'http://example.com/api/'
    # Unique id assigned via configure()/set_service_id().
    service_id = None
    # Component names registered by the mixins' conf_* methods.
    implemented_components = None
    def configure(self, config):
        """Run the main configuration followed by every mixin's conf_* hook."""
        # Do main configuration
        if 'service_id' not in config:
            raise ImproperlyConfiguredACService('Missing item \'service_id\'')
        self.set_service_id(config['service_id'])
        # Init implemented components to empty list
        # Each configuration method from every component is responsible for filling this list
        self.implemented_components = list()
        # Call all object methods that start with 'conf_' to perform mixin's configuration
        for item in dir(self):
            if item.startswith('conf_') and callable(getattr(self, item)):
                getattr(self, item)(config)
    def set_service_id(self, service_id):
        """
        This should be a unique id for the service.
        The id is provided by the Audio Commons consortium.
        :param service_id: 8 character alphanumeric string (e.g. ef21b9ad)
        """
        self.service_id = service_id
    def get_service_description(self):
        """
        Returns a structured description of the capabilities of each component implemented
        by the service. Uses each component's 'component_description' method.
        :return: dict with components as keys
        """
        description = dict()
        # Call all object methods that start with 'describe_' to get description of components
        for item in dir(self):
            if item.startswith('describe_') and callable(getattr(self, item)):
                name, component_description = getattr(self, item)()
                description[name] = component_description
        return description
    def send_request(self,
                     url,
                     method='get',
                     params=None,
                     data=None,
                     supported_auth_methods=None,
                     account=None,
                     use_authentication_method=None):
        """
        Make a request to the service. If not provided, this method automatically chooses
        a suitable authentication method for making the request.
        :param method: request method (either 'get' or 'post')
        :param url: endpoint api url
        :param params: request parameters in a dictionary
        :param data: dictionary of data to be included as json body
        :param supported_auth_methods: auth methods supported by the api endpoint (defaults to those defined for the service)
        :param account: user account (for enduser authentication only)
        :return: dictionary of json response (can raise exception if status_code!=200)
        """
        if method not in ['get', 'post']:
            raise ACException('Request method {0} not in allowed methods'.format(method))
        # Avoid mutable-default pitfalls: params/data default to fresh dicts.
        if params is None:
            params = {}
        if data is None:
            data = {}
        if supported_auth_methods is None:
            supported_auth_methods = self.SUPPORTED_AUTH_METHODS
        # Auth selection: prefer end-user auth when the endpoint supports it
        # and an account is available; otherwise fall back to API key.
        if use_authentication_method is None:
            if ENDUSER_AUTH_METHOD not in supported_auth_methods:
                auth_method = APIKEY_AUTH_METHOD
            else:
                if account is None:
                    auth_method = APIKEY_AUTH_METHOD
                else:
                    auth_method = ENDUSER_AUTH_METHOD
        else:
            if use_authentication_method not in supported_auth_methods:
                raise ACException('Authentication method {0} not supported by endpoint'.format(use_authentication_method))
            auth_method = use_authentication_method
        if auth_method == ENDUSER_AUTH_METHOD and not account:
            raise ACException('Enduser authentication method requested but no account provided.')
        auth_info = self.get_auth_info_for_request(auth_method, account=account)
        params.update(auth_info.get('params', dict()))  # Update current params with auth params (if any)
        headers = auth_info.get('headers', dict())
        # Log request
        log_line = '\n\nRequest to service: {0}\n'.format(self.name)
        log_line += '{0}?{1}'.format(url, '&'.join(['{0}={1}'.format(key, value) for key, value in params.items()]))
        if data:
            log_line += '\nData:\n'
            log_line += '\n'.join(['{0}: {1}'.format(key, value) for key, value in data.items()])
        if headers:
            log_line += '\nHeaders:\n'
            log_line += '\n'.join(['{0}: {1}'.format(key, value) for key, value in headers.items()])
        requests_logger.info(log_line)
        # Make the request!
        r = getattr(requests, method)(
            url,
            params=params,
            data=data,
            headers=headers)
        # TODO: log request object somewhere?
        return self.validate_response_status_code(r)
    def validate_response_status_code(self, response):
        """
        Process service API responses and raise exceptions if errors occur.
        Otherwise return response as dictionary object loaded from json contents.
        This base class contains a basic implementation of this method that raises
        generic exceptions without explanation or details. Services will want to override
        this method to better interpret the way errors are returned (and provide clearer response
        to users).
        :param response: response object (of type requests.models.Response)
        :return: dictionary including json contents of the response
        """
        if response.status_code != 200:
            raise ACException('Returned wrong status code, {0}'.format(response.status_code), response.status_code)
        return response.json()
    @property
    def id(self):
        # Public alias for the configured service_id.
        return self.service_id
    @property
    def name(self):
        return self.NAME
    @property
    def url(self):
        return self.URL
    @property
    def components(self):
        return self.implemented_components
    # Code to handle response warnings
    # TODO: to be properly documented
    # NOTE(review): this is a *class-level* mutable list. add_response_warning
    # mutates it in place (both += and append), so warnings are shared across
    # all instances until clear_response_warnings() rebinds an instance-level
    # list. Confirm whether that sharing is intended; if not, initialize the
    # list per instance.
    _current_response_warnings = list()
    def add_response_warning(self, msg):
        # TODO: make sure we don't have nested warnings
        # Accepts either a single message or a list of messages.
        if type(msg) == list:
            self._current_response_warnings += msg
        else:
            self._current_response_warnings.append(msg)
    def collect_response_warnings(self):
        """Return the accumulated warnings with duplicates removed."""
        warnings = self._current_response_warnings
        warnings = list(set(warnings))  # Remove duplicated warnings
        return warnings
    def clear_response_warnings(self):
        # Rebinds a fresh instance-level list (see NOTE above).
        self._current_response_warnings = list()
|
{
"content_hash": "371b9d3923ef08b077cfa30832144924",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 125,
"avg_line_length": 40.2247191011236,
"alnum_prop": 0.6255586592178771,
"repo_name": "AudioCommons/ac-mediator",
"id": "2a3b017792a778ed51f3ea0535c5a41d53d8c28d",
"size": "7160",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "services/acservice/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "5595"
},
{
"name": "Dockerfile",
"bytes": "1143"
},
{
"name": "HTML",
"bytes": "43305"
},
{
"name": "JavaScript",
"bytes": "337"
},
{
"name": "Python",
"bytes": "201502"
}
],
"symlink_target": ""
}
|
"""Should be run on a GCE instance to set up the build environment."""
from __future__ import print_function
import getpass
import os
from chromite.compute import compute_configs
from chromite.compute import bot_constants
from chromite.lib import cros_build_lib
from chromite.lib import osutils
# Make the script more readable.
# Make the script more readable.
RunCommand = cros_build_lib.RunCommand
SudoRunCommand = cros_build_lib.SudoRunCommand
# Location where bot credentials are staged on the instance.
BOT_CREDS_PATH = bot_constants.BOT_CREDS_TMP_PATH
# Most credentials are stored in the home directory.
HOME_DIR = osutils.ExpandPath('~')
def SetupPrerequisites():
  """Installs packages required for Chrome OS build."""
  SudoRunCommand(['apt-get', 'update'])
  SudoRunCommand(['apt-get', '-y', '--force-yes', 'upgrade'])
  packages = [
      # Chrome OS pre-requisite packages.
      'git', 'curl', 'pbzip2', 'gawk', 'gitk', 'subversion',
      # Required for CIDB.
      'python-sqlalchemy', 'python-mysqldb',
      # Required for payload generation outside of the chroot.
      'python-protobuf',
      # Packages to monitor system performance and usage.
      'sysstat',
  ]
  SudoRunCommand(['apt-get', '-y', 'install'] + packages)
def InstallChromeDependencies():
  """Installs packages required to build Chromium."""
  # The install-build-deps.sh relies on some packages that are not in
  # the base image. Install them first before invoking the script.
  SudoRunCommand(['apt-get', '-y', 'install',
                  'gcc-arm-linux-gnueabihf',
                  'g++-4.8-multilib-arm-linux-gnueabihf',
                  'gcc-4.8-multilib-arm-linux-gnueabihf',
                  'realpath'])
  # Clone the chromium build tools into a throwaway directory and run its
  # dependency installer with debug symbols and no interactive prompts.
  with osutils.TempDir(prefix='tmp-chrome-deps') as tempdir:
    RunCommand(['git', 'clone', bot_constants.CHROMIUM_BUILD_URL], cwd=tempdir)
    RunCommand([os.path.join(tempdir, 'build', 'install-build-deps.sh'),
                '--syms', '--no-prompt'])
def SetMountCount():
  """Sets mount count to a large number.

  Runs `tune2fs -c 150` on each configured drive so that periodic fsck does
  not kick in too frequently.
  """
  for drive in compute_configs.DRIVES:
    # BUG FIX: the device path must be absolute. The original
    # os.path.join('dev', drive) produced a *relative* path like 'dev/sda',
    # which only resolves when the cwd happens to be '/'.
    SudoRunCommand(['tune2fs', '-c', '150', os.path.join('/dev', drive)],
                   redirect_stdout=True)
def _SetupSVN():
  """Sets up the chromium svn username/password."""
  # Create a ~/.subversion directory.
  RunCommand(['svn', 'ls', 'http://src.chromium.org/svn'], redirect_stdout=True)
  # Change the setting to store the svn password.
  sed_str = ('s/# store-plaintext-passwords = '
             'no/store-plaintext-passwords = yes/g')
  RunCommand(['sed', '-i', '-e', sed_str,
              osutils.ExpandPath(os.path.join('~', '.subversion', 'servers'))])
  # Read the buildbot SVN password from the staged credentials file.
  password_path = osutils.ExpandPath(
      os.path.join(BOT_CREDS_PATH, bot_constants.SVN_PASSWORD_FILE))
  password = osutils.ReadFile(password_path).strip()
  # `svn ls` each repository to store the password in ~/.subversion.
  for svn_host in bot_constants.CHROMIUM_SVN_HOSTS:
    for svn_repo in bot_constants.CHROMIUM_SVN_REPOS:
      RunCommand(['svn', 'ls', '--username', bot_constants.BUILDBOT_SVN_USER,
                  '--password', password, 'svn://%s/%s' % (svn_host, svn_repo)],
                 redirect_stdout=True)
def _SetupGoB():
  """Sets up GoB credentials.

  Configures the git identity, clones gcompute-tools, and arranges for
  git-cookie-authdaemon to be started at boot via /etc/rc.local.
  """
  RunCommand(['git', 'config', '--global', 'user.email',
              bot_constants.GIT_USER_EMAIL])
  RunCommand(['git', 'config', '--global', 'user.name',
              bot_constants.GIT_USER_NAME])
  RunCommand(['git', 'clone', bot_constants.GCOMPUTE_TOOLS_URL],
             cwd=HOME_DIR, redirect_stdout=True)
  # Run git-cookie-authdaemon at boot time by adding it to
  # /etc/rc.local
  rc_local_path = os.path.join(os.path.sep, 'etc', 'rc.local')
  daemon_path = os.path.join(HOME_DIR, 'gcompute-tools',
                             'git-cookie-authdaemon')
  daemon_cmd = 'su %s -c %s\n' % (bot_constants.BUILDBOT_USER,
                                  daemon_path)
  # Insert the daemon command before the script's trailing 'exit 0'.
  content = osutils.ReadFile(rc_local_path).replace('exit 0', '')
  content += daemon_cmd
  content += 'exit 0\n'
  with osutils.TempDir() as tempdir:
    tmp_file = os.path.join(tempdir, 'rc.local')
    osutils.WriteFile(tmp_file, content)
    # BUG FIX: the mode must be the octal literal 0o755 (rwxr-xr-x). The
    # previous decimal literal 755 equals 0o1363, which set the sticky bit
    # and produced wrong permission bits for rc.local.
    os.chmod(tmp_file, 0o755)
    SudoRunCommand(['mv', tmp_file, rc_local_path])
def _SetupCIDB():
  """Copies cidb credentials."""
  # Recursively copy the CIDB credentials directory into the home directory.
  RunCommand(
      ['cp', '-r', os.path.join(BOT_CREDS_PATH, bot_constants.CIDB_CREDS_DIR),
       HOME_DIR])
def _SetupTreeStatus():
  """Copies credentials for updating tree status."""
  # Copy the tree-status password file into the home directory.
  RunCommand(
      ['cp',
       os.path.join(BOT_CREDS_PATH, bot_constants.TREE_STATUS_PASSWORD_FILE),
       HOME_DIR])
def SetupCredentials():
  """Sets up various credentials.

  Each helper below stages one credential type (SVN, GoB, CIDB, tree status).
  """
  _SetupSVN()
  _SetupGoB()
  _SetupCIDB()
  _SetupTreeStatus()
def SetupBuildbotEnvironment():
  """Sets up the buildbot environment.

  Appends host entries, creates and populates the buildbot checkout via
  gclient, installs the buildbot password, extends PATH, and registers a
  crontab entry so the slave starts at boot.
  """
  # Append host entries to /etc/hosts. This includes the buildbot
  # master IP address.
  host_entries = RunCommand(
      ['cat', os.path.join(BOT_CREDS_PATH, bot_constants.HOST_ENTRIES)],
      capture_output=True).output
  SudoRunCommand(['tee', '-a', '/etc/hosts'], input=host_entries)
  # Create the buildbot directory.
  SudoRunCommand(['mkdir', '-p', bot_constants.BUILDBOT_DIR])
  SudoRunCommand(['chown', '-R', '%s:%s' % (bot_constants.BUILDBOT_USER,
                                            bot_constants.BUILDBOT_USER),
                  bot_constants.BUILDBOT_DIR])
  with osutils.TempDir() as tempdir:
    # Download depot tools to a temp directory to bootstrap. `gclient
    # sync` will create depot_tools in BUILDBOT_DIR later.
    tmp_depot_tools_path = os.path.join(tempdir, 'depot_tools')
    RunCommand(['git', 'clone', bot_constants.DEPOT_TOOLS_URL],
               cwd=tempdir, redirect_stdout=True)
    # `gclient` relies on depot_tools in $PATH, pass the extra
    # environment variable.
    path_env = '%s:%s' % (os.getenv('PATH'), tmp_depot_tools_path)
    RunCommand(['gclient', 'config', bot_constants.BUILDBOT_SVN_REPO],
               cwd=bot_constants.BUILDBOT_DIR, extra_env={'PATH': path_env})
    RunCommand(['gclient', 'sync', '--jobs', '5'],
               cwd=bot_constants.BUILDBOT_DIR,
               redirect_stdout=True, extra_env={'PATH': path_env})
  # Set up buildbot password.
  config_dir = os.path.join(bot_constants.BUILDBOT_DIR, 'build', 'site_config')
  RunCommand(['cp', os.path.join(BOT_CREDS_PATH,
                                 bot_constants.BUILDBOT_PASSWORD_FILE),
              os.path.join(config_dir,
                           bot_constants.BUILDBOT_PASSWORD_FILE)])
  # Update the environment variable.
  depot_tools_path = os.path.join(bot_constants.BUILDBOT_DIR, 'depot_tools')
  RunCommand(['bash', '-c', r'echo export PATH=\$PATH:%s >> ~/.bashrc'
              % depot_tools_path])
  # Start buildbot slave at startup.
  # Preserve any existing crontab entries; a missing crontab is not an error.
  crontab_content = ''
  result = RunCommand(
      ['crontab', '-l'], capture_output=True, error_code_ok=True)
  crontab_content = result.output if result.returncode == 0 else ''
  crontab_content += ('SHELL=/bin/bash\nUSER=chrome-bot\n'
                      '@reboot cd /b/build/slave && make start\n')
  RunCommand(['crontab', '-'], input=crontab_content)
def TuneSystemSettings():
  """Tune the system settings for our build environment."""
  # Increase the user-level file descriptor limits.
  entries = ('* soft nofile 65536\n'
             '* hard nofile 65536\n')
  # Appended via sudo tee since limits.conf is root-owned.
  SudoRunCommand(['tee', '-a', '/etc/security/limits.conf'], input=entries)
def main(_argv):
  """Runs the full build-environment setup; must be run as the buildbot user."""
  # NOTE(review): `assert` is stripped under `python -O`; an explicit raise
  # would make this user check robust in optimized runs.
  assert getpass.getuser() == bot_constants.BUILDBOT_USER, (
      'This script should be run by %s instead of %s!' % (
          bot_constants.BUILDBOT_USER, getpass.getuser()))
  SetupPrerequisites()
  InstallChromeDependencies()
  SetupCredentials()
  SetupBuildbotEnvironment()
  TuneSystemSettings()
|
{
"content_hash": "9b4ddee44b560c23d070778d9eb8c876",
"timestamp": "",
"source": "github",
"line_count": 204,
"max_line_length": 80,
"avg_line_length": 37.81372549019608,
"alnum_prop": 0.642727508426238,
"repo_name": "mxOBS/deb-pkg_trusty_chromium-browser",
"id": "8c6c1133699562841808e7b39aec708fe29d8310",
"size": "7880",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "third_party/chromite/compute/setup_bot.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "8402"
},
{
"name": "Assembly",
"bytes": "230130"
},
{
"name": "Batchfile",
"bytes": "34966"
},
{
"name": "C",
"bytes": "12435900"
},
{
"name": "C++",
"bytes": "264378706"
},
{
"name": "CMake",
"bytes": "27829"
},
{
"name": "CSS",
"bytes": "795726"
},
{
"name": "Dart",
"bytes": "74976"
},
{
"name": "Emacs Lisp",
"bytes": "2360"
},
{
"name": "Go",
"bytes": "31783"
},
{
"name": "Groff",
"bytes": "5283"
},
{
"name": "HTML",
"bytes": "19491230"
},
{
"name": "Java",
"bytes": "7637875"
},
{
"name": "JavaScript",
"bytes": "12723911"
},
{
"name": "LLVM",
"bytes": "1169"
},
{
"name": "Logos",
"bytes": "6893"
},
{
"name": "Lua",
"bytes": "14392"
},
{
"name": "Makefile",
"bytes": "208315"
},
{
"name": "Objective-C",
"bytes": "1460032"
},
{
"name": "Objective-C++",
"bytes": "7760068"
},
{
"name": "PLpgSQL",
"bytes": "175360"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "427212"
},
{
"name": "Python",
"bytes": "11447382"
},
{
"name": "Ragel in Ruby Host",
"bytes": "104846"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "1208350"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "nesC",
"bytes": "18335"
}
],
"symlink_target": ""
}
|
import numpy as np
from baseline.remote import (
RemoteModelREST,
RemoteModelGRPC,
register_remote,
RemoteRESTClassifier,
RemoteRESTTagger,
RemoteRESTSeq2Seq,
RemoteRESTEmbeddings,
RemoteGRPCClassifier,
RemoteGRPCTagger,
RemoteGRPCSeq2Seq,
RemoteGRPCEmbeddings,
)
class RemoteRESTTensorFlowMixin(RemoteModelREST):
    """REST request builder shared by the TensorFlow-serving remote models."""

    def create_request(self, examples):
        """Builds a JSON-serializable predict payload from `examples`.

        numpy arrays are converted to plain lists; other values are passed
        through unchanged.
        """
        def as_jsonable(value):
            return value.tolist() if isinstance(value, np.ndarray) else value

        inputs = {key: as_jsonable(examples[key]) for key in self.input_keys}
        return {'signature_name': self.signature, 'inputs': inputs}
# Task-specific REST remote models: each pairs the JSON request builder of
# RemoteRESTTensorFlowMixin with a baseline remote interface and registers
# it under its backend name.
@register_remote('http-classify')
class RemoteRESTTensorFlowClassifier(RemoteRESTTensorFlowMixin, RemoteRESTClassifier): pass
@register_remote('http-tagger')
class RemoteRESTTensorFlowTagger(RemoteRESTTensorFlowMixin, RemoteRESTTagger): pass
@register_remote('http-seq2seq')
class RemoteRESTTensorFlowSeq2Seq(RemoteRESTTensorFlowMixin, RemoteRESTSeq2Seq): pass
@register_remote('http-servable-embeddings')
class RemoteRESTTensorFlowEmbeddings(RemoteRESTTensorFlowMixin, RemoteRESTEmbeddings): pass
# Task-specific gRPC remote models: pass-through subclasses of RemoteModelGRPC
# registered under their backend names; no request-building override here.
@register_remote('grpc')
class RemoteGRPCTensorFlowMixin(RemoteModelGRPC): pass
@register_remote('grpc-classify')
class RemoteGRPCTensorFlowClassifier(RemoteGRPCTensorFlowMixin, RemoteGRPCClassifier): pass
@register_remote('grpc-tagger')
class RemoteGRPCTensorFlowTagger(RemoteGRPCTensorFlowMixin, RemoteGRPCTagger): pass
@register_remote('grpc-seq2seq')
class RemoteGRPCTensorFlowSeq2Seq(RemoteGRPCTensorFlowMixin, RemoteGRPCSeq2Seq): pass
@register_remote('grpc-servable-embeddings')
class RemoteGRPCTensorFlowEmbeddings(RemoteGRPCTensorFlowMixin, RemoteGRPCEmbeddings): pass
@register_remote('grpc-preproc')
class RemoteGRPCTensorFlowPreprocMixin(RemoteModelGRPC):
    # gRPC request builder for TF-serving models whose preprocessing runs
    # server-side (raw inputs are shipped in the request).
    def create_request(self, examples):
        """Builds a TF-serving PredictRequest proto from `examples`.

        Features ending in 'lengths' are sent as int32 tensors with their
        original shape; every other feature is sent as a column tensor of
        shape [len(feature), 1].
        """
        # TODO: Remove TF dependency client side
        import tensorflow as tf
        request = self.predictpb.PredictRequest()
        request.model_spec.name = self.name
        request.model_spec.signature_name = self.signature
        if self.version is not None:
            request.model_spec.version.value = self.version
        for key in examples:
            if key.endswith('lengths'):
                # assumes examples[key] is a numpy array (has .shape) —
                # TODO(review) confirm against callers.
                shape = examples[key].shape
                tensor_proto = tf.contrib.util.make_tensor_proto(examples[key], shape=shape, dtype=tf.int32)
                request.inputs[key].CopyFrom(
                    tensor_proto
                )
            else:
                request.inputs[key].CopyFrom(
                    tf.contrib.util.make_tensor_proto(examples[key], shape=[len(examples[key]), 1])
                )
        return request
# Task-specific gRPC preproc remote models: combine the proto request builder
# of RemoteGRPCTensorFlowPreprocMixin with each baseline remote interface.
@register_remote('grpc-preproc-classify')
class RemoteGRPCTensorFlowPreprocClassifier(RemoteGRPCTensorFlowPreprocMixin, RemoteGRPCClassifier): pass
@register_remote('grpc-preproc-tagger')
class RemoteGRPCTensorFlowPreprocTagger(RemoteGRPCTensorFlowPreprocMixin, RemoteGRPCTagger): pass
@register_remote('grpc-preproc-seq2seq')
class RemoteGRPCTensorFlowPreprocSeq2Seq(RemoteGRPCTensorFlowPreprocMixin, RemoteGRPCSeq2Seq): pass
@register_remote('grpc-preproc-servable-embeddings')
class RemoteGRPCTensorFlowPreprocEmbeddings(RemoteGRPCTensorFlowPreprocMixin, RemoteGRPCEmbeddings): pass
@register_remote('http-preproc')
class RemoteRESTTensorFlowPreprocMixin(RemoteModelREST):
    """REST remote for models whose served graph performs its own preprocessing."""

    def create_request(self, examples):
        """Build the JSON payload for TF-Serving's REST predict endpoint.

        The 'tokens' feature plus any input keys ending in 'lengths' are
        forwarded; numpy arrays are converted to plain lists so the payload
        is JSON-serializable.
        """
        def as_jsonable(value):
            # numpy arrays are not JSON-serializable; anything else passes through.
            return value.tolist() if isinstance(value, np.ndarray) else value

        inputs = {'tokens': as_jsonable(examples['tokens'])}
        for feature in self.input_keys:
            if feature.endswith('lengths'):
                inputs[feature] = as_jsonable(examples[feature])
        return {'signature_name': self.signature, 'inputs': inputs}
# Task-specific REST variants for models served with in-graph preprocessing;
# behavior is inherited entirely from the two parents.
@register_remote('http-preproc-classify')
class RemoteRESTTensorFlowPreprocClassifier(RemoteRESTTensorFlowPreprocMixin, RemoteRESTClassifier): pass


@register_remote('http-preproc-tagger')
class RemoteRESTTensorFlowPreprocTagger(RemoteRESTTensorFlowPreprocMixin, RemoteRESTTagger): pass


@register_remote('http-preproc-seq2seq')
class RemoteRESTTensorFlowPreprocSeq2Seq(RemoteRESTTensorFlowPreprocMixin, RemoteRESTSeq2Seq): pass


@register_remote('http-preproc-servable-embeddings')
class RemoteRESTTensorFlowPreprocEmbeddings(RemoteRESTTensorFlowPreprocMixin, RemoteRESTEmbeddings): pass
|
{
"content_hash": "09175ac1090a1e51426792df09feb021",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 108,
"avg_line_length": 37.552,
"alnum_prop": 0.7334895611418832,
"repo_name": "dpressel/baseline",
"id": "cc31f2d1d53cb1679df09aa941e9fc614c46cb62",
"size": "4694",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "baseline/tf/remote.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "9649"
},
{
"name": "CMake",
"bytes": "430"
},
{
"name": "HCL",
"bytes": "923"
},
{
"name": "Perl",
"bytes": "17554"
},
{
"name": "Python",
"bytes": "1281602"
},
{
"name": "Roff",
"bytes": "24"
},
{
"name": "Shell",
"bytes": "10168"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
import os
import xml.etree.ElementTree as ET
from flexget import plugin
from flexget.utils.imdb import is_valid_imdb_title_id
from flexget.event import event
log = logging.getLogger('nfo_lookup')
class NfoLookup(object):
    """
    Retrieves information from a local '.nfo' info file.

    The read metadata will be added as 'nfo_something' in the entry. Also, if
    an 'id' is found in the '.nfo' file then the 'imdb_id' field will be set
    to its value. This means that if the imdb_lookup plugin is used in
    addition to this plugin it will be able to use the ID from the '.nfo'
    file to get the correct movie.

    The nfo file is used by Kodi.

    Example:
        nfo_lookup: yes

    WARNING: This plugin will read a file with extension '.nfo' and the same
    name as the entry filename as an XML file using xml.etree.ElementTree
    from the standard python library. As such, it is vulnerable to XML
    vulnerabilities described in the link below
    https://docs.python.org/3/library/xml.html#xml-vulnerabilities
    Use this only with nfo files you have created yourself.
    """
    schema = {'type': 'boolean'}
    nfo_file_extension = '.nfo'

    # This priority makes sure this plugin runs before the imdb_lookup plugin,
    # if it is also used. That way setting imdb_id here will help imdb_lookup
    # find the correct movie.
    @plugin.priority(150)
    def on_task_metainfo(self, task, config):
        """Look up nfo metadata for every entry produced by the task."""
        # check if disabled (value set to false)
        if not config:
            # Config was set to 'no' instead of yes. Don't do anything then.
            return
        for entry in task.entries:
            # If this entry was obtained from the filesystem plugin it should
            # have a filename field. If it does not have one then there is
            # nothing we can do for this entry.
            filename = entry.get('filename')
            location = entry.get('location')
            # If there is no 'filename' field there is also no nfo file
            if filename is None or location is None:
                # BUG FIX: this used to `return`, which aborted the whole task
                # as soon as a single entry lacked the fields; skip just this
                # entry instead so the remaining entries are still processed.
                log.warning("Entry %s didn't come from the filesystem plugin", entry.get('title'))
                continue
            # This will be None if there is no nfo file
            nfo_filename = self.get_nfo_filename(entry)
            if nfo_filename is None:
                # Same fix as above: skip only this entry, not the whole task.
                log.warning("Entry %s has no corresponding %s file", entry.get('title'), self.nfo_file_extension)
                continue
            # Populate the fields from the information in the .nfo file. Note
            # that at this point `nfo_filename` names an existing '.nfo' file.
            self.lookup(entry, nfo_filename)

    def lookup(self, entry, nfo_filename):
        """Parse `nfo_filename` and merge its fields into `entry`."""
        # If there is already data from a previous parse then we don't need
        # to do anything
        if entry.get('nfo_id') is not None:
            log.warning("Entry %s was already parsed by nfo_lookup and it will be skipped. ", entry.get('title'))
            return
        # nfo_filename should not be None at this point
        assert nfo_filename is not None
        # Get all values we can from the nfo file. If the nfo file can't be
        # parsed then a warning is logged and we return without changing the
        # entry.
        try:
            nfo_reader = NfoReader(nfo_filename)
            fields = nfo_reader.get_fields_from_nfo_file()
        except BadXmlFile:
            log.warning("Invalid '.nfo' file for entry %s", entry.get('title'))
            return
        entry.update(fields)
        # If a valid IMDB id was found in the nfo file, set the imdb_id field
        # of the entry. This will help the imdb_lookup plugin to get the
        # correct data if it is also used.
        if 'nfo_id' in fields:
            if is_valid_imdb_title_id(entry.get('nfo_id', '')):
                entry.update({'imdb_id': fields['nfo_id']})
            else:
                log.warning("ID found in nfo file for entry '%s', but it was not a valid IMDB ID", entry.get('title'))

    def get_nfo_filename(self, entry):
        """
        Get the filename of the nfo file from the 'location' in the entry.

        Returns
        -------
        str
            The file name of the 'nfo' file, or None if there is no 'nfo' file.
        """
        location = entry.get('location')
        nfo_full_filename = os.path.splitext(location)[0] + self.nfo_file_extension
        if os.path.isfile(nfo_full_filename):
            return nfo_full_filename
class BadXmlFile(Exception):
    """Raised when the '.nfo' file cannot be parsed as valid XML."""
class NfoReader(object):
    """
    Class in charge of parsing the '.nfo' file and getting a dictionary of fields.

    The '.nfo' file is an XML file. Some fields can only appear once, such as
    'title', 'id', 'plot', etc., while other fields can appear multiple times
    (with different values), such as 'thumb', 'genre', etc. These fields are
    listed in the `_fields` attribute.
    """

    def __init__(self, filename):
        """Parse `filename` as XML, raising BadXmlFile on any failure.

        BUG FIX: previously a missing/unreadable file escaped as a raw
        OSError from ET.parse, because the os.path.exists check only ran
        *after* a successful parse (dead code). Both unparsable and
        unreadable files now raise BadXmlFile, which is what callers catch.
        """
        try:
            tree = ET.parse(filename)
            root = tree.getroot()
        except (ET.ParseError, OSError):
            raise BadXmlFile()
        self._nfo_filename = filename
        self._root = root

        # Each key in the dictionary correspond to a field that should be read
        # from the nfo file. The values are a tuple with a boolean and a
        # callable. The boolean indicates if the field can appear multiple
        # times, while the callable is a function to read the field value from
        # the XML element.
        #
        # In the future we could extend the nfo_lookup plugin to accept 'set'
        # in its configuration to add new entries to this dictionary to handle
        # other tags in the nfo file and add the data to the entry.
        self._fields = {"title": (False, NfoReader._single_elem_getter_func),
                        "originaltitle": (False, NfoReader._single_elem_getter_func),
                        "sorttitle": (False, NfoReader._single_elem_getter_func),
                        "rating": (False, NfoReader._single_elem_getter_func),
                        "year": (False, NfoReader._single_elem_getter_func),
                        "votes": (False, NfoReader._single_elem_getter_func),
                        "plot": (False, NfoReader._single_elem_getter_func),
                        "runtime": (False, NfoReader._single_elem_getter_func),
                        "id": (False, NfoReader._single_elem_getter_func),
                        "filenameandpath": (False, NfoReader._single_elem_getter_func),
                        "trailer": (False, NfoReader._single_elem_getter_func),
                        "thumb": (True, NfoReader._single_elem_getter_func),
                        "genre": (True, NfoReader._single_elem_getter_func),
                        "director": (True, NfoReader._single_elem_getter_func),
                        # Actor field has child elements, such as 'name' and 'role'
                        "actor": (True, NfoReader._composite_elem_getter_func),
                        "studio": (True, NfoReader._single_elem_getter_func),
                        "country": (True, NfoReader._single_elem_getter_func)}

    @staticmethod
    def _single_elem_getter_func(x):
        """
        Method to get the text value of a simple XML element that does not
        contain child nodes.
        """
        return x.text

    @staticmethod
    def _composite_elem_getter_func(x):
        """
        Method to get XML elements that have children as a dictionary.
        """
        return {i.tag: i.text for i in x}

    def _extract_single_field(self, name, getter_func):
        """
        Use this method to get fields from the root XML tree that only appear
        once, such as 'title', 'year', etc. Returns None when absent.
        """
        f = self._root.find(name)
        if f is not None:
            return getter_func(f)

    def _extract_multiple_field(self, name, getter_func):
        """
        Use this method to get fields from the root XML tree that can appear
        more than once, such as 'actor', 'genre', 'director', etc. The result
        will be a list of values, or None when the field is absent.
        """
        values = [getter_func(i) for i in self._root.findall(name)]
        if len(values) > 0:
            return values

    def get_fields_from_nfo_file(self):
        """
        Returns a dictionary with all fields read from the '.nfo' file.

        The keys are named as 'nfo_something'.
        """
        d = {}
        if self._root is None:
            return d
        # TODO: Right now it only works for movies
        if self._root.tag != 'movie':
            return d
        for name, values in self._fields.items():
            multiple_bool = values[0]
            getter_func = values[1]
            nfo_field_name = 'nfo_{0}'.format(name)
            if multiple_bool:
                v = self._extract_multiple_field(name, getter_func)
            else:
                v = self._extract_single_field(name, getter_func)
            # Absent fields (v is None) are simply omitted from the result.
            if v is not None:
                d[nfo_field_name] = v
        return d
@event('plugin.register')
def register_plugin():
    # Register this plugin with FlexGet under the name 'nfo_lookup' (API v2).
    plugin.register(NfoLookup, 'nfo_lookup', api_ver=2)
|
{
"content_hash": "8a7fba80f5d04bf03e69748be9ce0e5e",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 120,
"avg_line_length": 41.16086956521739,
"alnum_prop": 0.599767613816415,
"repo_name": "drwyrm/Flexget",
"id": "ddc49c527b7d15ad4ac83fcfc32ded28465d5a2f",
"size": "9467",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "flexget/plugins/metainfo/nfo_lookup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "HTML",
"bytes": "78933"
},
{
"name": "JavaScript",
"bytes": "261421"
},
{
"name": "Python",
"bytes": "3081448"
},
{
"name": "SRecode Template",
"bytes": "3"
}
],
"symlink_target": ""
}
|
from swgpy.object import *
def create(kernel):
    # Auto-generated template factory: builds the Tangible object for the
    # shared Rori planet UI element. Hand edits belong between the
    # MODIFICATIONS markers so template regeneration can preserve them.
    result = Tangible()

    result.template = "object/tangible/ui/shared_ui_planet_rori.iff"
    result.attribute_template_id = -1
    result.stfName("loc_n","rori")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    return result
|
{
"content_hash": "88df3febfaff9391b5c00e5ea83f68f2",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 65,
"avg_line_length": 22,
"alnum_prop": 0.6783216783216783,
"repo_name": "anhstudios/swganh",
"id": "2ec1c11dd7d27162e92fefb9de2661bd6ab3effd",
"size": "431",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/tangible/ui/shared_ui_planet_rori.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
}
|
"""Game mechanic modules."""
# Part of Clockwork MUD Server (https://github.com/whutch/cwmud)
# :copyright: (c) 2008 - 2017 Will Hutcheson
# :license: MIT (https://github.com/whutch/cwmud/blob/master/LICENSE.txt)
|
{
"content_hash": "62b08fef2fedfae4ed26e8735be2ca5a",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 73,
"avg_line_length": 53.25,
"alnum_prop": 0.7230046948356808,
"repo_name": "whutch/atria",
"id": "b91a8af17dd386b3446199bb1ef0ae139ce6650e",
"size": "237",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cwmud/game/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "637"
},
{
"name": "Python",
"bytes": "405976"
}
],
"symlink_target": ""
}
|
"""This code example gets all creative sets for a master creative.
To create creative sets, run create_creative_sets.py.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
"""
# Import appropriate modules from the client library.
from googleads import dfp
MASTER_CREATIVE_ID = 'INSERT_MASTER_CREATIVE_ID_HERE'
def main(client, master_creative_id):
    # NOTE(review): this sample targets Python 2 (see the print statement at
    # the bottom) and the long-sunset DFP v201505 API.
    # Initialize appropriate service.
    creative_set_service = client.GetService('CreativeSetService',
                                             version='v201505')

    # Create statement object to only select creative sets that have the given
    # master creative.
    values = [{
        'key': 'masterCreativeId',
        'value': {
            'xsi_type': 'NumberValue',
            'value': master_creative_id
        }
    }]
    query = 'WHERE masterCreativeId = :masterCreativeId'
    statement = dfp.FilterStatement(query, values)

    # Get creative sets by statement, paging until an empty page is returned.
    while True:
        response = creative_set_service.getCreativeSetsByStatement(
            statement.ToStatement())
        if 'results' in response:
            # Display results.
            for creative_set in response['results']:
                print (('Creative set with ID \'%s\', master creative ID \'%s\', and '
                        'companion creative IDs {%s} was found.')
                       % (creative_set['id'], creative_set['masterCreativeId'],
                          ','.join(creative_set['companionCreativeIds'])))
            # Advance the filter statement to the next page.
            statement.offset += dfp.SUGGESTED_PAGE_LIMIT
        else:
            break

    print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
    # Initialize client object from the local "googleads.yaml" credentials.
    dfp_client = dfp.DfpClient.LoadFromStorage()
    main(dfp_client, MASTER_CREATIVE_ID)
|
{
"content_hash": "9f70488ee7f651fda5ca94eaaf5ca4f8",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 78,
"avg_line_length": 33.642857142857146,
"alnum_prop": 0.6698513800424628,
"repo_name": "richardfergie/googleads-python-lib",
"id": "f47d345a6808869354944d83fbdfc8da40b4de69",
"size": "2502",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "examples/dfp/v201505/creative_set_service/get_creative_sets_by_statement.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "168602"
}
],
"symlink_target": ""
}
|
from requests import HTTPError
import urllib
from oslo_log import log as logging
from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5_ex
from f5_openstack_agent.lbaasv2.drivers.bigip import resource_helper
LOG = logging.getLogger(__name__)
class PoolServiceBuilder(object):
    """Create LBaaS v2 pools and related objects on BIG-IPs.

    Handles requests to create, update, delete LBaaS v2 pools,
    health monitors, and members on one or more BIG-IP systems.

    All mutating methods follow the same pattern: attempt the operation on
    every BIG-IP, log failures, and return the *last* error encountered (or
    None on full success) rather than raising.

    NOTE(review): this module is Python-2 only as written -- it relies on
    `err.message` and `urllib.quote` (moved to urllib.parse in Python 3).
    """

    def __init__(self, service_adapter):
        # The adapter translates LBaaS service dicts into BIG-IP resource
        # definitions; one resource helper is kept per BIG-IP object type.
        self.service_adapter = service_adapter
        self.http_mon_helper = resource_helper.BigIPResourceHelper(
            resource_helper.ResourceType.http_monitor)
        self.https_mon_helper = resource_helper.BigIPResourceHelper(
            resource_helper.ResourceType.https_monitor)
        self.tcp_mon_helper = resource_helper.BigIPResourceHelper(
            resource_helper.ResourceType.tcp_monitor)
        self.ping_mon_helper = resource_helper.BigIPResourceHelper(
            resource_helper.ResourceType.ping_monitor)
        self.pool_helper = resource_helper.BigIPResourceHelper(
            resource_helper.ResourceType.pool)
        self.node_helper = resource_helper.BigIPResourceHelper(
            resource_helper.ResourceType.node)

    def create_pool(self, service, bigips):
        """Create a pool on set of BIG-IPs.

        Creates a BIG-IP pool to represent an LBaaS pool object.

        :param service: Dictionary which contains a both a pool
        and load balancer definition.
        :param bigips: Array of BigIP class instances to create pool.
        :return: the last error encountered, or None on success.
        """
        pool = self.service_adapter.get_pool(service)
        error = None
        for bigip in bigips:
            try:
                self.pool_helper.create(bigip, pool)
            except HTTPError as err:
                if err.response.status_code == 409:
                    # 409 Conflict: the pool already exists, so converge by
                    # updating it instead of failing.
                    LOG.debug("Pool already exists...updating")
                    try:
                        self.pool_helper.update(bigip, pool)
                    except Exception as err:
                        error = f5_ex.PoolUpdateException(err.message)
                        LOG.error("Failed to assure pool %s on %s: %s",
                                  pool['name'], bigip, error.message)
                else:
                    error = f5_ex.PoolCreationException(err.message)
                    LOG.error("Failed to assure pool %s on %s: %s",
                              pool['name'], bigip, error.message)
            except Exception as err:
                error = f5_ex.PoolCreationException(err.message)
                LOG.error("Failed to assure pool %s on %s: %s",
                          pool['name'], bigip, error.message)
        return error

    def delete_pool(self, service, bigips):
        """Delete a pool on set of BIG-IPs.

        Deletes a BIG-IP pool defined by LBaaS pool object.

        :param service: Dictionary which contains a both a pool
        and load balancer definition.
        :param bigips: Array of BigIP class instances to delete pool.
        :return: the last error encountered, or None on success.
        """
        loadbalancer = service.get('loadbalancer')
        pool = self.service_adapter.get_pool(service)
        members = service.get('members', list())
        error = None
        for bigip in bigips:
            try:
                self.pool_helper.delete(bigip, name=pool["name"],
                                        partition=pool["partition"])
            except HTTPError as err:
                # 404 means the pool is already gone; that is not an error.
                if err.response.status_code != 404:
                    error = f5_ex.PoolDeleteException(err.message)
                    LOG.error("Failed to remove pool %s from %s: %s",
                              pool['name'], bigip, error.message)
            except Exception as err:
                error = f5_ex.PoolDeleteException(err.message)
                LOG.error("Failed to remove pool %s from %s: %s",
                          pool['name'], bigip, error.message)
            # Clean up the nodes that backed this pool's members.
            for member in members:
                self._delete_member_node(loadbalancer, member, bigip)
        return error

    def update_pool(self, service, bigips):
        """Update BIG-IP pool.

        :param service: Dictionary which contains a both a pool
        and load balancer definition.
        :param bigips: Array of BigIP class instances to create pool.
        :return: the last error encountered, or None on success.
        """
        error = None
        pool = self.service_adapter.get_pool(service)
        for bigip in bigips:
            try:
                self.pool_helper.update(bigip, pool)
            except Exception as err:
                error = f5_ex.PoolUpdateException(err.message)
                LOG.error("Failed to update pool %s from %s: %s",
                          pool['name'], bigip, error.message)
        return error

    def create_healthmonitor(self, service, bigips):
        """Create (or update on conflict) a health monitor on each BIG-IP."""
        # create member
        hm = self.service_adapter.get_healthmonitor(service)
        hm_helper = self._get_monitor_helper(service)
        error = None
        for bigip in bigips:
            try:
                hm_helper.create(bigip, hm)
            except HTTPError as err:
                if err.response.status_code == 409:
                    # Monitor already exists; converge by updating it.
                    try:
                        hm_helper.update(bigip, hm)
                    except Exception as err:
                        error = f5_ex.MonitorUpdateException(err.message)
                        LOG.error("Failed to update monitor %s on %s: %s",
                                  hm['name'], bigip, error.message)
                else:
                    error = f5_ex.MonitorCreationException(err.message)
                    LOG.error("Failed to create monitor %s on %s: %s",
                              hm['name'], bigip, error.message)
            except Exception as err:
                error = f5_ex.MonitorCreationException(err.message)
                LOG.error("Failed to create monitor %s on %s: %s",
                          hm['name'], bigip, error.message)
        return error

    def delete_healthmonitor(self, service, bigips):
        """Delete the service's health monitor from each BIG-IP."""
        # delete health monitor
        hm = self.service_adapter.get_healthmonitor(service)
        hm_helper = self._get_monitor_helper(service)
        error = None
        for bigip in bigips:
            # after updating pool, delete monitor
            try:
                hm_helper.delete(
                    bigip, name=hm["name"], partition=hm["partition"])
            except HTTPError as err:
                # 404 means the monitor is already gone; not an error.
                if err.response.status_code != 404:
                    error = f5_ex.MonitorDeleteException(err.message)
                    LOG.error("Failed to remove monitor %s from %s: %s",
                              hm['name'], bigip, error.message)
            except Exception as err:
                error = f5_ex.MonitorDeleteException(err.message)
                LOG.error("Failed to remove monitor %s from %s: %s",
                          hm['name'], bigip, error.message)
        return error

    def _delete_member_node(self, loadbalancer, member, bigip):
        """Delete the node that backs a member; shared/missing nodes are OK."""
        error = None
        svc = {'loadbalancer': loadbalancer,
               'member': member}
        node = self.service_adapter.get_member_node(svc)
        try:
            self.node_helper.delete(bigip,
                                    name=urllib.quote(node['name']),
                                    partition=node['partition'])
        except HTTPError as err:
            # Possible error if node is shared with another member.
            # If so, ignore the error.
            if err.response.status_code == 400:
                LOG.debug(str(err))
            elif err.response.status_code == 404:
                # Node already gone; nothing to do.
                LOG.debug(str(err))
            else:
                LOG.error("Unexpected node deletion error: %s",
                          urllib.quote(node['name']))
                error = f5_ex.NodeDeleteException(
                    "Unable to delete node {}".format(
                        urllib.quote(node['name'])))
        return error

    def assure_pool_members(self, service, bigips):
        """Reconcile pool membership on each BIG-IP.

        Members pending deletion have their backing node removed; any member
        not found on the BIG-IP is flagged with member['missing'] = True for
        the caller to act on.
        """
        pool = self.service_adapter.get_pool(service)
        partition = pool["partition"]
        loadbalancer = service.get('loadbalancer')
        for bigip in bigips:
            pool_loaded = True
            try:
                p = self.pool_helper.load(bigip,
                                          name=pool["name"],
                                          partition=partition)
                m = p.members_s.members
            except HTTPError as err:
                LOG.error("Unabled to load pool %s: %s",
                          pool["name"], err.message)
                pool_loaded = False
            for member in service.get('members', list()):
                svc = {'loadbalancer': loadbalancer,
                       'member': member}
                if member.get('provisioning_status') == "PENDING_DELETE":
                    self._delete_member_node(loadbalancer, member, bigip)
                    continue
                bigip_member = self.service_adapter.get_member(svc)
                # `m` is only bound when the pool loaded; the short-circuit on
                # pool_loaded keeps the reference safe when loading failed.
                member_exists = pool_loaded and m.exists(
                    name=urllib.quote(bigip_member["name"]),
                    partition=partition)
                if not member_exists:
                    member['missing'] = True

    def _get_monitor_helper(self, service):
        # Map the LBaaS monitor type onto the matching BIG-IP monitor helper;
        # anything unrecognized falls back to the HTTP monitor.
        monitor_type = self.service_adapter.get_monitor_type(service)
        if monitor_type == "HTTPS":
            hm = self.https_mon_helper
        elif monitor_type == "TCP":
            hm = self.tcp_mon_helper
        elif monitor_type == "PING":
            hm = self.ping_mon_helper
        else:
            hm = self.http_mon_helper
        return hm

    def member_exists(self, service, bigip):
        """Return True if a member exists in a pool.

        :param service: Has pool and member name/partition
        :param bigip: BIG-IP to get member status from.
        :return: Boolean
        """
        pool = self.service_adapter.get_pool(service)
        member = self.service_adapter.get_member(service)
        part = pool["partition"]
        try:
            p = self.pool_helper.load(bigip,
                                      name=pool["name"],
                                      partition=part)
            m = p.members_s.members
            if m.exists(name=urllib.quote(member["name"]), partition=part):
                return True
        except Exception as e:
            # log error but continue on
            LOG.error("Error checking member exists: %s", e.message)
        return False

    def get_member_status(self, service, bigip, status_keys):
        """Return status values for a single pool.

        Status keys to collect are defined as an array of strings in input
        status_keys.

        :param service: Has pool and member name/partition
        :param bigip: BIG-IP to get member status from.
        :param status_keys: Array of strings that define which status keys to
        collect.
        :return: A dict with key/value pairs for each status defined in
        input status_keys (empty on error or when the member is missing).
        """
        member_status = {}
        pool = self.service_adapter.get_pool(service)
        member = self.service_adapter.get_member(service)
        part = pool["partition"]
        try:
            p = self.pool_helper.load(bigip,
                                      name=pool["name"],
                                      partition=part)
            m = p.members_s.members
            if m.exists(name=urllib.quote(member["name"]), partition=part):
                m = m.load(name=urllib.quote(member["name"]), partition=part)
                member_status = self.pool_helper.collect_stats(
                    m, stat_keys=status_keys)
            else:
                LOG.error("Unable to get member status. "
                          "Member %s does not exist.", member["name"])
        except Exception as e:
            # log error but continue on
            LOG.error("Error getting member status: %s", e.message)
        return member_status
|
{
"content_hash": "da39d40908cbd47fa3583f4e406c951a",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 77,
"avg_line_length": 39.875,
"alnum_prop": 0.5437221580597261,
"repo_name": "F5Networks/f5-openstack-agent",
"id": "505e30ee086d5f9693f13b42398d18016cbb794a",
"size": "12730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "f5_openstack_agent/lbaasv2/drivers/bigip/pool_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2220"
},
{
"name": "Makefile",
"bytes": "853"
},
{
"name": "Python",
"bytes": "1395055"
},
{
"name": "Ruby",
"bytes": "78"
},
{
"name": "Shell",
"bytes": "15836"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class XsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `xsrc` attribute of `histogram2d` traces.

    Auto-generated; all validation logic lives in the base SrcValidator.
    The edit_type/role defaults below can be overridden via kwargs.
    """

    def __init__(self, plotly_name="xsrc", parent_name="histogram2d", **kwargs):
        super(XsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
|
{
"content_hash": "17bb12cc7b772eaf511d64c31e017001",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 80,
"avg_line_length": 36.083333333333336,
"alnum_prop": 0.6004618937644342,
"repo_name": "plotly/python-api",
"id": "f032112fcffe759cc5895addeb163deef7b29ddc",
"size": "433",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/histogram2d/_xsrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
"""An example of starting https://cloud.google.com/profiler."""
# [START profiler_python_quickstart]
import googlecloudprofiler
def main():
    # Profiler initialization. It starts a daemon thread which continuously
    # collects and uploads profiles. Best done as early as possible.
    try:
        googlecloudprofiler.start(
            service='hello-profiler',
            service_version='1.0.1',
            # verbose is the logging level. 0-error, 1-warning, 2-info,
            # 3-debug. It defaults to 0 (error) if not set.
            verbose=3,
            # project_id must be set if not running on GCP.
            # project_id='my-project-id',
        )
    except (ValueError, NotImplementedError) as exc:
        print(exc)  # Handle errors here
    # [END profiler_python_quickstart]
    # Never returns: spin forever so the profiler has work to sample.
    busyloop()
# A loop function which spends 30% CPU time on loop3() and 70% CPU time
# on loop7().
def busyloop():
    # Runs forever; the fixed 3:7 call ratio gives the profiler a known,
    # recognizable CPU profile to display.
    while True:
        loop3()
        loop7()
def loop3():
    """Burn CPU with three passes of the basic busy loop."""
    for _ in (0, 1, 2):
        loop()
def loop7():
    """Burn CPU with seven passes of the basic busy loop."""
    done = 0
    while done < 7:
        loop()
        done += 1
def loop():
    """The basic unit of work: 10000 iterations of a no-op loop."""
    i = 0
    while i < 10000:
        i += 1
if __name__ == '__main__':
    # Entry point: start the profiler, then busy-loop forever.
    main()
|
{
"content_hash": "3f0095960217be7b62dc191c4c421598",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 75,
"avg_line_length": 23.4,
"alnum_prop": 0.5931623931623932,
"repo_name": "GoogleCloudPlatform/python-docs-samples",
"id": "98206a39674eaee2c906ff7bdcd8c361c8441bfc",
"size": "1767",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "profiler/quickstart/main.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "8008"
},
{
"name": "Dockerfile",
"bytes": "62031"
},
{
"name": "HTML",
"bytes": "69878"
},
{
"name": "JavaScript",
"bytes": "26494"
},
{
"name": "Jinja",
"bytes": "1892"
},
{
"name": "Jupyter Notebook",
"bytes": "47951698"
},
{
"name": "Makefile",
"bytes": "932"
},
{
"name": "Procfile",
"bytes": "138"
},
{
"name": "PureBasic",
"bytes": "11115"
},
{
"name": "Python",
"bytes": "5323502"
},
{
"name": "Shell",
"bytes": "78261"
}
],
"symlink_target": ""
}
|
"""Test multiwallet.
Verify that a bitcoind node can load multiple wallet files
"""
import os
import shutil
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class MultiWalletTest(BitcoinTestFramework):
    """Functional test: one node loading multiple wallet files at once."""

    def set_test_params(self):
        # Single node on a fresh chain, started with three wallets.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [['-wallet=w1', '-wallet=w2', '-wallet=w3']]

    def run_test(self):
        # All three wallets should be loaded on startup.
        assert_equal(set(self.nodes[0].listwallets()), {"w1", "w2", "w3"})

        self.stop_node(0)

        # should not initialize if there are duplicate wallets
        self.assert_start_raises_init_error(0, ['-wallet=w1', '-wallet=w1'], 'Error loading wallet w1. Duplicate -wallet filename specified.')

        # should not initialize if wallet file is a directory
        os.mkdir(os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w11'))
        self.assert_start_raises_init_error(0, ['-wallet=w11'], 'Error loading wallet w11. -wallet filename must be a regular file.')

        # should not initialize if one wallet is a copy of another
        shutil.copyfile(os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w2'),
                        os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w22'))
        self.assert_start_raises_init_error(0, ['-wallet=w2', '-wallet=w22'], 'duplicates fileid')

        # should not initialize if wallet file is a symlink
        os.symlink(os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w1'), os.path.join(self.options.tmpdir, 'node0', 'regtest', 'w12'))
        self.assert_start_raises_init_error(0, ['-wallet=w12'], 'Error loading wallet w12. -wallet filename must be a regular file.')

        self.start_node(0, self.extra_args[0])

        # Per-wallet RPC endpoints.
        w1 = self.nodes[0].get_wallet_rpc("w1")
        w2 = self.nodes[0].get_wallet_rpc("w2")
        w3 = self.nodes[0].get_wallet_rpc("w3")
        wallet_bad = self.nodes[0].get_wallet_rpc("bad")

        w1.generate(1)

        # accessing invalid wallet fails
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)

        # accessing wallet RPC without using wallet endpoint fails
        assert_raises_rpc_error(-19, "Wallet file not specified", self.nodes[0].getwalletinfo)

        # check w1 wallet balance (one immature coinbase from the block above)
        w1_info = w1.getwalletinfo()
        assert_equal(w1_info['immature_balance'], 125000.00000000)
        w1_name = w1_info['walletname']
        assert_equal(w1_name, "w1")

        # check w2 wallet balance
        w2_info = w2.getwalletinfo()
        assert_equal(w2_info['immature_balance'], 0)
        w2_name = w2_info['walletname']
        assert_equal(w2_name, "w2")

        w3_name = w3.getwalletinfo()['walletname']
        assert_equal(w3_name, "w3")

        assert_equal({"w1", "w2", "w3"}, {w1_name, w2_name, w3_name})

        # Mature the coinbase, then move funds between the wallets and check
        # that each wallet tracks only its own balance.
        w1.generate(101)
        assert_equal(w1.getbalance(), 10875000.00000000)
        assert_equal(w2.getbalance(), 0)
        assert_equal(w3.getbalance(), 0)

        w1.sendtoaddress(w2.getnewaddress(), 1)
        w1.sendtoaddress(w3.getnewaddress(), 2)
        w1.generate(1)
        assert_equal(w2.getbalance(), 1)
        assert_equal(w3.getbalance(), 2)

        # Batched RPCs go through the same per-wallet endpoint.
        batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
        assert_equal(batch[0]["result"]["chain"], "regtest")
        assert_equal(batch[1]["result"]["walletname"], "w1")
if __name__ == '__main__':
    # Run the functional test through the framework's standard entry point.
    MultiWalletTest().main()
|
{
"content_hash": "4bbfe27ce7729389e2eb26fe915bb1fe",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 142,
"avg_line_length": 41.30232558139535,
"alnum_prop": 0.6342905405405406,
"repo_name": "ionomy/ion",
"id": "afaf3686b28cb83c453401fd9886acaf3d2bbd59",
"size": "3761",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/multiwallet.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "1392620"
},
{
"name": "C++",
"bytes": "6603677"
},
{
"name": "CMake",
"bytes": "41658"
},
{
"name": "CSS",
"bytes": "44782"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30291"
},
{
"name": "M4",
"bytes": "201267"
},
{
"name": "Makefile",
"bytes": "119240"
},
{
"name": "Objective-C",
"bytes": "13448"
},
{
"name": "Objective-C++",
"bytes": "6627"
},
{
"name": "Python",
"bytes": "1137651"
},
{
"name": "QMake",
"bytes": "26274"
},
{
"name": "Shell",
"bytes": "73927"
}
],
"symlink_target": ""
}
|
import inspect
import bitUtils as bu
# These Tiny classes take in a FA18 processor as an argumnet and
# emulate the memory and register changes that will
# take place given a certain opCode
class OpCode(object):
    """Base class for F18 opcode emulators.

    Subclasses set `code` (the 5-bit opcode value) and `strRep`, and override
    `action` (and optionally `after`) to apply the opcode's effect on a
    processor instance.
    """

    requiresAddress = False

    def action(self, pi, addr):
        """Primary state change for this opcode; overridden by subclasses."""
        pass

    def after(self, pi, addr):
        """Post-action hook; overridden when extra work follows the action."""
        pass

    def run(self, pInstance, addr=None):
        """Execute the opcode: the main action, then the post-action hook."""
        self.action(pInstance, addr)
        self.after(pInstance, addr)

    def minBitCount(self):
        """Opcodes whose value is a multiple of 4 fit in the short 3-bit slot."""
        return 3 if self.code % 4 == 0 else 5

    def getBitRep(self, nBits):
        """Return the opcode's bit-array representation at width `nBits` (5 or 3)."""
        assert nBits in [5, 3], "Derp"
        if nBits == 3:
            assert self.minBitCount() == 3, "Oh noes, 5/3 bit mixup!"
            return bu.intToBA(self.code, 5)[2:5]
        return bu.intToBA(self.code, 5)

    def __repr__(self):
        return self.strRep
# Arithmetic, Logic and Register Manipulation
class ARLMOp(OpCode):
    # All ALU/register-manipulation opcodes share one fixed execution time.
    def getExecutionTime(self, pInstance):
        # Units are not stated here -- presumably nanoseconds; TODO confirm
        # against the F18 timing documentation.
        return 1.5
class MultStepOp(ARLMOp):
    # MRG TODO:
    # I am very unsure as to the correctness of the implementation of this function
    code = 0x10
    strRep = "+*"

    def action(self, pInstance, addr):
        # Fuse together the bit rep of T and A.
        # Signed Multiplicand is in S, unsigned multiplier in A, T=0 at start of a step sequence.
        # Uses T:A as a 36-bit shift register with multiplier in A. Does the following:
        A = pInstance.A
        T = pInstance.T
        S = pInstance.S
        cb = pInstance.carryBit
        # NOTE(review): the comments below describe a branch on bit A0, but
        # the code branches on isExtendedArithmeticMode() -- these look
        # inconsistent; confirm which condition is intended.
        if pInstance.isExtendedArithmeticMode():
            # 1. If bit A0 is zero, shifts the 36-bit register T:A right one bit arithmetically (T17 is not
            # changed and is copied into T16. T0 is copied to A17 and A0 is discarded.)
            ta = bu.intToBA(T, 18) + bu.intToBA(A, 18)
        else:
            # 2. If bit A0 is one, S and T are added as though they had both been extended to be
            # 19 bit signed numbers, and the 37-bit concatenation of this sum and A is shifted
            # right one bit to replace T:A. Overflow may occur if S and T are both nonzero and
            # their signs differ; this can only occur through improper initialization of T.
            # This instruction is affected in Extended Arithmetic Mode by including the latched carry in
            # the sum in case (2) above, and by latching the carry out from bit 17 of the sum, but
            # this is not a particularly useful effect and may be changed in later F18 versions.
            # You must arrange that the previously executed instruction has not changed the values of
            # S, T or P9. Use nop preceding Multiply Step if necessary to meet this condition.
            st_sum = S + T + cb
            ta = bu.intToBA(st_sum, 19) + bu.intToBA(A, 18)
        # This is the shared right shift as above
        ta[1] = ta[0]
        ta = ta[1:]
        ta.append(False)
        # Unpack the shared shift register into the two we store it as.
        # NOTE(review): T takes ta[0:18] but A takes ta[19:], which drops
        # bit 18 entirely -- this looks like an off-by-one (ta[18:]?);
        # verify against the F18 multiply-step specification.
        T[:] = bu.baToInt(ta[0:18])
        A[:] = bu.baToInt(ta[19:])
class LeftShiftOp(ARLMOp):
    """2* -- shift T left one bit (multiply T by two)."""
    code = 0x11
    strRep = "2*"
    def action(self, pInstance, addr):
        # BUG FIX: the original computed ``1 << T`` (two to the power T).
        # The F18A "2*" instruction shifts T left by one bit position.
        # NOTE(review): no truncation to the 18-bit word is applied here,
        # matching the original's behaviour; confirm whether the register
        # type truncates on assignment.
        pInstance.T[:] = pInstance.T[:] << 1
class RightShiftOp(ARLMOp):
    """2/ -- shift T right one bit (arithmetic shift on the F18A)."""
    code = 0x12
    strRep = "2/"
    def action(self, pInstance, addr):
        # BUG FIX: the original computed ``1 >> T``, which is 0 for any
        # positive T.  "2/" shifts T right by one bit.  Python's >> is an
        # arithmetic (sign-preserving) shift for negative ints, matching the
        # F18A -- TODO confirm how negative values are represented in T.
        pInstance.T[:] = pInstance.T[:] >> 1
class NotOp(ARLMOp):
    # "-" -- bitwise invert T (one's complement).
    code = 0x13
    strRep = "-"
    def action(self, pInstance, addr):
        pInstance.T[:] = ~pInstance.T[:]
class PlusOp(ARLMOp):
    # "+" -- pop the top two data-stack items and push their sum.
    code = 0x14
    strRep = "+"
    def action(self, pInstance, addr):
        sumOfRegs = pInstance.dataStack.pop() + pInstance.dataStack.pop()
        # Include the carry in the sum if necessary
        if pInstance.isExtendedArithmeticMode():
            sumOfRegs += pInstance.carryBit
        # Set the Carry Bit
        if pInstance.isExtendedArithmeticMode():
            # NOTE(review): the carry-out of an 18-bit add lives in bit 18
            # (0-indexed); bit 19 here looks off by one -- confirm against
            # bitUtils.getBitAsBool's indexing convention (UnlessOp treats
            # bit 17 as the MSB of an 18-bit word).
            pInstance.carryBit = bu.getBitAsBool(sumOfRegs, 19)
        # Push the sum back onto the stack after truncating to a word size
        pInstance.dataStack.push(bu.truncateToWord(sumOfRegs))
class AndOp(ARLMOp):
    # "and" -- pop two items, push their bitwise AND.
    code = 0x15
    strRep = "and"
    def action(self, pInstance, addr):
        op1 = pInstance.dataStack.pop()
        op2 = pInstance.dataStack.pop()
        pInstance.dataStack.push(op1 & op2)
class XorOp(ARLMOp):
    # Pop two items, push their bitwise XOR.  The mnemonic really is "or":
    # on the F18A the instruction named "or" performs exclusive-or.
    code = 0x16
    strRep = "or"
    def action(self, pInstance, addr):
        op1 = pInstance.dataStack.pop()
        op2 = pInstance.dataStack.pop()
        pInstance.dataStack.push(op1 ^ op2)
class DropOp(ARLMOp):
    # "drop" -- discard the top of the data stack.
    code = 0x17
    strRep = "drop"
    def action(self, pInstance, addr):
        # NB: not clear on this one
        pInstance.dataStack.pop()
class DupOp(ARLMOp):
    # "dup" -- push a copy of T.
    # NOTE(review): this pushes the T register object itself, not a copy of
    # its value; if the stack later mutates entries in place this aliases T.
    code = 0x18
    strRep = "dup"
    def action(self, pInstance, addr):
        pInstance.dataStack.push(pInstance.T)
class PopOp(ARLMOp):
    # "pop" -- move the top of the return stack onto the data stack.
    code = 0x19
    strRep = "pop"
    def action(self, pInstance, addr):
        pInstance.dataStack.push(pInstance.returnStack.pop())
class OverOp(ARLMOp):
    # "over" -- ( a b -- a b a ): push a copy of the second item.
    code = 0x1A
    strRep = "over"
    def action(self, pInstance, addr):
        op1 = pInstance.dataStack.pop()
        op2 = pInstance.dataStack.pop()
        pInstance.dataStack.push(op2)
        pInstance.dataStack.push(op1)
        pInstance.dataStack.push(op2)
class AFetchOp(ARLMOp):
    # "a" -- push the contents of the A register.
    code = 0x1B
    strRep = "a"
    def action(self, pInstance, addr):
        pInstance.dataStack.push(pInstance.A)
class NoOp(ARLMOp):
    # "." -- nop.
    code = 0x1C
    strRep = "."
    def action(self, pInstance, addr):
        pass
class PushOp(ARLMOp):
    # "push" -- move the top of the data stack onto the return stack.
    code = 0x1D
    strRep = "push"
    def action(self, pInstance, addr):
        pInstance.returnStack.push(pInstance.dataStack.pop())
class BStoreOp(ARLMOp):
    # "b!" -- pop the data stack into the B address register.
    code = 0x1E
    strRep = "b!"
    def action(self, pInstance, addr):
        pInstance.B[:] = pInstance.dataStack.pop()
class AStoreOp(ARLMOp):
    # "a!" -- pop the data stack into the A address register.
    code = 0x1F
    strRep = "a!"
    def action(self, pInstance, addr):
        pInstance.A[:] = pInstance.dataStack.pop()
# Memory, read, write, ops
class MRWOp(OpCode):
    """Base class for the memory read/write opcodes (@p, @+, @b, @, !p, !+, !b, !)."""
    def getExecutionTime(pInstance):
        # NOTE(review): there is no ``self`` parameter; when called as a bound
        # method the instance arrives in ``pInstance``.  It happens to work
        # because the argument is never used -- confirm before relying on it.
        # These actually need to inspect the processor and see if there
        # Is a matching r/w available. If so, then 5.1 ns, otherwise
        # potentially infinite.
        return 5.1
def _fetch(pInstance, addr):
    # Module-level helper: read memory cell *addr* and push it onto the data stack.
    pInstance.dataStack.push(pInstance.mem[addr])
class FetchPOp(MRWOp):
    """@p -- fetch the cell addressed by P, then advance P."""
    code = 0x08
    strRep = "@p"
    def action(self, pInstance, addr):
        _fetch(pInstance, pInstance.P[0])
        # BUG FIX: @p auto-increments P (literal fetch), mirroring StorePOp's
        # handling of !p; the original left P unchanged, so the literal word
        # would be executed as code.
        pInstance.maybeIncrementAddress(pInstance.P)
class FetchAPlus(MRWOp):
    """@+ -- fetch the cell addressed by A, then advance A."""
    code = 0x09
    strRep = "@+"
    def action(self, pInstance, addr):
        _fetch(pInstance, pInstance.A[0])
        pInstance.maybeIncrementAddress(pInstance.A)
class FetchBOp(MRWOp):
    """@b -- fetch the cell addressed by B; B is NOT incremented."""
    code = 0x0A
    strRep = "@b"
    def action(self, pInstance, addr):
        _fetch(pInstance, pInstance.B[0])
        # BUG FIX: removed the B increment.  On the F18A only @+ / @p
        # auto-increment; @b leaves B alone (StoreBOp already follows this).
class FetchAOp(MRWOp):
    """@ -- fetch the cell addressed by A; A is NOT incremented."""
    code = 0x0B
    strRep = "@"
    def action(self, pInstance, addr):
        _fetch(pInstance, pInstance.A[0])
        # BUG FIX: removed the A increment; plain @ does not advance A
        # (that is what @+ is for; StoreAOp already follows this).
def _store(pInstance, addr):
    # Module-level helper: pop the data stack and write the value into memory cell *addr*.
    pInstance.mem[addr] = pInstance.dataStack.pop()
class StorePOp(MRWOp):
    # "!p" -- store via P, then advance P (auto-increment).
    code = 0x0C
    strRep = "!p"
    def action(self, pInstance, addr):
        _store(pInstance, pInstance.P[0])
        pInstance.maybeIncrementAddress(pInstance.P)
class StorePlusOp(MRWOp):
    # "!+" -- store via A, then advance A (auto-increment).
    code = 0x0D
    strRep = "!+"
    def action(self, pInstance, addr):
        _store(pInstance, pInstance.A[0])
        pInstance.maybeIncrementAddress(pInstance.A)
class StoreBOp(MRWOp):
    # "!b" -- store via B; B is not incremented.
    code = 0x0E
    strRep = "!b"
    def action(self, pInstance, addr):
        _store(pInstance, pInstance.B[0])
class StoreAOp(MRWOp):
    # "!" -- store via A; A is not incremented.
    code = 0x0F
    strRep = "!"
    def action(self, pInstance, addr):
        _store(pInstance, pInstance.A[0])
class FCOp(OpCode):
    """Base class for the flow-control opcodes (return, execute, jumps, loops)."""
    @classmethod
    def getExecutionTime(cls):
        """Return the nominal execution time in nanoseconds."""
        # (The original named this parameter ``pInstance``, but under
        # @classmethod it receives the class object.)
        return 5.1
class ReturnOp(FCOp):
    # ";" -- return: resume at the address saved in R.
    code = 0x00
    strRep = ";"
    def action(self, pInstance, addr):
        pInstance.P[:] = pInstance.R[:]
    def after(self, pInstance, addr):
        pInstance.triggerNextWordLoad()
class ExecuteOp(FCOp):
    # "ex" -- execute: swap P and R, jumping to the old R.
    code = 0x01
    strRep = "ex"
    def action(self, pInstance, addr):
        pInstance.R[:], pInstance.P[:] = pInstance.P[:], pInstance.R[:]
    def after(self, pInstance, addr):
        pInstance.triggerNextWordLoad()
class JumpOp(FCOp):
    # Unconditional jump to the inline address operand.
    code = 0x02
    strRep = "name ;"
    requiresAddress = True
    def action(self, pInstance, addr):
        pInstance.P[:] = addr
    def after(self, pInstance, addr):
        pInstance.triggerNextWordLoad()
class CallOp(FCOp):
    """call -- save the return address in R, then jump to *addr*."""
    code = 0x03
    strRep = "name"
    requiresAddress = True
    def action(self, pInstance, addr):
        # Save the address of the next word so ';' (ReturnOp) can resume there.
        pInstance.R[:] = pInstance.P[:]
        # BUG FIX: the original saved the return address but never jumped,
        # even though requiresAddress is True; a call must transfer control
        # (same P update as JumpOp).
        pInstance.P[:] = addr
    def after(self, pInstance, addr):
        # BUG FIX: like the other control transfers, a call must trigger the
        # load of the next instruction word at the new P.
        pInstance.triggerNextWordLoad()
class UnextOp(FCOp):
    # "unext" -- micronext: loop within the current instruction word using
    # the counter held in R.
    code = 0x04
    strRep = "unext"
    def action(self, pInstance, addr):
        if pInstance.R[0] == 0:
            # Counter exhausted: fall through to the next word.
            pInstance.maybeIncrementAddress(pInstance.P)
        else:
            # Decrement the loop counter; the current word re-executes.
            pInstance.R[0] -= 1
    @classmethod
    def getExecutionTime(pInstance):
        # NOTE(review): under @classmethod this parameter actually receives
        # the class, not a processor instance (unused, so it works).
        return 2.1
class NextOp(FCOp):
    # "next" -- loop back to *addr* while the counter in R is nonzero.
    code = 0x05
    strRep = "next"
    requiresAddress = True
    def action(self, pInstance, addr):
        # NOTE(review): UnextOp accesses the counter as R[0], but this method
        # compares and decrements R itself.  Unless the register type
        # overloads == and -=, this branch never triggers / raises --
        # confirm the register type and make the two ops consistent.
        if pInstance.R == 0:
            pInstance.maybeIncrementAddress(pInstance.P)
        else:
            pInstance.R -= 1
            pInstance.P[:] = addr
            pInstance.triggerNextWordLoad()
class IfOp(FCOp):
    # "if" -- conditional jump, taken when T is zero (F18A convention).
    code = 0x06
    strRep = "if"
    requiresAddress = True
    def action(self, pInstance, address):
        if pInstance.T[0] != 0:
            # T nonzero: fall through to the next word.
            pInstance.maybeIncrementAddress(pInstance.P)
        else:
            # T zero: jump.
            # NOTE(review): uses P[0] = address while JumpOp uses P[:] = addr;
            # confirm which form the register type expects.
            pInstance.P[0] = address
        pInstance.triggerNextWordLoad()
class UnlessOp(FCOp):
    # "-if" -- conditional jump, taken when T is non-negative (sign bit clear).
    code = 0x07
    strRep = "-if"
    requiresAddress = True
    def action(self, pInstance, address):
        if bu.getBitAsBool(pInstance.T[0], 17):
            # Sign bit (bit 17) set, i.e. T negative: fall through.
            pInstance.maybeIncrementAddress(pInstance.P)
        else:
            pInstance.P[0] = address
        pInstance.triggerNextWordLoad()
# A quick tool for gathering the classes above and instantiating one of each
def gatherOpsOfSubclass(ofClass, namespace=locals()):
    """Find every subclass of *ofClass* in *namespace* and return one instance of each.

    The default namespace is this module's namespace, captured at definition
    time, so calling with one argument collects the opcode classes above.
    """
    found = []
    for name, candidate in namespace.items():
        if inspect.isclass(candidate) and issubclass(candidate, ofClass):
            found.append(candidate)
    # issubclass(C, C) is True, so the base class itself is in the list; drop it.
    found.remove(ofClass)
    # Instantiate each collected opcode class.
    return [cls() for cls in found]
# Gather the different types
# NOTE: this tail is Python 2 only -- it uses the print statement and
# sorted(cmp=...) with the cmp() builtin, none of which exist in Python 3
# (the py3 equivalent would be sorted(..., key=lambda op: op.code)).
ARLMOpList = gatherOpsOfSubclass(ARLMOp)
FCOpList = gatherOpsOfSubclass(FCOp)
MRWOpList = gatherOpsOfSubclass(MRWOp)
# All operations sorted by code number
allOpList = sorted(ARLMOpList + FCOpList + MRWOpList, cmp=lambda a, b: cmp(a.code, b.code))
# Print some diagnostic schlock
for op in allOpList:
    print "0x%02X " % op.code, op.requiresAddress, op.minBitCount(), "--", op.__class__.__name__.ljust(10),
print
|
{
"content_hash": "7c17bce01dfbf2bebb72fa87e08df77c",
"timestamp": "",
"source": "github",
"line_count": 444,
"max_line_length": 110,
"avg_line_length": 24.8018018018018,
"alnum_prop": 0.624318924809299,
"repo_name": "meawoppl/GA144Tools",
"id": "0f16fc2870042beee5f9c3bcc64c7a0df1761ed9",
"size": "11012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FA18A_functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "47950"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add db_index=True to the quarter/year fields of ActiveUnit and Penetration."""

    dependencies = [
        ('chipyapp', '0006_auto_20151111_0103'),
    ]

    # The identical AlterField is applied to each (model, field) pair, in the
    # same order the original spelled them out.
    operations = [
        migrations.AlterField(
            model_name=model,
            name=field,
            field=models.SmallIntegerField(db_index=True),
        )
        for model in ('activeunit', 'penetration')
        for field in ('quarter', 'year')
    ]
|
{
"content_hash": "b7f23725e4c4f0cb1758f37e7b9e285e",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 58,
"avg_line_length": 26.848484848484848,
"alnum_prop": 0.5643340857787811,
"repo_name": "jordanbettis/chipy-mentorship",
"id": "ab3a9cc14e33ef4d87a19b1fd1574fb2687d0dc7",
"size": "910",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chipyprj/chipyapp/migrations/0007_auto_20151111_0223.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "47973"
},
{
"name": "HTML",
"bytes": "5483"
},
{
"name": "JavaScript",
"bytes": "1055327"
},
{
"name": "Python",
"bytes": "25967"
}
],
"symlink_target": ""
}
|
import errno
import glob
import os
import sys
import django
import subprocess
from pymongo import ReadPreference, MongoClient
from mongoengine import connect
sys.path.insert(0, os.path.dirname(__file__))
# calculated paths for django and the site
# used as starting points for various other paths
DJANGO_ROOT = os.path.dirname(os.path.realpath(django.__file__))
SITE_ROOT = os.path.dirname(os.path.realpath(__file__))
# Version
CRITS_VERSION = '4-master'
#the following gets the current git hash to be displayed in the footer and
#hides it if it is not a git repo
try:
    HIDE_GIT_HASH = False
    # Short form of the current commit hash.
    GIT_HASH = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).strip()
    # Full commit hash.
    GIT_HASH_LONG = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip()
    # Current branch name.
    GIT_BRANCH = subprocess.check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD']).strip()
except Exception:
    # Not a git checkout, or git is not installed: clear and hide the values.
    # BUG FIX: was a bare ``except:``, which also swallows SystemExit and
    # KeyboardInterrupt; check_output raises OSError or CalledProcessError,
    # both covered by Exception.
    GIT_HASH = ''
    GIT_HASH_LONG = ''
    HIDE_GIT_HASH = True
    GIT_BRANCH = ''
APPEND_SLASH = True
TEST_RUN = False
# Set to DENY|SAMEORIGIN|ALLOW-FROM uri
# Default: SAMEORIGIN
# More details: https://developer.mozilla.org/en-US/docs/HTTP/X-Frame-Options
#X_FRAME_OPTIONS = 'ALLOW-FROM https://www.example.com'
# Setup for runserver or Apache
if 'runserver' in sys.argv:
DEVEL_INSTANCE = True
SERVICE_MODEL = 'thread'
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
LOGIN_URL = "/login/"
elif 'test' in sys.argv:
TEST_RUN = True
DEVEL_INSTANCE = True
SERVICE_MODEL = 'thread'
SESSION_COOKIE_SECURE = False
CSRF_COOKIE_SECURE = False
LOGIN_URL = "/login/"
else:
DEVEL_INSTANCE = False
SERVICE_MODEL = 'process'
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
LOGIN_URL = "/login/"
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.dummy'
}
}
# MongoDB Default Configuration
# Tip: To change database settings, override by using
# template from config/database_example.py
MONGO_HOST = 'localhost' # server to connect to
MONGO_PORT = 27017 # port MongoD is running on
MONGO_DATABASE = 'crits' # database name to connect to
MONGO_SSL = False # whether MongoD has SSL enabled
MONGO_USER = '' # username used to authenticate to mongo (normally empty)
MONGO_PASSWORD = '' # password for the mongo user
# File storage backends
S3 = "S3"
GRIDFS = "GRIDFS"
# DB to use for files
FILE_DB = GRIDFS
# S3 buckets
BUCKET_PCAPS = "pcaps"
BUCKET_OBJECTS = "objects"
BUCKET_SAMPLES = "samples"
# Import custom Database config
# Path of the optional site-local database override module.
dbfile = os.path.join(SITE_ROOT, 'config/database.py')
if os.path.exists(dbfile):
    # Python 2 only: execfile() runs the override file in this module's
    # namespace, letting it replace the MONGO_* defaults defined above.
    execfile(dbfile)
if TEST_RUN:
    # Unit tests get their own database so they never touch real data.
    MONGO_DATABASE = 'crits-unittest'
# Read preference to configure which nodes you can read from
# Possible values:
# primary: queries are sent to the primary node in a replicSet
# secondary: queries are allowed if sent to primary or secondary
# (for single host) or are distributed to secondaries
# if you are connecting through a router
# More info can be found here:
# http://api.mongodb.org/python/current/api/pymongo/index.html
MONGO_READ_PREFERENCE = ReadPreference.PRIMARY
# MongoDB default collections
COL_ACTORS = "actors" # main collection for actors
COL_ACTOR_IDENTIFIERS = "actor_identifiers" # main collection for actor identifiers
COL_ACTOR_THREAT_IDENTIFIERS = "actor_threat_identifiers" # actor threat identifiers
COL_ACTOR_THREAT_TYPES = "actor_threat_types" # actor threat types
COL_ACTOR_MOTIVATIONS = "actor_motivations" # actor motivations
COL_ACTOR_SOPHISTICATIONS = "actor_sophistications" # actor sophistications
COL_ACTOR_INTENDED_EFFECTS = "actor_intended_effects" # actor intended effects
COL_ANALYSIS_RESULTS = "analysis_results" # analysis results
COL_AUDIT_LOG = "audit_log" # audit log entries
COL_BACKDOOR_DETAILS = "backdoor_details" # backdoor information
COL_BUCKET_LISTS = "bucket_lists" # bucketlist information
COL_CAMPAIGNS = "campaigns" # campaigns list
COL_CERTIFICATES = "certificates" # certificates list
COL_COMMENTS = "comments" # comments collection
COL_CONFIG = "config" # config collection
COL_COUNTS = "counts" # general counts for dashboard
COL_DIVISION_DATA = "division_data" # information on divisions within company
COL_DOMAINS = "domains" # root domains with FQDNs and IP information
COL_EFFECTIVE_TLDS = "effective_tlds" # list of effective TLDs from Mozilla to determine root domains
COL_EMAIL = "email" # main email collection
COL_EVENTS = "events" # main events collection
COL_EVENT_TYPES = "event_types" # event types for events
COL_EXPLOIT_DETAILS = "exploit_details" # list of CVE's
COL_EXPLOITS = "exploits" # exploit count generated by MapReduce
COL_FILETYPES = "filetypes" # list of filetypes in system generated by MapReduce
COL_IDB_ACTIONS = "idb_actions" # list of available actions to be taken with indicators
COL_INDICATORS = "indicators" # main indicators collection
COL_INTERNAL_LOCATIONS = "internal_locations" # site locations for company
COL_IPS = "ips" # IPs collection
COL_NOTIFICATIONS = "notifications" # notifications collection
COL_OBJECTS = "objects" # objects that are files that have been added
COL_OBJECT_TYPES = "object_types" # types of objects that can be added
COL_PCAPS = "pcaps" # main pcaps collection
COL_RAW_DATA = "raw_data" # main raw data collection
COL_RAW_DATA_TYPES = "raw_data_types" # list of available raw data types
COL_RELATIONSHIP_TYPES = "relationship_types" # list of available relationship types
COL_SAMPLES = "sample" # main samples collection
COL_SCREENSHOTS = "screenshots" # main screenshots collection
COL_SECTOR_LISTS = "sector_lists" # sector lists information
COL_SECTORS = "sectors" # available sectors
COL_SERVICES = "services" # list of services for scanning
COL_SOURCE_ACCESS = "source_access" # source access ACL collection
COL_SOURCES = "sources" # source information generated by MapReduce
COL_STATISTICS = "statistics" # list of statistics for different objects (campaigns, for example)
COL_TARGETS = "targets" # target information for use in email
COL_USERS = "users" # main users collection
COL_USER_ROLES = "user_roles" # main user roles collection
COL_YARAHITS = "yarahits" # yara hit counts for samples
# MongoDB connection pool
if MONGO_USER:
connect(MONGO_DATABASE, host=MONGO_HOST, port=MONGO_PORT, read_preference=MONGO_READ_PREFERENCE, ssl=MONGO_SSL,
username=MONGO_USER, password=MONGO_PASSWORD)
else:
connect(MONGO_DATABASE, host=MONGO_HOST, port=MONGO_PORT, read_preference=MONGO_READ_PREFERENCE, ssl=MONGO_SSL)
# Get config from DB
c = MongoClient(MONGO_HOST, MONGO_PORT, ssl=MONGO_SSL)
db = c[MONGO_DATABASE]
if MONGO_USER:
db.authenticate(MONGO_USER, MONGO_PASSWORD)
coll = db[COL_CONFIG]
crits_config = coll.find_one({})
if not crits_config:
crits_config = {}
# Populate settings
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
# NOTE: we are setting ALLOWED_HOSTS to ['*'] by default which will work
# everywhere but is insecure for production installations (no less secure
# than setting DEBUG to True). This is done because we can't anticipate
# the host header for every CRITs install and this should work "out of
# the box".
ALLOWED_HOSTS = crits_config.get('allowed_hosts', ['*'])
COMPANY_NAME = crits_config.get('company_name', 'My Company')
CLASSIFICATION = crits_config.get('classification', 'unclassified')
CRITS_EMAIL = crits_config.get('crits_email', '')
CRITS_EMAIL_SUBJECT_TAG = crits_config.get('crits_email_subject_tag', '')
CRITS_EMAIL_END_TAG = crits_config.get('crits_email_end_tag', True)
DEBUG = crits_config.get('debug', True)
if crits_config.get('email_host', None):
EMAIL_HOST = crits_config.get('email_host', None)
if crits_config.get('email_port', None):
EMAIL_PORT = int(crits_config.get('email_port', None))
ENABLE_API = crits_config.get('enable_api', False)
ENABLE_TOASTS = crits_config.get('enable_toasts', False)
GIT_REPO_URL = crits_config.get('git_repo_url', '')
HTTP_PROXY = crits_config.get('http_proxy', None)
INSTANCE_NAME = crits_config.get('instance_name', 'My Instance')
INSTANCE_URL = crits_config.get('instance_url', '')
INVALID_LOGIN_ATTEMPTS = crits_config.get('invalid_login_attempts', 3) - 1
LANGUAGE_CODE = crits_config.get('language_code', 'en-us')
LDAP_AUTH = crits_config.get('ldap_auth', False)
LDAP_SERVER = crits_config.get('ldap_server', '')
LDAP_USERDN = crits_config.get('ldap_userdn', '')
LDAP_USERCN = crits_config.get('ldap_usercn', '')
LOG_DIRECTORY = crits_config.get('log_directory', os.path.join(SITE_ROOT, '..', 'logs'))
LOG_LEVEL = crits_config.get('log_level', 'INFO')
QUERY_CACHING = crits_config.get('query_caching', False)
RAR_PATH = crits_config.get('rar_path', '/usr/bin/unrar')
RT_URL = crits_config.get('rt_url', None)
SECURE_COOKIE = crits_config.get('secure_cookie', True)
SERVICE_DIRS = tuple(crits_config.get('service_dirs', []))
SERVICE_MODEL = crits_config.get('service_model', SERVICE_MODEL)
SERVICE_POOL_SIZE = int(crits_config.get('service_pool_size', 12))
SESSION_TIMEOUT = int(crits_config.get('session_timeout', 12)) * 60 * 60
SPLUNK_SEARCH_URL = crits_config.get('splunk_search_url', None)
TEMP_DIR = crits_config.get('temp_dir', '/tmp')
TIME_ZONE = crits_config.get('timezone', 'America/New_York')
ZIP7_PATH = crits_config.get('zip7_path', '/usr/bin/7za')
REMOTE_USER = crits_config.get('remote_user', False)
PASSWORD_COMPLEXITY_REGEX = crits_config.get('password_complexity_regex', '(?=^.{8,}$)((?=.*\d)|(?=.*\W+))(?![.\n])(?=.*[A-Z])(?=.*[a-z]).*$')
PASSWORD_COMPLEXITY_DESC = crits_config.get('password_complexity_desc', '8 characters, at least 1 capital, 1 lowercase and 1 number/special')
DEPTH_MAX = crits_config.get('depth_max', '10')
TOTAL_MAX = crits_config.get('total_max', '250')
REL_MAX = crits_config.get('rel_max', '50')
TOTP = crits_config.get('totp', False)
COLLECTION_TO_BUCKET_MAPPING = {
COL_PCAPS: BUCKET_PCAPS,
COL_OBJECTS: BUCKET_OBJECTS,
COL_SAMPLES: BUCKET_SAMPLES
}
# check Log Directory
if not os.path.exists(LOG_DIRECTORY):
LOG_DIRECTORY = os.path.join(SITE_ROOT, '..', 'logs')
# Custom settings for Django
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# DATE and DATETIME Formats
DATE_FORMAT = 'Y-m-d'
DATETIME_FORMAT = 'Y-m-d H:i:s.u'
PY_DATE_FORMAT = '%Y-%m-%d'
PY_TIME_FORMAT = '%H:%M:%S.%f'
PY_DATETIME_FORMAT = ' '.join([PY_DATE_FORMAT, PY_TIME_FORMAT])
OLD_PY_DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S'
PY_FORM_DATETIME_FORMATS = [PY_DATETIME_FORMAT, OLD_PY_DATETIME_FORMAT]
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = os.path.join(SITE_ROOT, '../extras/www')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = '/'
STATIC_ROOT = os.path.join(SITE_ROOT, '../extras/www/static')
STATIC_URL = '/static/'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
#'django.template.loaders.eggs.load_template_source',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.core.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'crits.core.views.base_context',
'crits.core.views.collections',
'crits.core.views.user_context',
)
ROOT_URLCONF = 'crits.urls'
TEMPLATE_DIRS = (
os.path.join(SITE_ROOT, '../documentation'),
os.path.join(SITE_ROOT, 'core/templates'),
os.path.join(SITE_ROOT, 'actors/templates'),
os.path.join(SITE_ROOT, 'core/dashboard/templates'),
os.path.join(SITE_ROOT, 'campaigns/templates'),
os.path.join(SITE_ROOT, 'certificates/templates'),
os.path.join(SITE_ROOT, 'comments/templates'),
os.path.join(SITE_ROOT, 'config/templates'),
os.path.join(SITE_ROOT, 'domains/templates'),
os.path.join(SITE_ROOT, 'emails/templates'),
os.path.join(SITE_ROOT, 'events/templates'),
os.path.join(SITE_ROOT, 'indicators/templates'),
os.path.join(SITE_ROOT, 'ips/templates'),
os.path.join(SITE_ROOT, 'objects/templates'),
os.path.join(SITE_ROOT, 'pcaps/templates'),
os.path.join(SITE_ROOT, 'raw_data/templates'),
os.path.join(SITE_ROOT, 'relationships/templates'),
os.path.join(SITE_ROOT, 'samples/templates'),
os.path.join(SITE_ROOT, 'screenshots/templates'),
os.path.join(SITE_ROOT, 'services/templates'),
os.path.join(SITE_ROOT, 'standards/templates'),
os.path.join(SITE_ROOT, 'stats/templates'),
os.path.join(SITE_ROOT, 'targets/templates'),
)
STATICFILES_DIRS = (
os.path.join(SITE_ROOT, 'core/static'),
os.path.join(SITE_ROOT, 'actors/static'),
os.path.join(SITE_ROOT, 'dashboards/static'),
os.path.join(SITE_ROOT, 'campaigns/static'),
os.path.join(SITE_ROOT, 'certificates/static'),
os.path.join(SITE_ROOT, 'comments/static'),
os.path.join(SITE_ROOT, 'domains/static'),
os.path.join(SITE_ROOT, 'emails/static'),
os.path.join(SITE_ROOT, 'events/static'),
os.path.join(SITE_ROOT, 'indicators/static'),
os.path.join(SITE_ROOT, 'ips/static'),
os.path.join(SITE_ROOT, 'objects/static'),
os.path.join(SITE_ROOT, 'pcaps/static'),
os.path.join(SITE_ROOT, 'raw_data/static'),
os.path.join(SITE_ROOT, 'relationships/static'),
os.path.join(SITE_ROOT, 'samples/static'),
os.path.join(SITE_ROOT, 'screenshots/static'),
os.path.join(SITE_ROOT, 'services/static'),
os.path.join(SITE_ROOT, 'config/static'),
os.path.join(SITE_ROOT, 'targets/static'),
)
INSTALLED_APPS = (
'crits.core',
'crits.dashboards',
'django.contrib.auth',
'mongoengine.django.mongo_auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.staticfiles',
'crits.actors',
'crits.campaigns',
'crits.certificates',
'crits.domains',
'crits.emails',
'crits.events',
'crits.indicators',
'crits.ips',
'crits.objects',
'crits.pcaps',
'crits.raw_data',
'crits.relationships',
'crits.samples',
'crits.screenshots',
'crits.services',
'crits.stats',
'crits.targets',
'tastypie',
'tastypie_mongoengine',
)
AUTH_USER_MODEL = 'mongo_auth.MongoUser'
MONGOENGINE_USER_DOCUMENT = 'crits.core.user.CRITsUser'
SESSION_ENGINE = 'mongoengine.django.sessions'
AUTHENTICATION_BACKENDS = (
#'mongoengine.django.auth.MongoEngineBackend',
'crits.core.user.CRITsAuthBackend',
)
if REMOTE_USER:
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.RemoteUserMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
AUTHENTICATION_BACKENDS = (
'crits.core.user.CRITsRemoteUserBackend',
)
# Handle logging after all custom configuration
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'verbose': {
'format': "%(levelname)s %(asctime)s %(name)s %(message)s"
},
},
'handlers': {
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
},
'normal': {
'level': LOG_LEVEL,
'class': 'logging.FileHandler',
'formatter': 'verbose',
'filename': os.path.join(LOG_DIRECTORY, 'crits.log'),
},
},
'loggers': {
'django': {
'handlers': ['null'],
'propagate': True,
'level': 'INFO',
},
'crits': {
'handlers': ['normal'],
'propagate': True,
'level': 'DEBUG',
},
},
}
# Handle creating log directories if they do not exist
# Create the directory for every file-based logging handler configured above.
for handler in LOGGING['handlers'].values():
    log_file = handler.get('filename')
    if log_file:
        log_dir = os.path.dirname(log_file)
        if not os.path.exists(log_dir):
            try:
                os.makedirs(log_dir)
            except OSError as e:
                # Another process may create the directory between the
                # exists() check and makedirs(); that race is benign.
                # (Uses e.errno instead of the original positional
                # e.args[0], which is the documented attribute for OSError.)
                if e.errno != errno.EEXIST:
                    # Re-raise anything we cannot safely ignore, such as
                    # permission errors.
                    raise
# CRITs Types
CRITS_TYPES = {
'Actor': COL_ACTORS,
'AnalysisResult': COL_ANALYSIS_RESULTS,
'Campaign': COL_CAMPAIGNS,
'Certificate': COL_CERTIFICATES,
'Comment': COL_COMMENTS,
'Domain': COL_DOMAINS,
'Email': COL_EMAIL,
'Event': COL_EVENTS,
'Indicator': COL_INDICATORS,
'IP': COL_IPS,
'Notification': COL_NOTIFICATIONS,
'PCAP': COL_PCAPS,
'RawData': COL_RAW_DATA,
'Sample': COL_SAMPLES,
'Screenshot': COL_SCREENSHOTS,
'Target': COL_TARGETS,
}
# Custom template lists for loading in different places in the UI
SERVICE_NAV_TEMPLATES = ()
SERVICE_CP_TEMPLATES = ()
SERVICE_TAB_TEMPLATES = ()
# discover services
for service_directory in SERVICE_DIRS:
    if os.path.isdir(service_directory):
        # Make the service packages importable.
        sys.path.insert(0, service_directory)
        for d in os.listdir(service_directory):
            abs_path = os.path.join(service_directory, d, 'templates')
            if os.path.isdir(abs_path):
                # Register the service's template directory with Django.
                TEMPLATE_DIRS = TEMPLATE_DIRS + (abs_path,)
                # Optional nav / control-panel fragments named after the service.
                nav_items = os.path.join(abs_path, '%s_nav_items.html' % d)
                cp_items = os.path.join(abs_path, '%s_cp_items.html' % d)
                if os.path.isfile(nav_items):
                    SERVICE_NAV_TEMPLATES = SERVICE_NAV_TEMPLATES + ('%s_nav_items.html' % d,)
                if os.path.isfile(cp_items):
                    SERVICE_CP_TEMPLATES = SERVICE_CP_TEMPLATES + ('%s_cp_items.html' % d,)
                # Tab templates end in "_tab.html"; split the filename back
                # into (ctype, name, filename).
                for tab_temp in glob.glob('%s/*_tab.html' % abs_path):
                    head, tail = os.path.split(tab_temp)
                    # NOTE(review): for "foo_bar_tab.html" this yields
                    # ctype="bar", name="foo" -- ctype is the token just
                    # before "_tab", not the first token; confirm this matches
                    # the services' naming convention.
                    ctype = tail.split('_')[-2]
                    name = "_".join(tail.split('_')[:-2])
                    SERVICE_TAB_TEMPLATES = SERVICE_TAB_TEMPLATES + ((ctype, name, tail),)
# Allow configuration of the META or HEADER variable is used to find
# remote username when REMOTE_USER is enabled.
REMOTE_USER_META = 'REMOTE_USER'
# The next example could be used for reverse proxy setups
# where your frontend might pass Remote-User: header.
#
# WARNING: If you enable this, be 100% certain your backend is not
# directly accessible and this header could be spoofed by an attacker.
#
# REMOTE_USER_META = 'HTTP_REMOTE_USER'
# Import custom settings if it exists
csfile = os.path.join(SITE_ROOT, 'config/overrides.py')
if os.path.exists(csfile):
execfile(csfile)
|
{
"content_hash": "0fcc40ea2c0d5e1491f51f0e7b869f7c",
"timestamp": "",
"source": "github",
"line_count": 528,
"max_line_length": 142,
"avg_line_length": 41.67045454545455,
"alnum_prop": 0.6233524225070448,
"repo_name": "seanthegeek/crits",
"id": "05325fcbd5388dada1dd764d2e869bae96b4077e",
"size": "22031",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crits/settings.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "8694"
},
{
"name": "CSS",
"bytes": "360810"
},
{
"name": "HTML",
"bytes": "447412"
},
{
"name": "JavaScript",
"bytes": "2013634"
},
{
"name": "Perl",
"bytes": "916"
},
{
"name": "Prolog",
"bytes": "948"
},
{
"name": "Python",
"bytes": "1908131"
},
{
"name": "Shell",
"bytes": "10293"
}
],
"symlink_target": ""
}
|
import io
import unittest
from doctrine.code import Code
from doctrine.code import Analyzer
TEST_CODE = u'''class CodeLayout(TextLayout):
"""This is a docstring"""
tab_width = 8
def calculate_text_segments(self, text, width, wrap):
"""
Calculate the segments of text to display given width screen
columns to display them.
text - unicode text or byte string to display
width - number of available screen columns
wrap - wrapping mode used
Returns a layout structure without alignment applied.
"""
# TODO: This function is a horror and a mess, and really hard to
# understand. It's based on urwids StandardLayout, which by itself
# is overly complex, and I added tab handling, which made it worse.
# It's a prime candidate for refacturing, making easier to understand
# and as it is heavily used, profiling would be nice too.
nl, nl_o, sp_o, tab_o = "\n", "\n", " ", "\t"
if PYTHON3 and isinstance(text, bytes):
nl = B(nl) # can only find bytes in python3 bytestrings
nl_o = ord(nl_o) # + an item of a bytestring is the ordinal value
sp_o = ord(sp_o)
tab_o = ord(tab_o)
b = []
p = 0
if wrap == 'clip':
# no wrapping to calculate, so it's easy.
while p<=len(text):
n_cr = find_newline(text, p)
if p == n_cr:
# A line with no characters.
l.append((0, n_cr))
continue
# force any char wrap
if l:
b.append(l)
p += pt
return b
'''
# We need a "dummy" analyzer for testing.
class PythonTestAnalyzer(Analyzer):
    """A deliberately simple Analyzer used only by these tests.

    A "block" is considered complete as soon as a row ends outside any
    triple-quoted multiline string.
    """
    def find_block(self, start_row, max_block):
        # This dummy Python analyzer looks for multiline strings
        lines = []
        # The delimiter we are currently inside ('"""' or "'''"), or None
        # when outside any multiline string.
        multiline = None
        for row in range(start_row, start_row + max_block):
            pos = 0
            line = self.code[row]
            lines.append(line)
            l = len(line)
            # NOTE(review): the '"""' search always runs before the "'''"
            # search, regardless of which delimiter appears first in the
            # line; a line mixing both delimiters can be mis-scanned.
            # Acceptable for a test dummy, but worth confirming.
            while pos < l:
                npos = line.find('"""', pos)
                if npos != -1:
                    # A multiline string?
                    if multiline is None:  # Yes, the start!
                        multiline = '"""'
                    elif multiline == '"""':  # Yes, the end!
                        multiline = None
                    pos = npos + 3
                    continue
                npos = line.find("'''", pos)
                if npos != -1:
                    # A multiline string?
                    if multiline is None:  # Yes, the start!
                        multiline = "'''"
                    elif multiline == "'''":  # Yes, the end!
                        multiline = None
                    pos = npos + 3
                    continue
                # Neither were found.
                break
            # We reached the end of the line.
            if multiline is None:
                # We are not in a multiline string at the moment:
                return lines
            # else we read the next row and see if the multiline string ends.
        # And we now reached the maximum length we are willing to read
        # before giving up
        return lines
class TestAnalysis(unittest.TestCase):
    """Exercise block finding against the TEST_CODE sample above."""
    def test_blocks(self):
        """find_block stops at the first row that ends outside a multiline string."""
        # One of the primary tasks of the analyzer is to find blocks
        # of code that have valid syntax and can be highlighted.
        f = io.StringIO(TEST_CODE)
        c = Code(f)
        a = PythonTestAnalyzer(c)
        # Rows 0 and 1 each form a complete one-line block.
        self.assertEqual(a.find_block(0, 10),
                         ['class CodeLayout(TextLayout):\n'])
        self.assertEqual(a.find_block(1, 10),
                         ['    """This is a docstring"""\n'])
        # Row 6 opens a docstring that closes ten rows later.
        self.assertEqual(a.find_block(6, 10),
                         ['    """\n',
                          '    Calculate the segments of text to display '
                          'given width screen\n',
                          '    columns to display them.\n',
                          '\n',
                          '    text - unicode text or byte string to '
                          'display\n',
                          '    width - number of available screen columns'
                          '\n',
                          '    wrap - wrapping mode used\n',
                          '\n',
                          '    Returns a layout structure without '
                          'alignment applied.\n',
                          '    """\n'])
        # With max_block=3 the search gives up before the docstring closes.
        self.assertEqual(a.find_block(6, 3),
                         ['    """\n',
                          '    Calculate the segments of text to display '
                          'given width screen\n',
                          '    columns to display them.\n'])
|
{
"content_hash": "05cb8a104b34584aaf2c63311f12e392",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 78,
"avg_line_length": 35.177304964539005,
"alnum_prop": 0.4659274193548387,
"repo_name": "regebro/doctrine.code",
"id": "6b31b15a6656dbeb1c491ef25dfc096c3e13459e",
"size": "4984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_analysis.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19069"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
class Dict(dict):
    """A dict that lazily builds missing values.

    Looking up an absent key calls ``creator(key)``, stores the result under
    that key, and returns it — like ``collections.defaultdict``, except the
    factory receives the key being looked up.
    """

    def __init__(self, creator):
        self._creator = creator

    def __missing__(self, key):
        created = self._creator(key)
        self[key] = created
        return created
|
{
"content_hash": "46bd2b9c3b7b42226d65f4e2bd9984ec",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 50,
"avg_line_length": 24.714285714285715,
"alnum_prop": 0.6271676300578035,
"repo_name": "davits/DyeVim",
"id": "6f6083305ae8bd7ac62a3f71fd2f77a302c672b3",
"size": "1491",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/dye/utils/dict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55158"
},
{
"name": "Vim script",
"bytes": "21006"
}
],
"symlink_target": ""
}
|
"""
setup.py
~~~~~~~~
on package install:
- generates metadata
- installs json files for use in m2g_cloud
- installs `m2g` script keywords to the command line
- ensures python version
- installs m2g dependencies
Use `pip install .` to install the package.
Use `pip install -e .` to install the package in developer mode.
See our README for more details on package installation : https://github.com/neurodata/m2g/blob/staging/README.md
"""
from setuptools import setup, find_packages
from m2g import __version__
# All setuptools arguments — metadata, packaging configuration, and
# install-time requirements — assembled in one place (previously built
# piecewise via successive kwargs.update() calls).
SETUP_ARGS = dict(
    # metadata
    name="m2g",
    version=__version__,
    description="Neuro Data MRI to Graphs Pipeline",
    author="Derek Pisner, Alex Loftus, Greg Kiar, Eric Bridgeford, and Will Gray Roncal",
    author_email="dpisner@utexas.edu, aloftus2@jhu.edu, gkiar@jhu.edu, wgr@jhu.edu, ebridge2@jhu.edu",
    url="https://github.com/neurodata/m2g",
    download_url="https://github.com/neurodata/m2g/tarball/" + __version__,
    keywords=["connectome", "mri", "pipeline"],
    classifiers=["Programming Language :: Python :: 3.6"],
    # utility info
    packages=find_packages(),
    package_data={"templates": ["*.json"]},
    include_package_data=False,  # only include the m2g_cloud template jsons
    entry_points={
        "console_scripts": [
            "m2g=m2g.scripts.m2g_bids:main",
            "m2g_dwi_pipeline=m2g.scripts.m2g_dwi_pipeline:main",
            "m2g_cloud=m2g.scripts.m2g_cloud:main",
            "m2g_bids=m2g.scripts.m2g_bids:main",  # for backwards compatibility
        ]
    },
    python_requires=">=3.6",
    # requirements
    install_requires=[
        "nibabel",
        "numpy",
        "dipy>=1.0.0",
        "scipy",
        "boto3",
        "awscli",
        "matplotlib",
        "nilearn",
        "vtk",
        "pyvtk",
        "fury==0.5.1",
        "requests",
        "plotly",
        "pybids>=0.9.0",
        "scikit-image",
        "networkx>=2.4",
        "configparser>=3.7.4",
        "pytest",
    ],
)

# run setup
setup(**SETUP_ARGS)
|
{
"content_hash": "e580a5c047c4e2e16181aa9be8817bdf",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 113,
"avg_line_length": 27.602409638554217,
"alnum_prop": 0.5665648188563945,
"repo_name": "neurodata/ndmg",
"id": "9ab5d0a544d48f82626b1da774ac201115deded8",
"size": "2313",
"binary": false,
"copies": "1",
"ref": "refs/heads/deploy",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3945"
},
{
"name": "Jupyter Notebook",
"bytes": "127234"
},
{
"name": "Python",
"bytes": "242183"
}
],
"symlink_target": ""
}
|
"""
API for the command-line I{pyflakes} tool.
"""
from __future__ import with_statement
import sys
import os
import _ast
from pyflakes import checker, __version__
from pyflakes import reporter as modReporter
__all__ = ['check', 'checkPath', 'checkRecursive', 'iterSourceCode', 'main']
def check(codeString, filename, reporter=None):
    """
    Check the Python source given by C{codeString} for flakes.
    @param codeString: The Python source to check.
    @type codeString: C{str}
    @param filename: The name of the file the source came from, used to report
        errors.
    @type filename: C{str}
    @param reporter: A L{Reporter} instance, where errors and warnings will be
        reported.
    @return: The number of warnings emitted.
    @rtype: C{int}
    """
    if reporter is None:
        reporter = modReporter._makeDefaultReporter()
    # First, compile into an AST and handle syntax errors.
    try:
        tree = compile(codeString, filename, "exec", _ast.PyCF_ONLY_AST)
    except SyntaxError:
        value = sys.exc_info()[1]
        msg = value.args[0]
        (lineno, offset, text) = value.lineno, value.offset, value.text
        if checker.PYPY:
            # PyPy's SyntaxError may omit the offending source line; recover
            # it from the original source so the report can still show it.
            if text is None:
                lines = codeString.splitlines()
                if len(lines) >= lineno:
                    text = lines[lineno - 1]
                    if sys.version_info >= (3, ) and isinstance(text, bytes):
                        try:
                            text = text.decode('ascii')
                        except UnicodeDecodeError:
                            text = None
            # PyPy reports a 1-based column; normalize to CPython's 0-based.
            offset -= 1
        # If there's an encoding problem with the file, the text is None.
        if text is None:
            # Avoid using msg, since for the only known case, it contains a
            # bogus message that claims the encoding the file declared was
            # unknown.
            reporter.unexpectedError(filename, 'problem decoding source')
        else:
            reporter.syntaxError(filename, msg, lineno, offset, text)
        return 1
    except Exception:
        # compile() can raise non-SyntaxError exceptions on malformed input
        # (e.g. presumably null bytes in the source) — treat like a decode
        # failure rather than crashing.
        reporter.unexpectedError(filename, 'problem decoding source')
        return 1
    # Okay, it's syntactically valid. Now check it.
    w = checker.Checker(tree, filename)
    w.messages.sort(key=lambda m: m.lineno)
    for warning in w.messages:
        reporter.flake(warning)
    return len(w.messages)
def checkPath(filename, reporter=None):
    """
    Check the given path, printing out any warnings detected.

    @param reporter: A L{Reporter} instance, where errors and warnings will be
        reported.
    @return: the number of warnings printed
    """
    if reporter is None:
        reporter = modReporter._makeDefaultReporter()
    # Python 2.6's compile() chokes on \r\n line endings, so read with
    # universal newlines there (and append a trailing newline it also needs).
    # On later versions binary mode gives compile() the best opportunity to
    # do the right thing WRT text encodings.
    legacy = sys.version_info < (2, 7)
    try:
        with open(filename, 'rU' if legacy else 'rb') as source_file:
            codestr = source_file.read()
        if legacy:
            codestr += '\n'  # Work around for Python <= 2.6
    except UnicodeError:
        reporter.unexpectedError(filename, 'problem decoding source')
        return 1
    except IOError:
        msg = sys.exc_info()[1]
        reporter.unexpectedError(filename, msg.args[1])
        return 1
    return check(codestr, filename, reporter)
def iterSourceCode(paths):
    """
    Iterate over all Python source files in C{paths}.

    @param paths: A list of paths. Directories will be recursed into and
        any .py files found will be yielded. Any non-directories will be
        yielded as-is.
    """
    for entry in paths:
        if not os.path.isdir(entry):
            # Non-directories are passed through untouched, whatever their
            # extension — the caller decided they should be checked.
            yield entry
            continue
        for dirpath, _dirnames, filenames in os.walk(entry):
            for name in filenames:
                if name.endswith('.py'):
                    yield os.path.join(dirpath, name)
def checkRecursive(paths, reporter):
    """
    Recursively check all source files in C{paths}.

    @param paths: A list of paths to Python source files and directories
        containing Python source files.
    @param reporter: A L{Reporter} where all of the warnings and errors
        will be reported to.
    @return: The number of warnings found.
    """
    # Sum the per-file warning counts over every discovered source file.
    return sum(checkPath(sourcePath, reporter)
               for sourcePath in iterSourceCode(paths))
def _exitOnSignal(sigName, message):
"""Handles a signal with sys.exit.
Some of these signals (SIGPIPE, for example) don't exist or are invalid on
Windows. So, ignore errors that might arise.
"""
import signal
try:
sigNumber = getattr(signal, sigName)
except AttributeError:
# the signal constants defined in the signal module are defined by
# whether the C library supports them or not. So, SIGPIPE might not
# even be defined.
return
def handler(sig, f):
sys.exit(message)
try:
signal.signal(sigNumber, handler)
except ValueError:
# It's also possible the signal is defined, but then it's invalid. In
# this case, signal.signal raises ValueError.
pass
def main(prog=None, args=None):
    """Entry point for the script "pyflakes"."""
    import optparse

    # Handle "Keyboard Interrupt" and "Broken pipe" gracefully
    _exitOnSignal('SIGINT', '... stopped')
    _exitOnSignal('SIGPIPE', 1)

    parser = optparse.OptionParser(prog=prog, version=__version__)
    __, remaining = parser.parse_args(args=args)
    reporter = modReporter._makeDefaultReporter()
    if remaining:
        warnings = checkRecursive(remaining, reporter)
    else:
        # No paths given: check source piped in on stdin.
        warnings = check(sys.stdin.read(), '<stdin>', reporter)
    # Exit status 1 when any warning was emitted, 0 otherwise.
    raise SystemExit(warnings > 0)
|
{
"content_hash": "50698ee57e09f4f098258d4ef47224c0",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 78,
"avg_line_length": 32.11764705882353,
"alnum_prop": 0.6165501165501166,
"repo_name": "vicky2135/lucious",
"id": "a535bff1289ea7867616f5249bb5c324eb3ee62e",
"size": "6006",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "oscar/lib/python2.7/site-packages/pyflakes/api.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "896683"
},
{
"name": "C++",
"bytes": "52230"
},
{
"name": "CSS",
"bytes": "1169533"
},
{
"name": "HTML",
"bytes": "1104983"
},
{
"name": "JavaScript",
"bytes": "1055140"
},
{
"name": "Makefile",
"bytes": "145238"
},
{
"name": "Python",
"bytes": "55993261"
},
{
"name": "Shell",
"bytes": "40487"
}
],
"symlink_target": ""
}
|
from django.apps import AppConfig
class ExpensesConfig(AppConfig):
    """Django application configuration for the expenses app."""
    name = 'expenses'
    # Human-readable name shown in the Django admin ('Expenses' in Portuguese).
    verbose_name = 'Despesas'
|
{
"content_hash": "618c692de390b28333dbf96b55654ca2",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 33,
"avg_line_length": 20.166666666666668,
"alnum_prop": 0.7272727272727273,
"repo_name": "Lucasfeelix/ong-joao-de-barro",
"id": "05ca574e17c855c0d1331e23df4962960c74c1ce",
"size": "121",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "expenses/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "150085"
},
{
"name": "HTML",
"bytes": "33677"
},
{
"name": "JavaScript",
"bytes": "3977"
},
{
"name": "Python",
"bytes": "37975"
}
],
"symlink_target": ""
}
|
"""
Spyder Editor
"""
import pandas as pd
from latlon import *
import re
from bs4 import BeautifulSoup
import csv
import sys
import requests
def scrape_birthdays_page(url=None, csv_name=None):
    """Scrape info from the nobelprize.org birthdays page.

    Scrapes info from the birthdays page at:
    http://www.nobelprize.org/nobel_prizes/lists/birthdays.html?day=0&month=0&year=&photo=1#birthday_result
    Also scrapes each bio_page of each winner for more info.

    Writes one csv row per laureate: thumbnail pic URL, bio url link, name,
    Year Prize Won, Nobel Prize field, Year Born, Year Died, Name again
    (sync check), Born City, Died City (if applicable), and Affiliation at
    time of award.

    Args:
        url: HTML url to nobelprize.org birthdays page
        csv_name: String with name of csv file to write to
    Returns:
        None; writes a csv file to the name specified in csv_name.
    """
    r = requests.get(url)
    soup = BeautifulSoup(r.text, from_encoding=r.encoding)
    each_entry_divs = soup.find_all("div", attrs={"class": "row", "style": "margin-bottom: 15px;"})
    # The first matching div is a header row, not a laureate entry.
    each_entry_divs.pop(0)
    # Fix: the original leaked the file handle (csv.writer(open(...))); a
    # context manager guarantees the file is flushed and closed even if a
    # request or parse step raises part-way through the loop.
    with open(csv_name, "wb") as csv_file:
        f = csv.writer(csv_file)
        f.writerow(["name", "bio_thumbnail", "bio_link", "year_won",
                    "nobel_field", "year_born", "year_died", "name_check",
                    "born_city", "died_city", "location_at_award"])
        for person in each_entry_divs:
            bio_thumbnail = person.find("img")['src']
            bio_link = person.find(class_='text_link')['href']
            nobel_info = person.find_all(class_="medium-10 columns birthdays-result-main")[0].text.split('\n')
            year_won = nobel_info[0].split(",")[0]
            nobel_field = nobel_info[0].split(",")[1]
            # Get rid of extra spaces between some words.
            ## TODO; uncomment later to redo all scrapes.
            nobel_field = " ".join([x.strip() for x in nobel_field.split()])
            name = nobel_info[1]
            year_born = nobel_info[2].split(":")[1]
            try:
                year_died = nobel_info[3].split(":")[1]
            except IndexError:
                # Living laureates have no "Died:" line.
                year_died = ""
            bio_link_full = "http://www.nobelprize.org/" + bio_link
            name_check, born_city, died_city, affiliation = scrape_bio_page(bio_link_full)
            f.writerow([name, bio_thumbnail, bio_link, year_won,
                        nobel_field, year_born, year_died, name_check,
                        born_city, died_city, affiliation])
def scrape_bio_page(url=None):
    '''Scrape a Nobel prize winner bio page for info.
    Scrapes info from nobelprize.org bio-pages.
    Info includes: name,
                   born_location,
                   died_location,
                   affiliation at time of award / country of residence
    Args:
        url: Nobelprize.org Bio page to scrape.
    Returns:
        Four strings (may be empty if not present): name, born_city,
        death_city, affiliation
    '''
    r = requests.get(url)
    soup = BeautifulSoup(r.text, from_encoding=r.encoding)
    name = soup.find_all(attrs={'itemprop': 'Name'})[0].text
    # Find the birthdate node, get its parent, then get the last string in the
    # contents which has the city.
    born_city = soup.find_all(attrs={'itemprop': 'birthDate'})[0].parent.contents[-1]
    try:
        # Same layout for the death date; living laureates have none.
        death_city = soup.find_all(attrs={'itemprop': 'deathDate'})[0].parent.contents[-1]
    except IndexError as e:
        death_city = ""
    affiliation = "None"
    try:
        # Peace/Literature Prizes generally have residences at time of award
        # but no institution.
        residence = soup.find_all('strong', text='Residence at the time of the award:')[0].parent.contents[-1]
        affiliation = "None, " + residence
    except IndexError as e:
        # No residence listed; keep the "None" placeholder.
        pass
    try:
        # Overwrite None or Country of Residence with city affiliation if avail.
        affiliation = soup.find_all(attrs={'itemprop': 'affiliation'})[0].contents[-1]
    except IndexError as e:
        pass
    return name, born_city, death_city, affiliation
#def find_country_birth(bs4_html):
# all_names = [["name","birth_country_old_name",
# "birth_country_current_name",
# "year","field"]]
# place_acq = ""
# for i in bs4_html:
# # Only place acquired entries have an 'h3' sub-class
# if i.find_all('h3'):
# place_acq = i.h3.text
# # Only field_year/name entries have an 'h6' sub-class.
# if i.find_all('h6'):
# field_year = i.a.text
# name = i.h6.text
# year, field = grab_field_and_number(field_year)
# old_country_name, new_country_name = separate_old_country_names(place_acq)
#
# all_names.append([name.encode('utf-8').strip(),
# old_country_name.encode('utf-8').strip(),
# new_country_name.encode('utf-8').strip(),
# year.encode('utf-8').strip(),
# field.encode('utf-8').strip()])
#
# return df_from_lists(all_names, header_included=True)
def find_age(bs4_html):
    # Build a [name, age] table from the 7th section of the parsed page.
    # NOTE(review): assumes each name (h6) is preceded by its "Age" heading
    # (h3); if a name appeared first, `age` would be unbound here — confirm
    # the page structure guarantees that ordering.
    all_names = [["name", "age"]]
    # place_acq = ""
    for i in bs4_html[6].find_all(['h3', 'h6']):
        if "Age" in i.string:
            age = i.string.split()[-1]
        if "Age" not in i.string:
            name = i.string
            all_names.append([name.encode('utf-8'), age.encode('utf-8')])
    return df_from_lists(all_names, header_included=True)
def grab_city_state(city_state, country):
    '''Split a [..., city, state?] list into (city, state, other).

    For USA addresses the final element is the state and the one before it
    the city; otherwise the final element is the city.  Everything left over
    is joined into `other`.  Fix: unlike the original, the input list is NOT
    mutated (the old implementation popped elements off the caller's list).

    >>> grab_city_state(["Cardio-Pulmonary Laboratory", "Bellevue Hospital", "New York", "NY"], 'USA')
    ('New York', 'NY', 'Cardio-Pulmonary Laboratory, Bellevue Hospital')
    >>> grab_city_state(["Bellevue Hospital", "New York", "NY"], 'USA')
    ('New York', 'NY', 'Bellevue Hospital')
    >>> grab_city_state(['New York', 'NY'], 'USA')
    ('New York', 'NY', '')
    >>> grab_city_state(['New York'], 'USA')
    ('New York', '', '')
    '''
    city = ""
    state = ""
    other = ""
    if len(city_state) == 1:
        city = city_state[0]
    elif len(city_state) > 1:
        if country == "USA":
            city, state = city_state[-2], city_state[-1]
            remainder = city_state[:-2]
        else:
            city = city_state[-1]
            remainder = city_state[:-1]
        # Handle a problem case of ';' in Altenberg; Grünau im Almtal
        city = city.split(';')[0]
        other = ", ".join(remainder)
    return city.strip(), state.strip(), other.strip()
def grab_inst_country_citystate(location):
    '''Split a free-form affiliation string into (institution, location).

    >>> grab_inst_country_citystate("Edinburgh University, Edinburgh, United Kingdom")
    ('Edinburgh University', 'Edinburgh, United Kingdom')
    >>> grab_inst_country_citystate("Fred Hutchinson Cancer Research Center, Seattle, WA, USA")
    ('Fred Hutchinson Cancer Research Center', 'Seattle, WA, USA')
    >>> grab_inst_country_citystate("Columbia University Division, Cardio-Pulmonary Laboratory, Bellevue Hospital, New York, NY, USA")
    ('Bellevue Hospital', 'New York, NY, USA')
    >>> grab_inst_country_citystate('Strasbourg University, Strasbourg, Alsace (then Germany, now France)')
    ('Strasbourg University', 'Strasbourg, France')
    '''
    # Handle corner case.
    location = location.replace('then Germany, ', '')
    # Handle record with missing data.
    if location == 'Howard Hughes Medical Institute, , ':
        # Fix: use print() so the module parses on both Python 2 and 3
        # (this was a Python-2-only `print` statement).
        print(location)
        location = 'Howard Hughes Medical Institute, Chevy Chase, MD, USA'
    # Many locations end with HHMI, while still having other locations.
    if location[-33:] == ', Howard Hughes Medical Institute':
        location = location[0:-33]
    pieces = location.split(",")
    pieces = [each.strip() for each in pieces]
    # Many strings have two associated universities.
    # Some strings have 2 locations in them. Handle these differently,
    # using only the second location.
    if len(pieces) >= 6:
        # If USA is present, there will likely be a state.
        if "USA" == pieces[-1]:
            institution = pieces[-4]
            city = pieces[-3]
            state = pieces[-2]
            country = pieces[-1]
            extra_loc = ""
        else:
            institution = pieces[-3]
            city = pieces[-2]
            state = ""
            country = pieces[-1]
            extra_loc = ""
    else:
        # Otherwise, process differently
        institution = pieces[0]
        country = pieces[-1]
        city_state = pieces[1:-1]
        city, state, extra_loc = grab_city_state(city_state, country)
    # Fix problem records for Google map api lookup.
    if country == "USSR":
        country = "Russia"
    if country == "Czechoslovakia":
        country = "Czech Republic"
    # Don't use any 'extra location' info for now.
    # institution = ', '.join(filter(None, [institution, extra_loc]))
    location = ', '.join(filter(None, [city, state, country]))
    location = get_current_loc(location)
    return institution, location
def separate_old_country_names(country):
    """Return old and new country if applicable.

    Given a string with two country names, returns the old and new names.
    Args:
        country: string containing country name. May have old and new names.
    Returns:
        string of old country name and string of current country name.
        *If the country name had not changed, returns same name for both*
    >>> separate_old_country_names(' Alsace (now France)')
    ('Alsace', 'France')
    """
    if "now " not in country:
        # No rename marker — the name is current; report it for both.
        return country.strip(), country.strip()
    # Greedy match from the first '(' through the last 'now ' gives the
    # full "(... now " marker to split on.
    marker = re.search(r'\(.*now ', country).group(0)
    halves = country.split(marker)
    # Text before the marker is the old name; text after, minus the
    # trailing ')', is the current name.
    return halves[0].strip(), halves[1][:-1].strip()
def get_current_loc(location_string):
    '''Returns string of updated location.

    Pulls out the updated location (now newLocation) from the location
    to pass to Google Maps api for lon/lat coordinates.
    Args:
        location_string: String with location, with possible current updates.
    Returns:
        string of the updated location only.
    '''
    if '(now ' not in location_string:
        # No update marker: return the original string unchanged.
        return location_string
    updated = []
    for piece in location_string.split(','):
        if '(now ' in piece:
            # Keep only the current name inside "(now ...)".
            piece = piece.split('(now ')[1].strip(')')
        updated.append(piece)
    return ", ".join(updated)
def map_field(x):
    """Map a full Nobel prize title to its short field keyword.

    Returns None for any unrecognized title (matching the original
    fall-through behaviour).
    """
    field_by_title = {
        'The Nobel Prize in Literature': "literature",
        'The Nobel Prize in Chemistry': "chemistry",
        'The Nobel Prize in Physics': "physics",
        'The Nobel Prize in Physiology or Medicine': "physiology",
        'The Sveriges Riksbank Prize in Economic Sciences in Memory of Alfred Nobel': "economics",
        'The Nobel Peace Prize': "peace",
    }
    return field_by_title.get(x)
def df_from_lists(lists, header_included=True):
    """Makes a pandas DataFrame from a list of lists.

    When `header_included` is True the first inner list supplies the column
    names. The caller's list is never mutated (slicing copies).
    """
    if header_included:
        return pd.DataFrame(lists[1:], columns=lists[0])
    return pd.DataFrame(list(lists), columns=None)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
{
"content_hash": "9ba7edadb20217886524d6b8c1931b3e",
"timestamp": "",
"source": "github",
"line_count": 365,
"max_line_length": 134,
"avg_line_length": 33.104109589041094,
"alnum_prop": 0.570222626831085,
"repo_name": "FCH808/FCH808.github.io",
"id": "998a0caafa4661bb99fc464c1d44e6c60b0ee49f",
"size": "12108",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Data Visualization/Project/wrangle/wrangle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18836"
},
{
"name": "HTML",
"bytes": "11374097"
},
{
"name": "JavaScript",
"bytes": "9599"
},
{
"name": "Python",
"bytes": "190318"
}
],
"symlink_target": ""
}
|
from flask import Flask
import pandas as pd
import json
from sqlalchemy import create_engine
from sqlalchemy import text
from sqlalchemy import inspect
def object_as_dict(obj):
    """Convert a SQLAlchemy ORM object into a plain {column_key: value} dict."""
    mapper = inspect(obj).mapper
    result = {}
    for attr in mapper.column_attrs:
        result[attr.key] = getattr(obj, attr.key)
    return result
engine = create_engine('sqlite:///nba.db')

def run_query(query):
    """Execute a raw SQL string against nba.db, returning rows as dicts."""
    result = engine.execute(text(query))
    # One {column: value} dict per returned row.
    return [{column: row[column] for column in row.keys()} for row in result]
app = Flask(__name__)
@app.route('/query/<query_str>')
def query(query_str):
    # NOTE(review): this executes arbitrary SQL taken straight from the URL
    # path — SQL injection by design. Acceptable only for a trusted local
    # hackathon demo, never for a deployed service.
    rows = run_query(query_str)
    return json.dumps(rows)
@app.route('/')
def load():
    # Serve the single-page front end from the static folder.
    return app.send_static_file('index.html')
if __name__ == "__main__":
    app.run()
|
{
"content_hash": "925f136b6003ab88947d037bafc4c965",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 52,
"avg_line_length": 20.473684210526315,
"alnum_prop": 0.6529562982005142,
"repo_name": "jk2227/nba-hack",
"id": "6c6fb234f5d138bd2f4dc0d40050ca16e240db3b",
"size": "778",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1638"
},
{
"name": "HTML",
"bytes": "11948"
},
{
"name": "JavaScript",
"bytes": "12678"
},
{
"name": "Python",
"bytes": "5887"
}
],
"symlink_target": ""
}
|
from .views import *
from django.utils import timezone
from weather import get_geo_lookup
import kronos
@kronos.register('* * * * *')
def update_log():
    """Cron job (every minute): keep WaterLog rows in sync with live station status."""
    accounts = Account.objects.all()
    for account in accounts:
        status = account.get_status()
        # NOTE(review): assumes status['stations'] is a list of per-station
        # on/off flags — confirm against Account.get_status().
        # Nothing running at all -> clear any manual-run flag.
        if not sum(status['stations']):
            account.set_manual(False)
        for idx, station in enumerate(status['stations']):
            try:
                # Station numbers are 1-based; enumerate() is 0-based.
                mystation = Station.objects.get(number=idx+1, account=account)
            except Station.DoesNotExist:
                continue
            try:
                # An open log entry (no end_time) means this station is
                # already recorded as running.
                running = WaterLog.objects.get(end_time__isnull=True, account=account, station=mystation)
            except WaterLog.DoesNotExist:
                running = False
            except WaterLog.MultipleObjectsReturned:
                # More than one open log is a data-integrity problem; surface it.
                raise
            if station and not running:
                #Create Log
                WaterLog.objects.create(account=account, station=mystation, start_time=timezone.now())
            elif station is False and running:
                #End log
                running.end_time = timezone.now()
                running.save()
@kronos.register('0 1 * * *')
def pull_data():
    """Cron job (daily at 01:00): refresh geo data, forecasts, and schedules."""
    # Default schedule window parameters.
    six_am = datetime.time(6)
    eight_am = datetime.time(8)
    half_hour = datetime.time(0, 30)
    five_minutes = datetime.time(0, 5)
    accounts = Account.objects.all()
    for account in accounts:
        # NOTE(review): probable precedence bug — this parses as
        # (zip_code and not city) or (not state), so an account with no
        # zip_code but a blank state still triggers a geo lookup.
        # Likely intended: zip_code and (not city or not state).
        # Confirm before changing behavior.
        if account.zip_code and not account.city or not account.state:
            data = get_geo_lookup(account)
            # Normalize the city name to underscore-separated form.
            temp_city = data["location"]["city"].replace('-', '_')
            temp_city = temp_city.replace(' ', '_')
            account.city = temp_city
            account.state = data["location"]["state"]
            account.save()
        forecasts = ForecastWeather.objects.fetch(account)
        for forecast in forecasts:
            forecast.save()
        try:
            schedule = Schedule.objects.filter(account=account)[0]
        except IndexError:
            # No schedule yet: create a default 6-8am window, repeating every
            # 30 minutes with 5-minute run times.
            schedule = Schedule.objects.create(account=account, name="Primary", start_time=six_am, end_time=eight_am,
                                               repeat=half_hour, run_time=five_minutes)
        schedule.check_schedule(forecasts)
|
{
"content_hash": "806f7e6fd1c2d49c9e0ab659e5370b6a",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 117,
"avg_line_length": 37.91525423728814,
"alnum_prop": 0.581135449262405,
"repo_name": "emmceemoore/ospi-website",
"id": "288765c85ff478f24904c1a3a127351615c0e11e",
"size": "2237",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/ospi/cron.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "63"
},
{
"name": "JavaScript",
"bytes": "0"
},
{
"name": "Python",
"bytes": "63527"
},
{
"name": "Shell",
"bytes": "3073"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup, find_packages
from dewey import VERSION
ROOT_DIR = os.path.dirname(__file__)
SOURCE_DIR = os.path.join(ROOT_DIR)

# Runtime dependencies are maintained in requirements.txt, one per line.
with open('requirements.txt') as requirements_file:
    requirements = requirements_file.read().splitlines()

setup(
    name="dewey",
    description="A helpful CLI friend",
    author="GreenKahuna",
    author_email="steven@greenkahuna.com",
    url="https://github.com/greenkahuna/dewey",
    version=VERSION,
    packages=find_packages(),
    install_requires=requirements,
    include_package_data=True,
    # Expose the `hey_dewey` command on the PATH.
    entry_points={
        "console_scripts": [
            "hey_dewey = dewey.hey_dewey:main",
        ],
    },
)
|
{
"content_hash": "d1283385ec4f5b4bfef94e55c09e7372",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 47,
"avg_line_length": 24.96153846153846,
"alnum_prop": 0.6563944530046225,
"repo_name": "skoczen/dewey",
"id": "aadb6be831ab942d326029231a451a302e6ad3a3",
"size": "670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13306"
},
{
"name": "Shell",
"bytes": "2753"
}
],
"symlink_target": ""
}
|
from pysys.constants import *
from xpybuild.xpybuild_basetest import XpybuildBaseTest
import shutil, re, os
class PySysTest(XpybuildBaseTest):
	"""Checks TargetsWithinDir pathset behaviour: with FindPaths the copied
	files keep their paths relative to the pathset's base directory, while a
	plain copy of the same pathset produces an empty directory."""
	def execute(self):
		# build only the target we're interested in - this ensures the dependencies are correct
		self.xpybuild(args=['${OUTPUT_DIR}/CopyFromTargetsWithinDir/', '${OUTPUT_DIR}/CopyFromTargetsWithinDir-FindPaths/'])
	def validate(self):
		# FindPaths reproduces files with their relative paths preserved...
		self.assertPathExists('build-output/CopyFromTargetsWithinDir-FindPaths/foo1.txt')
		self.assertPathExists('build-output/CopyFromTargetsWithinDir-FindPaths/dirA/dirB/foo2.txt')
		# ...while dirC/foo3.txt must NOT be copied.
		self.assertPathExists('build-output/CopyFromTargetsWithinDir-FindPaths/dirC/foo3.txt', exists=False)
		# without FindPaths it should succeed and be created, but there won't be anything in it because
		# this pathset resolves to just the parent dir - which is necessary for FindPaths to work, and also
		# for C++ include directories
		self.assertThat('[] == os.listdir(%s)', repr(self.output+'/build-output/CopyFromTargetsWithinDir/'))
|
{
"content_hash": "6f4db61fc98ab3112c38b0a5921f7150",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 118,
"avg_line_length": 56.388888888888886,
"alnum_prop": 0.7832512315270936,
"repo_name": "xpybuild/xpybuild",
"id": "5d850890a7a77a0265ff622e861dae765fa045f9",
"size": "1015",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/correctness/framework/PathSet_TargetsWithinDir/run.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "96"
},
{
"name": "C",
"bytes": "610"
},
{
"name": "C++",
"bytes": "872"
},
{
"name": "Dockerfile",
"bytes": "55"
},
{
"name": "Java",
"bytes": "423"
},
{
"name": "Python",
"bytes": "688439"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas as pd
import os
# Number of raw sensor readings that make up one time-sequence window.
TIME_SEQUENCE_LENGTH = 30

# Shared centered rolling-window size used by the aggregate helpers below.
_ROLLING_WINDOW = TIME_SEQUENCE_LENGTH - 2


def rolling_average(df):
    """Mean of the centered rolling mean of a Series/DataFrame group.

    Fix: the module-level ``pd.rolling_mean`` API (and its siblings) was
    removed in pandas 0.23; this uses the equivalent ``.rolling(...)``.
    """
    return df.rolling(window=_ROLLING_WINDOW, center=True).mean().mean()


def rolling_median(df):
    """Mean of the centered rolling median."""
    return df.rolling(window=_ROLLING_WINDOW, center=True).median().mean()


def rolling_max(df):
    """Mean of the centered rolling max."""
    return df.rolling(window=_ROLLING_WINDOW, center=True).max().mean()


def rolling_min(df):
    """Mean of the centered rolling min."""
    return df.rolling(window=_ROLLING_WINDOW, center=True).min().mean()


def standard_deviation(df):
    """Sample standard deviation (pandas default, ddof=1)."""
    return df.std()


def max_min_dif(df):
    """Difference between the averaged rolling min and rolling max.

    NOTE(review): despite the name, this computes (min - max), which is
    always <= 0. Kept as-is to preserve the original feature values —
    confirm whether (max - min) was intended.
    """
    rmin = df.rolling(window=_ROLLING_WINDOW, center=True).min().mean()
    rmax = df.rolling(window=_ROLLING_WINDOW, center=True).max().mean()
    return rmin - rmax
def create_rm_feature(df, sequence_length):
    """Build per-window summary features from raw accelerometer/gyro columns.

    Groups the six raw signal columns into windows of `sequence_length`
    consecutive rows and emits, per window: the mean of each raw signal
    (keeping the original column names), rolling median/max/min aggregates
    for the accelerometer axes, and the min-max difference for all six axes.

    Fix: the original body was ~40 near-identical copy-paste stanzas (plus
    large swaths of commented-out dead code); the repetition is folded into
    two small helpers, preserving the exact column set, names, and order.

    Args:
        df: DataFrame with columns ACCEL_X/Y/Z and GYRO_X/Y/Z.
        sequence_length: number of consecutive rows per feature window.
    Returns:
        DataFrame with one row per window and one column per feature.
    """
    ACCEL = ['ACCEL_X', 'ACCEL_Y', 'ACCEL_Z']
    GYRO = ['GYRO_X', 'GYRO_Y', 'GYRO_Z']
    # Short column suffixes used in derived feature names.
    SUFFIX = {'ACCEL_X': 'x', 'ACCEL_Y': 'y', 'ACCEL_Z': 'z',
              'GYRO_X': 'gx', 'GYRO_Y': 'gy', 'GYRO_Z': 'gz'}

    features = []

    # Group labels: each index value repeated `sequence_length` times,
    # truncated to the number of rows (same construction as the original).
    # COULD try putting a Kalman filter here before splitting into chunks to
    # smooth readings first - not sure if that is a good approach.
    i = df['ACCEL_X'].index.values
    idx = np.array([i] * sequence_length).T.flatten()[:len(df['ACCEL_X'])]

    def add_means():
        # Per-window means of the raw signals keep the original column names.
        for col in ACCEL + GYRO:
            s = df[col].astype(float).groupby(idx).mean()
            s.name = col
            features.append(s)

    def add_applied(func, label, columns):
        # One derived column per input column, named '<label>_<suffix>'.
        for col in columns:
            s = df[col].groupby(idx).apply(func)
            s.name = '%s_%s' % (label, SUFFIX[col])
            features.append(s)

    add_means()
    add_applied(rolling_median, 'rolling_median', ACCEL)
    add_applied(rolling_max, 'rolling_max', ACCEL)
    add_applied(rolling_min, 'rolling_min', ACCEL)
    add_applied(max_min_dif, 'diff', ACCEL + GYRO)

    return pd.concat(features, axis=1)
|
{
"content_hash": "9550f20684aa0f9d3a3324319befce88",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 156,
"avg_line_length": 29.074257425742573,
"alnum_prop": 0.6354503660820705,
"repo_name": "ChristopherGS/sensor_readings",
"id": "24738ab2d71594dc53de7f9978564cfa7ed29741",
"size": "5873",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/machine_learning/feature_engineering.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5726"
},
{
"name": "HTML",
"bytes": "15821"
},
{
"name": "Jupyter Notebook",
"bytes": "5080949"
},
{
"name": "Python",
"bytes": "165446"
}
],
"symlink_target": ""
}
|
import asyncio
import functools
import os
import shutil
import tempfile
from unittest import mock
from unittest.mock import MagicMock
import pytest
from aiohttp import abc, web
from aiohttp.web_urldispatcher import SystemRoute
@pytest.fixture(scope='function')
def tmp_dir_path(request):
    """Create a fresh temporary directory and return its path.

    A pytest finalizer removes the directory (and everything inside it)
    once the requesting test has finished.
    """
    path = tempfile.mkdtemp()
    # Remove the whole tree at teardown.
    request.addfinalizer(functools.partial(shutil.rmtree, path))
    return path
# Two cases: show_index=False must be refused (403); show_index=True must
# return the generated directory-listing HTML byte-for-byte.
@pytest.mark.parametrize("show_index,status,data",
                         [(False, 403, None),
                          (True, 200,
                           b'<html>\n<head>\n<title>Index of /</title>\n'
                           b'</head>\n<body>\n<h1>Index of /</h1>\n<ul>\n'
                           b'<li><a href="/my_dir">my_dir/</a></li>\n'
                           b'<li><a href="/my_file">my_file</a></li>\n'
                           b'</ul>\n</body>\n</html>')])
@asyncio.coroutine
def test_access_root_of_static_handler(tmp_dir_path, loop, test_client,
                                       show_index, status, data):
    """
    Tests the operation of static file server.
    Try to access the root of static file server, and make
    sure that correct HTTP statuses are returned depending if we directory
    index should be shown or not.
    """
    # Put a file inside tmp_dir_path:
    my_file_path = os.path.join(tmp_dir_path, 'my_file')
    with open(my_file_path, 'w') as fw:
        fw.write('hello')
    # And a subdirectory containing one file, so the index lists both
    # a directory entry and a file entry.
    my_dir_path = os.path.join(tmp_dir_path, 'my_dir')
    os.mkdir(my_dir_path)
    my_file_path = os.path.join(my_dir_path, 'my_file_in_dir')
    with open(my_file_path, 'w') as fw:
        fw.write('world')
    app = web.Application(loop=loop)
    # Register global static route:
    app.router.add_static('/', tmp_dir_path, show_index=show_index)
    client = yield from test_client(app)
    # Request the root of the static directory.
    r = yield from client.get('/')
    assert r.status == status
    if data:
        assert r.headers['Content-Type'] == "text/html; charset=utf-8"
        read_ = (yield from r.read())
        assert read_ == data
@pytest.mark.parametrize('data', ['hello world'])
@asyncio.coroutine
def test_follow_symlink(tmp_dir_path, loop, test_client, data):
    """Serve a file through a directory symlink when follow_symlinks=True."""
    # Real directory with one file in it.
    target_dir = os.path.join(tmp_dir_path, 'my_dir')
    os.mkdir(target_dir)
    target_file = os.path.join(target_dir, 'my_file_in_dir')
    with open(target_file, 'w') as fw:
        fw.write(data)
    # Symlink pointing at that directory, inside the static root.
    link_path = os.path.join(tmp_dir_path, 'my_symlink')
    os.symlink(target_dir, link_path)
    app = web.Application(loop=loop)
    app.router.add_static('/', tmp_dir_path, follow_symlinks=True)
    client = yield from test_client(app)
    # Fetch the file via the symlinked path.
    resp = yield from client.get('/my_symlink/my_file_in_dir')
    assert resp.status == 200
    assert (yield from resp.text()) == data
# First case: file with a space at the static root; second case: file with
# spaces inside a directory whose name also contains spaces.
@pytest.mark.parametrize('dir_name,filename,data', [
    ('', 'test file.txt', 'test text'),
    ('test dir name', 'test dir file .txt', 'test text file folder')
])
@asyncio.coroutine
def test_access_to_the_file_with_spaces(tmp_dir_path, loop, test_client,
                                        dir_name, filename, data):
    """
    Checks operation of static files with spaces
    """
    # os.path.join('', x) == x, so the empty dir_name case writes directly
    # into the static root.
    my_dir_path = os.path.join(tmp_dir_path, dir_name)
    if dir_name:
        os.mkdir(my_dir_path)
    my_file_path = os.path.join(my_dir_path, filename)
    with open(my_file_path, 'w') as fw:
        fw.write(data)
    app = web.Application(loop=loop)
    # NOTE(review): the raw (unquoted) path is used as URL; the client is
    # expected to percent-encode the spaces on the wire.
    url = os.path.join('/', dir_name, filename)
    app.router.add_static('/', tmp_dir_path)
    client = yield from test_client(app)
    r = yield from client.get(url)
    assert r.status == 200
    assert (yield from r.text()) == data
@asyncio.coroutine
def test_access_non_existing_resource(tmp_dir_path, loop, test_client):
    """Requesting a path absent from the static root must yield 404."""
    app = web.Application(loop=loop)
    # Static root is the (empty) temporary directory.
    app.router.add_static('/', tmp_dir_path, show_index=True)
    client = yield from test_client(app)
    resp = yield from client.get('/non_existing_resource')
    assert resp.status == 404
@pytest.mark.parametrize('registered_path,request_url', [
    ('/a:b', '/a:b'),
    ('/a@b', '/a@b'),
    ('/a:b', '/a%3Ab'),
])
@asyncio.coroutine
def test_url_escaping(loop, test_client, registered_path, request_url):
    """
    Tests accessing a resource with special characters in its path.

    The route must match both the literal form and the percent-encoded
    form (e.g. '%3A' for ':') of the registered path.
    """
    app = web.Application(loop=loop)

    def handler(_):
        # Request object is unused; an empty 200 response is enough.
        return web.Response()
    app.router.add_get(registered_path, handler)
    client = yield from test_client(app)
    r = yield from client.get(request_url)
    assert r.status == 200
@asyncio.coroutine
def test_unauthorized_folder_access(tmp_dir_path, loop, test_client):
    """
    Tests the unauthorized access to a folder of static file server.
    Try to list a folder content of static file server when server does not
    have permissions to do so for the folder.
    """
    my_dir_path = os.path.join(tmp_dir_path, 'my_dir')
    os.mkdir(my_dir_path)
    app = web.Application(loop=loop)
    # Patch pathlib so every Path the handler builds is this mock; listing
    # it raises PermissionError, simulating a directory we cannot read.
    with mock.patch('pathlib.Path.__new__') as path_constructor:
        path = MagicMock()
        path.joinpath.return_value = path
        path.resolve.return_value = path
        path.iterdir.return_value.__iter__.side_effect = PermissionError()
        path_constructor.return_value = path
        # Register global static route:
        app.router.add_static('/', tmp_dir_path, show_index=True)
        client = yield from test_client(app)
        # Request the root of the static directory.
        r = yield from client.get('/my_dir')
        # PermissionError must surface as 403 Forbidden, not a 500.
        assert r.status == 403
@asyncio.coroutine
def test_access_symlink_loop(tmp_dir_path, loop, test_client):
    """A self-referential symlink cannot be resolved and must give 404."""
    # Symlink whose target is itself.
    loop_link = os.path.join(tmp_dir_path, 'my_symlink')
    os.symlink(loop_link, loop_link)
    app = web.Application(loop=loop)
    app.router.add_static('/', tmp_dir_path, show_index=True)
    client = yield from test_client(app)
    resp = yield from client.get('/my_symlink')
    assert resp.status == 404
@asyncio.coroutine
def test_access_special_resource(tmp_dir_path, loop, test_client):
    """
    Tests the access to a resource that is neither a file nor a directory.
    Checks that if a special resource is accessed (f.e. named pipe or UNIX
    domain socket) then 404 HTTP status returned.
    """
    app = web.Application(loop=loop)
    # Patch pathlib so that joining the path 'special' yields a mock that
    # reports being neither a file nor a directory.
    with mock.patch('pathlib.Path.__new__') as path_constructor:
        special = MagicMock()
        special.is_dir.return_value = False
        special.is_file.return_value = False
        path = MagicMock()
        path.joinpath.side_effect = lambda p: (special if p == 'special'
                                               else path)
        path.resolve.return_value = path
        special.resolve.return_value = special
        path_constructor.return_value = path
        # Register global static route:
        app.router.add_static('/', tmp_dir_path, show_index=True)
        client = yield from test_client(app)
        # Request the root of the static directory.
        r = yield from client.get('/special')
        assert r.status == 404
# NOTE(review): "partialy" is misspelled, but the name is the test's public
# identity (selected via -k etc.), so it is left unchanged.
@asyncio.coroutine
def test_partialy_applied_handler(loop, test_client):
    """A functools.partial-wrapped coroutine must be accepted as handler."""
    app = web.Application(loop=loop)

    @asyncio.coroutine
    def handler(data, request):
        return web.Response(body=data)
    # Bind the first positional argument; the router sees a 1-arg callable.
    app.router.add_route('GET', '/', functools.partial(handler, b'hello'))
    client = yield from test_client(app)
    r = yield from client.get('/')
    data = (yield from r.read())
    assert data == b'hello'
def test_system_route():
    """SystemRoute exposes status/reason but has no URL, name or resource."""
    sys_route = SystemRoute(web.HTTPCreated(reason='test'))
    # System routes are not addressable.
    with pytest.raises(RuntimeError):
        sys_route.url()
    with pytest.raises(RuntimeError):
        sys_route.url_for()
    assert sys_route.name is None
    assert sys_route.resource is None
    assert repr(sys_route) == "<SystemRoute 201: test>"
    assert sys_route.status == 201
    assert sys_route.reason == 'test'
@asyncio.coroutine
def test_412_is_returned(loop, test_client):
    """An HTTPPreconditionFailed raised from resolve() becomes a 412 reply."""

    class MyRouter(abc.AbstractRouter):

        @asyncio.coroutine
        def resolve(self, request):
            # Every request fails resolution with 412.
            raise web.HTTPPreconditionFailed()

    app = web.Application(router=MyRouter(), loop=loop)
    client = yield from test_client(app)
    resp = yield from client.get('/')
    assert resp.status == 412
@asyncio.coroutine
def test_allow_head(loop, test_client):
    """
    Test allow_head on routes.
    """
    app = web.Application(loop=loop)

    def handler(_):
        return web.Response()
    # add_get registers a HEAD route automatically unless allow_head=False.
    app.router.add_get('/a', handler, name='a')
    app.router.add_get('/b', handler, allow_head=False, name='b')
    client = yield from test_client(app)
    r = yield from client.get('/a')
    assert r.status == 200
    yield from r.release()
    # HEAD is implicitly allowed on /a.
    r = yield from client.head('/a')
    assert r.status == 200
    yield from r.release()
    r = yield from client.get('/b')
    assert r.status == 200
    yield from r.release()
    # HEAD was disabled on /b, so 405 Method Not Allowed.
    r = yield from client.head('/b')
    assert r.status == 405
    yield from r.release()
|
{
"content_hash": "51176b80b5bb9a9c7232d151a9c78797",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 75,
"avg_line_length": 29.565349544072948,
"alnum_prop": 0.626297933586923,
"repo_name": "alex-eri/aiohttp-1",
"id": "6ce5c3400c670e405010e43af844be17d1ccc521",
"size": "9727",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_web_urldispatcher.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "CSS",
"bytes": "112"
},
{
"name": "HTML",
"bytes": "4890"
},
{
"name": "Makefile",
"bytes": "3293"
},
{
"name": "PLpgSQL",
"bytes": "765"
},
{
"name": "Python",
"bytes": "1218882"
},
{
"name": "Shell",
"bytes": "2309"
}
],
"symlink_target": ""
}
|
import itertools
import logging
import operator
import os
import six
import sys
import weakref
import ryu.contrib
ryu.contrib.update_module_path()
import ovs.db.data
import ovs.db.types
import ovs.poller
from ovs import (jsonrpc,
ovsuuid,
stream)
from ovs.db import idl
from ryu.lib import hub
from ryu.lib.ovs import vswitch_idl
LOG = logging.getLogger(__name__) # use ovs.vlog?
# for debug
def ovsrec_row_changes_to_string(ovsrec_row):
    """Render a row's pending ``_changes`` as {column: string} for debugging.

    A falsy ``_changes`` (empty dict or None) is returned unchanged.
    """
    changes = ovsrec_row._changes
    if not changes:
        return changes
    return {column: datum.to_string() for column, datum in changes.items()}
# for debug
def ovsrec_row_to_string(ovsrec_row):
    """Build a one-line debug description of an IDL row."""
    parts = ['uuid: %s ' % ovsrec_row.uuid]
    data = ovsrec_row._data
    if data:
        rendered = {column: datum.to_string()
                    for column, datum in data.items()}
        parts.append('_data: %s ' % rendered)
    else:
        parts.append('_data: %s ' % data)
    parts.append('_changes: %s' % ovsrec_row_changes_to_string(ovsrec_row))
    return ''.join(parts)
def atom_from_string(base, value_string, symtab=None):
    """Parse *value_string* into an ovs.db.data.Atom of *base*'s type.

    :param base: ovs.db.types.BaseType describing the expected atom type.
    :param value_string: textual value to convert.
    :param symtab: optional symbol table; required for '@name' UUID refs.
    :raises ValueError: if the string cannot be parsed as the given type.
    """
    type_ = base.type
    atom = None
    if type_ == ovs.db.types.IntegerType:
        atom = ovs.db.data.Atom(type_, int(value_string))
    elif type_ == ovs.db.types.RealType:
        # TODO:XXX negation
        atom = ovs.db.data.Atom(
            type_, ovs.db.parser.float_to_int(float(value_string)))
    elif type_ == ovs.db.types.BooleanType:
        if value_string in ("true", "yes", "on", "1"):
            atom = ovs.db.data.Atom(type_, True)
        elif value_string in ("false", "no", "off", "0"):
            # BUG FIX: this branch used `value_string == (...)`, comparing a
            # str against a tuple, which is never true -- so every "false"
            # spelling fell through and raised ValueError below.
            atom = ovs.db.data.Atom(type_, False)
    elif type_ == ovs.db.types.StringType:
        # TODO:XXXX escape: if value_string[0] == '"':
        atom = ovs.db.data.Atom(type_, value_string)
    elif type_ == ovs.db.types.UuidType:
        if value_string[0] == "@":
            assert symtab is not None
            uuid_ = symtab[value_string]
            atom = ovs.db.data.Atom(type_, uuid_)
        else:
            atom = ovs.db.data.Atom(type_,
                                    ovs.ovsuuid.from_string(value_string))
    if atom is None:
        raise ValueError("expected %s" % type_.to_string(), value_string)
    atom.check_constraints(base)
    return atom
def datum_from_string(type_, value_string, symtab=None):
    """Convert *value_string* into an OVSDB datum and return its JSON form.

    Handles map ('k=v,...'), set ('v1,v2,...') and scalar column types;
    brace/bracket-delimited literals are not implemented yet.
    """
    value_string = value_string.strip()
    if type_.is_map():
        if value_string.startswith('{'):
            # TODO:dict case
            LOG.debug('value_string %s', value_string)
            raise NotImplementedError()
        # Parse 'key=value' pairs, then convert each side to an Atom.
        d = dict(v.split('=', 1) for v in value_string.split(','))
        d = dict((atom_from_string(type_.key, key, symtab),
                  atom_from_string(type_.value, value, symtab))
                 for key, value in d.items())
    elif type_.is_set():
        if value_string.startswith('['):
            # TODO:set case
            LOG.debug('value_string %s', value_string)
            raise NotImplementedError()
        values = value_string.split(',')
        # Sets are represented as {atom: None} mappings for Datum.
        d = dict((atom_from_string(type_.key, value, symtab), None)
                 for value in values)
    else:
        atom = atom_from_string(type_.key, value_string, symtab)
        d = {atom: None}
    datum = ovs.db.data.Datum(type_, d)
    return datum.to_json()
def ifind(pred, seq):
    """Return the first item of *seq* for which *pred* is true, else None.

    Rewritten without ``itertools.ifilter(...).next()``, which exists only
    on Python 2; a plain loop behaves identically on Python 2 and 3.
    """
    for item in seq:
        if pred(item):
            return item
    return None
def not_reached():
    """Abort the process; called when logically unreachable code runs."""
    os.abort()
def vsctl_fatal(msg):
    """Log *msg* at error level and raise Exception(msg).

    Raises a catchable Exception rather than killing the process, so
    embedding applications can recover.
    """
    LOG.error(msg)
    raise Exception(msg)  # not call ovs.utils.ovs_fatal for reusability
class VSCtlBridge(object):
    """Cache entry for an Open vSwitch bridge (real, or fake VLAN bridge)."""

    def __init__(self, ovsrec_bridge, name, parent, vlan):
        super(VSCtlBridge, self).__init__()
        self.br_cfg = ovsrec_bridge  # IDL Bridge row; None for fake bridges
        self.name = name
        self.ports = set()           # member VSCtlPort objects
        self.parent = parent         # parent VSCtlBridge for fake bridges
        self.vlan = vlan             # VLAN tag; 0 for a real bridge
        self.children = set()  # WeakSet is needed?

    def find_vlan_bridge(self, vlan):
        """Return the child fake bridge for *vlan*, or None."""
        return ifind(lambda child: child.vlan == vlan, self.children)
class VSCtlPort(object):
    """Cache entry for a Port row, linked back to its bridge."""

    def __init__(self, vsctl_bridge_parent, ovsrec_port):
        super(VSCtlPort, self).__init__()
        self.bridge = weakref.ref(vsctl_bridge_parent)  # backpointer
        self.port_cfg = ovsrec_port  # IDL Port row
        self.ifaces = set()          # member VSCtlIface objects
        self.qos = None              # VSCtlQoS once populated
class VSCtlIface(object):
    """Cache entry for an Interface row, linked back to its port."""

    def __init__(self, vsctl_port_parent, ovsrec_iface):
        super(VSCtlIface, self).__init__()
        self.port = weakref.ref(vsctl_port_parent)  # backpointer
        self.iface_cfg = ovsrec_iface  # IDL Interface row
class VSCtlQoS(object):
    """Cache entry for a port's QoS configuration."""

    def __init__(self, vsctl_port_parent, ovsrec_qos):
        super(VSCtlQoS, self).__init__()
        self.port = weakref.ref(vsctl_port_parent)  # backpointer
        self.qos_cfg = ovsrec_qos  # list of QoS rows (empty or single)
        self.queues = set()        # member VSCtlQueue objects
class VSCtlQueue(object):
    """Cache entry for a Queue row, linked back to its QoS."""

    def __init__(self, vsctl_qos_parent, ovsrec_queue):
        super(VSCtlQueue, self).__init__()
        self.qos = weakref.ref(vsctl_qos_parent)  # backpointer
        self.queue_cfg = ovsrec_queue  # IDL Queue row
class VSCtlContext(object):
    def _invalidate_cache(self):
        """Unconditionally drop all cached topology state."""
        self.cache_valid = False
        self.bridges.clear()
        self.ports.clear()
        self.ifaces.clear()
    def __init__(self, idl_, txn, ovsrec_open_vswitch):
        """Bind an IDL connection, a transaction and the Open_vSwitch row."""
        super(VSCtlContext, self).__init__()

        # Modifiable state
        # self.table = None
        self.idl = idl_            # ovs.db.idl.Idl connection
        self.txn = txn             # current transaction
        self.ovs = ovsrec_open_vswitch  # singleton Open_vSwitch row
        self.symtab = None  # TODO:XXX
        self.verified_ports = False

        # A cache of the contents of the database.
        self.cache_valid = False
        self.bridges = {}  # bridge name -> VSCtlBridge
        self.ports = {}  # port name -> VSCtlPort
        self.ifaces = {}  # iface name -> VSCtlIface

        self.try_again = False  # used by wait-until command
    def done(self):
        """Finish using this context; drops the topology cache."""
        self._invalidate_cache()
    def verify_bridges(self):
        """IDL-verify the Open_vSwitch row's 'bridges' column."""
        self.ovs.verify(vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES)
    def verify_ports(self):
        """IDL-verify bridge->ports and port->interfaces columns, once."""
        if self.verified_ports:
            return

        self.verify_bridges()
        for ovsrec_bridge in self.idl.tables[
                vswitch_idl.OVSREC_TABLE_BRIDGE].rows.values():
            ovsrec_bridge.verify(vswitch_idl.OVSREC_BRIDGE_COL_PORTS)
        for ovsrec_port in self.idl.tables[
                vswitch_idl.OVSREC_TABLE_PORT].rows.values():
            ovsrec_port.verify(vswitch_idl.OVSREC_PORT_COL_INTERFACES)

        self.verified_ports = True
    def add_bridge_to_cache(self, ovsrec_bridge, name, parent, vlan):
        """Create a VSCtlBridge cache entry and register it by name."""
        vsctl_bridge = VSCtlBridge(ovsrec_bridge, name, parent, vlan)
        if parent:
            # Fake (VLAN) bridge: link it under its real parent.
            parent.children.add(vsctl_bridge)
        self.bridges[name] = vsctl_bridge
        return vsctl_bridge
    def del_cached_bridge(self, vsctl_bridge):
        """Remove an (already emptied) bridge from cache and database."""
        assert not vsctl_bridge.ports
        assert not vsctl_bridge.children

        parent = vsctl_bridge.parent
        if parent:
            parent.children.remove(vsctl_bridge)
            vsctl_bridge.parent = None  # break circular reference
        ovsrec_bridge = vsctl_bridge.br_cfg
        if ovsrec_bridge:
            # Real bridge: delete the row and unlink it from Open_vSwitch.
            ovsrec_bridge.delete()
            self.ovs_delete_bridge(ovsrec_bridge)

        del self.bridges[vsctl_bridge.name]
    def del_cached_qos(self, vsctl_qos):
        """Detach a VSCtlQoS entry from its port and drop its links."""
        vsctl_qos.port().qos = None
        vsctl_qos.port = None
        vsctl_qos.queues = None
def add_port_to_cache(self, vsctl_bridge_parent, ovsrec_port):
tag = getattr(ovsrec_port, vswitch_idl.OVSREC_PORT_COL_TAG, None)
if (tag is not None and tag >= 0 and tag < 4096):
vlan_bridge = vsctl_bridge_parent.find_vlan_bridge()
if vlan_bridge:
vsctl_bridge_parent = vlan_bridge
vsctl_port = VSCtlPort(vsctl_bridge_parent, ovsrec_port)
vsctl_bridge_parent.ports.add(vsctl_port)
self.ports[ovsrec_port.name] = vsctl_port
return vsctl_port
    def del_cached_port(self, vsctl_port):
        """Remove an (already iface-less) port from cache and database."""
        assert not vsctl_port.ifaces

        vsctl_port.bridge().ports.remove(vsctl_port)
        vsctl_port.bridge = None
        port = self.ports.pop(vsctl_port.port_cfg.name)
        assert port == vsctl_port
        vsctl_port.port_cfg.delete()
    def add_iface_to_cache(self, vsctl_port_parent, ovsrec_iface):
        """Create a VSCtlIface cache entry under *vsctl_port_parent*."""
        vsctl_iface = VSCtlIface(vsctl_port_parent, ovsrec_iface)
        vsctl_port_parent.ifaces.add(vsctl_iface)
        self.ifaces[ovsrec_iface.name] = vsctl_iface
    def add_qos_to_cache(self, vsctl_port_parent, ovsrec_qos):
        """Attach a VSCtlQoS entry (ovsrec_qos is a row list) to a port."""
        vsctl_qos = VSCtlQoS(vsctl_port_parent, ovsrec_qos)
        vsctl_port_parent.qos = vsctl_qos
        return vsctl_qos
    def add_queue_to_cache(self, vsctl_qos_parent, ovsrec_queue):
        """Attach a VSCtlQueue entry under *vsctl_qos_parent*."""
        vsctl_queue = VSCtlQueue(vsctl_qos_parent, ovsrec_queue)
        vsctl_qos_parent.queues.add(vsctl_queue)
    def del_cached_iface(self, vsctl_iface):
        """Remove an interface from cache and delete its database row."""
        vsctl_iface.port().ifaces.remove(vsctl_iface)
        vsctl_iface.port = None
        del self.ifaces[vsctl_iface.iface_cfg.name]
        vsctl_iface.iface_cfg.delete()
def invalidate_cache(self):
if not self.cache_valid:
return
self._invalidate_cache()
    def populate_cache(self):
        """Build the bridge/port/iface cache from the Bridge table."""
        self._populate_cache(self.idl.tables[vswitch_idl.OVSREC_TABLE_BRIDGE])
@staticmethod
def port_is_fake_bridge(ovsrec_port):
return (ovsrec_port.fake_bridge and
ovsrec_port.tag >= 0 and ovsrec_port.tag <= 4095)
def _populate_cache(self, ovsrec_bridges):
if self.cache_valid:
return
self.cache_valid = True
bridges = set()
ports = set()
for ovsrec_bridge in ovsrec_bridges.rows.values():
name = ovsrec_bridge.name
if name in bridges:
LOG.warn('%s: database contains duplicate bridge name', name)
bridges.add(name)
vsctl_bridge = self.add_bridge_to_cache(ovsrec_bridge, name,
None, 0)
if not vsctl_bridge:
continue
for ovsrec_port in ovsrec_bridge.ports:
port_name = ovsrec_port.name
if port_name in ports:
# Duplicate ovsrec_port name.
# (We will warn about that later.)
continue
ports.add(port_name)
if (self.port_is_fake_bridge(ovsrec_port) and
port_name not in bridges):
bridges.add(port_name)
self.add_bridge_to_cache(None, port_name, vsctl_bridge,
ovsrec_port.tag)
bridges = set()
for ovsrec_bridge in ovsrec_bridges.rows.values():
name = ovsrec_bridge.name
if name in bridges:
continue
bridges.add(name)
vsctl_bridge = self.bridges[name]
for ovsrec_port in ovsrec_bridge.ports:
port_name = ovsrec_port.name
vsctl_port = self.ports.get(port_name)
if vsctl_port:
if ovsrec_port == vsctl_port.port_cfg:
LOG.warn('%s: vsctl_port is in multiple bridges '
'(%s and %s)',
port_name, vsctl_bridge.name,
vsctl_port.br.name)
else:
LOG.error('%s: database contains duplicate '
'vsctl_port name',
ovsrec_port.name)
continue
if (self.port_is_fake_bridge(ovsrec_port) and
port_name in bridges):
continue
# LOG.debug('ovsrec_port %s %s %s',
# ovsrec_port, ovsrec_port._data, ovsrec_port.tag)
vsctl_port = self.add_port_to_cache(vsctl_bridge, ovsrec_port)
# LOG.debug('vsctl_port %s', vsctl_port)
for ovsrec_iface in ovsrec_port.interfaces:
iface = self.ifaces.get(ovsrec_iface.name)
if iface:
if ovsrec_iface == iface.iface_cfg:
LOG.warn(
'%s: interface is in multiple ports '
'(%s and %s)',
ovsrec_iface.name,
iface.port().port_cfg.name,
vsctl_port.port_cfg.name)
else:
LOG.error(
'%s: database contains duplicate interface '
'name',
ovsrec_iface.name)
continue
self.add_iface_to_cache(vsctl_port, ovsrec_iface)
ovsrec_qos = ovsrec_port.qos
vsctl_qos = self.add_qos_to_cache(vsctl_port, ovsrec_qos)
if len(ovsrec_qos):
for ovsrec_queue in ovsrec_qos[0].queues:
self.add_queue_to_cache(vsctl_qos, ovsrec_queue)
    def check_conflicts(self, name, msg):
        """Fatal error (prefixed with *msg*) if *name* is already taken by
        a bridge, port or interface."""
        self.verify_ports()
        if name in self.bridges:
            vsctl_fatal('%s because a bridge named %s already exists' %
                        (msg, name))
        if name in self.ports:
            vsctl_fatal('%s because a port named %s already exists on '
                        'bridge %s' %
                        (msg, name, self.ports[name].bridge().name))
        if name in self.ifaces:
            vsctl_fatal('%s because an interface named %s already '
                        'exists on bridge %s' %
                        (msg, name, self.ifaces[name].port().bridge().name))
def find_bridge(self, name, must_exist):
assert self.cache_valid
vsctl_bridge = self.bridges.get(name)
if must_exist and not vsctl_bridge:
vsctl_fatal('no bridge named %s' % name)
self.verify_bridges()
return vsctl_bridge
    def find_real_bridge(self, name, must_exist):
        """Like find_bridge(), but fatal if the match is a fake bridge."""
        vsctl_bridge = self.find_bridge(name, must_exist)
        if vsctl_bridge and vsctl_bridge.parent:
            vsctl_fatal('%s is a fake bridge' % name)
        return vsctl_bridge
    def find_bridge_by_id(self, datapath_id, must_exist):
        """Return the cached bridge whose datapath-id matches, or None."""
        assert self.cache_valid
        for vsctl_bridge in self.bridges.values():
            # datapath_id column is a single-element list; stored value may
            # carry surrounding double quotes.
            if vsctl_bridge.br_cfg.datapath_id[0].strip('"') == datapath_id:
                self.verify_bridges()
                return vsctl_bridge

        if must_exist:
            vsctl_fatal('no bridge id %s' % datapath_id)
        return None
def find_port(self, name, must_exist):
assert self.cache_valid
vsctl_port = self.ports.get(name)
if vsctl_port and name == vsctl_port.bridge().name:
vsctl_port = None
if must_exist and not vsctl_port:
vsctl_fatal('no vsctl_port named %s' % name)
return vsctl_port
    def find_iface(self, name, must_exist):
        """Look up a cached interface by name.

        An interface sharing its bridge's name (the bridge-internal
        interface) is treated as not found. Fatal when *must_exist*.
        """
        assert self.cache_valid
        vsctl_iface = self.ifaces.get(name)
        if vsctl_iface and name == vsctl_iface.port().bridge().name:
            vsctl_iface = None
        if must_exist and not vsctl_iface:
            vsctl_fatal('no interface named %s' % name)
        self.verify_ports()
        return vsctl_iface
    def set_qos(self, vsctl_port, type, max_rate):
        """Create or update the QoS row for *vsctl_port*.

        NOTE: the `type` parameter shadows the builtin but is kept for
        interface compatibility; it is the QoS type string.
        """
        qos = vsctl_port.qos.qos_cfg
        if not len(qos):
            # No QoS row yet: insert one and reference it from the port.
            ovsrec_qos = self.txn.insert(
                self.txn.idl.tables[vswitch_idl.OVSREC_TABLE_QOS])
            vsctl_port.port_cfg.qos = [ovsrec_qos]
        else:
            ovsrec_qos = qos[0]
        ovsrec_qos.type = type
        if max_rate is not None:
            self.set_column(ovsrec_qos, 'other_config', 'max-rate', max_rate)
        # Cache stores the row wrapped in a list, mirroring ovsrec_port.qos.
        self.add_qos_to_cache(vsctl_port, [ovsrec_qos])
        return ovsrec_qos
    def set_queue(self, vsctl_qos, max_rate, min_rate,
                  queue_id):
        """Create or update the queue *queue_id* under *vsctl_qos*.

        Rates, when given, are written into the queue's other_config map.
        """
        ovsrec_qos = vsctl_qos.qos_cfg[0]
        try:
            # Reuse the existing queue for this id if present.
            ovsrec_queue = ovsrec_qos.queues[queue_id]
        except (AttributeError, KeyError):
            ovsrec_queue = self.txn.insert(
                self.txn.idl.tables[vswitch_idl.OVSREC_TABLE_QUEUE])
        if max_rate is not None:
            self.set_column(ovsrec_queue, 'other_config',
                            'max-rate', max_rate)
        if min_rate is not None:
            self.set_column(ovsrec_queue, 'other_config',
                            'min-rate', min_rate)
        # Reference the queue row from the QoS 'queues' map by id.
        self.set_column(ovsrec_qos, 'queues', queue_id,
                        ['uuid', str(ovsrec_queue.uuid)])
        self.add_queue_to_cache(vsctl_qos, ovsrec_queue)
        return ovsrec_queue
    @staticmethod
    def _column_set(ovsrec_row, column, ovsrec_value):
        """Assign a column value via setattr so the IDL sees the change."""
        # need to trigger Row.__setattr__()
        setattr(ovsrec_row, column, ovsrec_value)
@staticmethod
def _column_insert(ovsrec_row, column, ovsrec_add):
value = getattr(ovsrec_row, column)
value.append(ovsrec_add)
VSCtlContext._column_set(ovsrec_row, column, value)
@staticmethod
def _column_delete(ovsrec_row, column, ovsrec_del):
value = getattr(ovsrec_row, column)
try:
value.remove(ovsrec_del)
except ValueError:
# Datum.to_python() with _uuid_to_row trims down deleted
# references. If ovsrec_del.delete() is called before
# _column_delete(), value doesn't include ovsrec_del.
pass
VSCtlContext._column_set(ovsrec_row, column, value)
    @staticmethod
    def bridge_insert_port(ovsrec_bridge, ovsrec_port):
        """Add *ovsrec_port* to the bridge's 'ports' column."""
        VSCtlContext._column_insert(ovsrec_bridge,
                                    vswitch_idl.OVSREC_BRIDGE_COL_PORTS,
                                    ovsrec_port)
    @staticmethod
    def bridge_delete_port(ovsrec_bridge, ovsrec_port):
        """Remove *ovsrec_port* from the bridge's 'ports' column."""
        VSCtlContext._column_delete(ovsrec_bridge,
                                    vswitch_idl.OVSREC_BRIDGE_COL_PORTS,
                                    ovsrec_port)
    @staticmethod
    def port_delete_qos(ovsrec_port, ovsrec_qos):
        """Remove *ovsrec_qos* from the port's 'qos' column."""
        VSCtlContext._column_delete(ovsrec_port,
                                    vswitch_idl.OVSREC_PORT_COL_QOS,
                                    ovsrec_qos)
    def ovs_insert_bridge(self, ovsrec_bridge):
        """Reference *ovsrec_bridge* from the Open_vSwitch 'bridges' column."""
        self._column_insert(self.ovs,
                            vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES,
                            ovsrec_bridge)
    def ovs_delete_bridge(self, ovsrec_bridge):
        """Drop *ovsrec_bridge* from the Open_vSwitch 'bridges' column."""
        self._column_delete(self.ovs,
                            vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES,
                            ovsrec_bridge)
    def del_port(self, vsctl_port):
        """Delete a port, its interfaces, and its bridge reference."""
        # For a port on a fake bridge, the row actually lives on the real
        # (parent) bridge.
        if vsctl_port.bridge().parent:
            ovsrec_bridge = vsctl_port.bridge().parent.br_cfg
        else:
            ovsrec_bridge = vsctl_port.bridge().br_cfg
        self.bridge_delete_port(ovsrec_bridge, vsctl_port.port_cfg)

        # Iterate over a copy: del_cached_iface mutates vsctl_port.ifaces.
        for vsctl_iface in vsctl_port.ifaces.copy():
            self.del_cached_iface(vsctl_iface)
        self.del_cached_port(vsctl_port)
    def del_bridge(self, vsctl_bridge):
        """Recursively delete a bridge, its fake children and its ports."""
        for child in vsctl_bridge.children.copy():
            self.del_bridge(child)
        for vsctl_port in vsctl_bridge.ports.copy():
            self.del_port(vsctl_port)
        self.del_cached_bridge(vsctl_bridge)
    def del_qos(self, vsctl_qos):
        """Unlink a QoS entry from its port and drop it from the cache."""
        ovsrec_port = vsctl_qos.port().port_cfg
        ovsrec_qos = vsctl_qos.qos_cfg  # list: empty or one QoS row
        if len(ovsrec_qos):
            self.port_delete_qos(ovsrec_port, ovsrec_qos[0])
            self.del_cached_qos(vsctl_qos)
def add_port(self, br_name, port_name, may_exist, fake_iface,
iface_names, settings=None):
"""
:type settings: list of (column, key, value_json)
where column and key are str,
value_json is json that is represented
by Datum.to_json()
"""
settings = settings or []
self.populate_cache()
if may_exist:
vsctl_port = self.find_port(port_name, False)
if vsctl_port:
want_names = set(iface_names)
have_names = set(ovsrec_iface.name for ovsrec_iface in
vsctl_port.port_cfg.interfaces)
if vsctl_port.bridge().name != br_name:
vsctl_fatal('"%s" but %s is actually attached to '
'vsctl_bridge %s',
br_name, port_name, vsctl_port.bridge().name)
if want_names != have_names:
want_names_string = ','.join(want_names)
have_names_string = ','.join(have_names)
vsctl_fatal('"%s" but %s actually has interface(s) %s' %
(want_names_string,
port_name, have_names_string))
return
self.check_conflicts(port_name,
'cannot create a port named %s' % port_name)
for iface_name in iface_names:
self.check_conflicts(
iface_name, 'cannot create an interface named %s' % iface_name)
vsctl_bridge = self.find_bridge(br_name, True)
ifaces = []
for iface_name in iface_names:
ovsrec_iface = self.txn.insert(
self.idl.tables[vswitch_idl.OVSREC_TABLE_INTERFACE])
ovsrec_iface.name = iface_name
ifaces.append(ovsrec_iface)
ovsrec_port = self.txn.insert(
self.idl.tables[vswitch_idl.OVSREC_TABLE_PORT])
ovsrec_port.name = port_name
ovsrec_port.interfaces = ifaces
ovsrec_port.bond_fake_iface = fake_iface
if vsctl_bridge.parent:
tag = vsctl_bridge.vlan
ovsrec_port.tag = tag
for setting in settings:
# TODO:XXX self.symtab:
column, key, value = setting
self.set_column(ovsrec_port, column, key, value)
if vsctl_bridge.parent:
ovsrec_bridge = vsctl_bridge.parent.br_cfg
else:
ovsrec_bridge = vsctl_bridge.br_cfg
self.bridge_insert_port(ovsrec_bridge, ovsrec_port)
vsctl_port = self.add_port_to_cache(vsctl_bridge, ovsrec_port)
for ovsrec_iface in ifaces:
self.add_iface_to_cache(vsctl_port, ovsrec_iface)
    def add_bridge(self, br_name, parent_name=None, vlan=0, may_exist=False):
        """Create bridge *br_name*; with *parent_name* a fake VLAN bridge.

        With *may_exist*, an identical pre-existing bridge is accepted;
        any mismatch in parent or VLAN is a fatal error.
        """
        self.populate_cache()
        if may_exist:
            vsctl_bridge = self.find_bridge(br_name, False)
            if vsctl_bridge:
                # Verify the existing bridge matches what was requested.
                if not parent_name:
                    if vsctl_bridge.parent:
                        vsctl_fatal('"--may-exist add-vsctl_bridge %s" '
                                    'but %s is a VLAN bridge for VLAN %d' %
                                    (br_name, br_name, vsctl_bridge.vlan))
                else:
                    if not vsctl_bridge.parent:
                        vsctl_fatal('"--may-exist add-vsctl_bridge %s %s %d" '
                                    'but %s is not a VLAN bridge' %
                                    (br_name, parent_name, vlan, br_name))
                    elif vsctl_bridge.parent.name != parent_name:
                        vsctl_fatal('"--may-exist add-vsctl_bridge %s %s %d" '
                                    'but %s has the wrong parent %s' %
                                    (br_name, parent_name, vlan,
                                     br_name, vsctl_bridge.parent.name))
                    elif vsctl_bridge.vlan != vlan:
                        vsctl_fatal('"--may-exist add-vsctl_bridge %s %s %d" '
                                    'but %s is a VLAN bridge for the wrong '
                                    'VLAN %d' %
                                    (br_name, parent_name, vlan, br_name,
                                     vsctl_bridge.vlan))
                return

        self.check_conflicts(br_name,
                             'cannot create a bridge named %s' % br_name)

        txn = self.txn
        tables = self.idl.tables
        if not parent_name:
            # Real bridge: internal interface + port + Bridge row, then
            # reference the bridge from the Open_vSwitch row.
            ovsrec_iface = txn.insert(
                tables[vswitch_idl.OVSREC_TABLE_INTERFACE])
            ovsrec_iface.name = br_name
            ovsrec_iface.type = 'internal'

            ovsrec_port = txn.insert(tables[vswitch_idl.OVSREC_TABLE_PORT])
            ovsrec_port.name = br_name
            ovsrec_port.interfaces = [ovsrec_iface]
            ovsrec_port.fake_bridge = False

            ovsrec_bridge = txn.insert(tables[vswitch_idl.OVSREC_TABLE_BRIDGE])
            ovsrec_bridge.name = br_name
            ovsrec_bridge.ports = [ovsrec_port]

            self.ovs_insert_bridge(ovsrec_bridge)
        else:
            # Fake bridge: a tagged fake-bridge port on the real parent.
            parent = self.find_bridge(parent_name, False)
            if parent and parent.parent:
                vsctl_fatal('cannot create bridge with fake bridge as parent')
            if not parent:
                vsctl_fatal('parent bridge %s does not exist' % parent_name)

            ovsrec_iface = txn.insert(
                tables[vswitch_idl.OVSREC_TABLE_INTERFACE])
            ovsrec_iface.name = br_name
            ovsrec_iface.type = 'internal'

            ovsrec_port = txn.insert(tables[vswitch_idl.OVSREC_TABLE_PORT])
            ovsrec_port.name = br_name
            ovsrec_port.interfaces = [ovsrec_iface]
            ovsrec_port.fake_bridge = True
            ovsrec_port.tag = vlan

            self.bridge_insert_port(parent.br_cfg, ovsrec_port)

        self.invalidate_cache()
@staticmethod
def parse_column_key_value(table_schema, setting_string):
"""
parse <column>[:<key>]=<value>
"""
column_value = setting_string.split('=', 1)
if len(column_value) == 1:
column = column_value[0]
value = None
else:
column, value = column_value
if ':' in column:
column, key = column.split(':', 1)
else:
key = None
if value is not None:
LOG.debug("columns %s", list(table_schema.columns.keys()))
type_ = table_schema.columns[column].type
value = datum_from_string(type_, value)
LOG.debug("column %s value %s", column, value)
return (column, key, value)
    def set_column(self, ovsrec_row, column, key, value_json):
        """Set *column* (or one *key* of a map column) to *value_json*.

        With a key, the existing map value is updated in place; without,
        the whole column is replaced. Fatal on unknown column or when a
        key is given for a non-map column.
        """
        if column not in ovsrec_row._table.columns:
            vsctl_fatal('%s does not contain a column whose name matches "%s"'
                        % (ovsrec_row._table.name, column))

        column_schema = ovsrec_row._table.columns[column]
        if key is not None:
            # Wrap the single pair into OVSDB JSON map form.
            value_json = ['map', [[key, value_json]]]
            if column_schema.type.value.type == ovs.db.types.VoidType:
                vsctl_fatal('cannot specify key to set for non-map column %s' %
                            column)

            datum = ovs.db.data.Datum.from_json(column_schema.type, value_json,
                                                self.symtab)
            values = getattr(ovsrec_row, column, {})
            values.update(datum.to_python(ovs.db.idl._uuid_to_row))
            setattr(ovsrec_row, column, values)
        else:
            datum = ovs.db.data.Datum.from_json(column_schema.type, value_json,
                                                self.symtab)
            setattr(ovsrec_row, column,
                    datum.to_python(ovs.db.idl._uuid_to_row))
    def _get_row_by_id(self, table_name, vsctl_row_id, record_id):
        """Resolve *record_id* to a row using one _VSCtlRowID scheme.

        Returns the matching row (dereferenced through ``uuid_column``
        when configured) or None when this scheme does not apply.
        """
        if not vsctl_row_id.table:
            return None
        if not vsctl_row_id.name_column:
            # Nameless scheme: only the '.' wildcard is accepted.
            if record_id != '.':
                return None
            values = list(self.idl.tables[vsctl_row_id.table].rows.values())
            # NOTE(review): upstream ovs-vsctl requires the table to hold
            # exactly one row here; '> 2' also lets a two-row table
            # through -- confirm whether this should be '> 1'.
            if not values or len(values) > 2:
                return None
            referrer = values[0]
        else:
            # Named scheme: find the unique row whose name column equals
            # record_id; list-valued names never match.
            referrer = None
            for ovsrec_row in self.idl.tables[
                    vsctl_row_id.table].rows.values():
                name = getattr(ovsrec_row, vsctl_row_id.name_column)
                assert type(name) in (list, str, six.text_type)
                if type(name) != list and name == record_id:
                    if (referrer):
                        vsctl_fatal('multiple rows in %s match "%s"' %
                                    (table_name, record_id))
                    referrer = ovsrec_row
        if not referrer:
            return None
        final = None
        if vsctl_row_id.uuid_column:
            # Follow the (single-element) uuid reference column, if set.
            referrer.verify(vsctl_row_id.uuid_column)
            uuid = getattr(referrer, vsctl_row_id.uuid_column)
            uuid_ = referrer._data[vsctl_row_id.uuid_column]
            assert uuid_.type.key.type == ovs.db.types.UuidType
            assert uuid_.type.value is None
            assert type(uuid) == list
            if len(uuid) == 1:
                final = uuid[0]
        else:
            final = referrer
        return final
def get_row(self, vsctl_table, record_id):
table_name = vsctl_table.table_name
if ovsuuid.is_valid_string(record_id):
uuid = ovsuuid.from_string(record_id)
return self.idl.tables[table_name].rows.get(uuid)
else:
for vsctl_row_id in vsctl_table.row_ids:
ovsrec_row = self._get_row_by_id(table_name, vsctl_row_id,
record_id)
if ovsrec_row:
return ovsrec_row
return None
def must_get_row(self, vsctl_table, record_id):
ovsrec_row = self.get_row(vsctl_table, record_id)
if not ovsrec_row:
vsctl_fatal('no row "%s" in table %s' % (record_id,
vsctl_table.table_name))
return ovsrec_row
class _CmdShowTable(object):
    """Static description of one table rendered by the 'show' command.

    table       -- table name
    name_column -- column used as the row's display name (or None)
    columns     -- additional columns to display
    recurse     -- flag, also (ab)used at runtime as a visited marker
    """

    def __init__(self, table, name_column, columns, recurse):
        super(_CmdShowTable, self).__init__()
        self.table, self.name_column = table, name_column
        self.columns, self.recurse = columns, recurse
class _VSCtlRowID(object):
    """One way of addressing a row of *table*: look it up via
    *name_column* and, when set, dereference *uuid_column*."""

    def __init__(self, table, name_column, uuid_column):
        super(_VSCtlRowID, self).__init__()
        self.table, self.name_column = table, name_column
        self.uuid_column = uuid_column
class _VSCtlTable(object):
    """A database table plus the _VSCtlRowID schemes that can address
    its rows."""

    def __init__(self, table_name, vsctl_row_id_list):
        super(_VSCtlTable, self).__init__()
        self.table_name, self.row_ids = table_name, vsctl_row_id_list
class VSCtlCommand(object):
    """One ovs-vsctl style command plus its execution state.

    :param command: command name, e.g. 'add-br'.
    :param args:    positional arguments for the command.
    :param options: option strings such as '--may-exist'.
    """

    def __init__(self, command, args=None, options=None):
        super(VSCtlCommand, self).__init__()
        self.command = command
        self.args = args or []
        self.options = options or []
        # Filled in by the command implementation.
        self.result = None
        # Hooks wired up internally by VSCtl._run_command().
        self._prerequisite = None
        self._run = None

    def has_option(self, option):
        """Return True when *option* was supplied for this command."""
        return option in self.options
class VSCtl(object):
    """ovs-vsctl equivalent speaking OVSDB directly via the python IDL.

    :param remote: OVSDB connection target handed to ovs Stream.open().
    """

    def _reset(self):
        # Drop per-run state so run_command() can be invoked repeatedly.
        self.schema_helper = None
        self.ovs = None
        self.txn = None
        self.wait_for_reload = True
        self.dry_run = False

    def __init__(self, remote):
        super(VSCtl, self).__init__()
        self.remote = remote          # OVSDB remote specification
        self.schema_json = None       # raw schema json (fetched lazily)
        self.schema = None            # parsed idl schema
        self.schema_helper = None     # SchemaHelper for the next run
        self.ovs = None
        self.txn = None               # in-flight idl.Transaction, if any
        self.wait_for_reload = True   # wait for ovs-vswitchd to catch up
        self.dry_run = False
    def _rpc_get_schema_json(self, database):
        """Fetch the JSON schema of *database* via a one-shot JSON-RPC
        'get_schema' request; fatal on any connection or RPC error."""
        LOG.debug('remote %s', self.remote)
        error, stream_ = stream.Stream.open_block(
            stream.Stream.open(self.remote))
        if error:
            vsctl_fatal('error %s' % os.strerror(error))
        rpc = jsonrpc.Connection(stream_)
        request = jsonrpc.Message.create_request('get_schema', [database])
        error, reply = rpc.transact_block(request)
        rpc.close()
        if error:
            vsctl_fatal(os.strerror(error))
        elif reply.error:
            vsctl_fatal('error %s' % reply.error)
        return reply.result
    def _init_schema_helper(self):
        """Fetch the schema once and build a fresh SchemaHelper for this
        run (commands register only the tables/columns they need)."""
        if self.schema_json is None:
            self.schema_json = self._rpc_get_schema_json(
                vswitch_idl.OVSREC_DB_NAME)
            schema_helper = idl.SchemaHelper(None, self.schema_json)
            schema_helper.register_all()
            self.schema = schema_helper.get_idl_schema()
        # LOG.debug('schema_json %s', schema_json)
        self.schema_helper = idl.SchemaHelper(None, self.schema_json)

    @staticmethod
    def _idl_block(idl_):
        # Block until the IDL connection has something to process.
        poller = ovs.poller.Poller()
        idl_.wait(poller)
        poller.block()

    @staticmethod
    def _idl_wait(idl_, seqno):
        # Run the IDL until its contents move past *seqno*.
        while idl_.change_seqno == seqno and not idl_.run():
            VSCtl._idl_block(idl_)
    def _run_prerequisites(self, commands):
        """Let each command register the tables/columns it needs before
        the IDL connection is created."""
        schema_helper = self.schema_helper
        schema_helper.register_table(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH)
        if self.wait_for_reload:
            # cur_cfg is polled later to detect that ovs-vswitchd has
            # picked up the new configuration.
            # LOG.debug('schema_helper._tables %s', schema_helper._tables)
            schema_helper.register_columns(
                vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
                [vswitch_idl.OVSREC_OPEN_VSWITCH_COL_CUR_CFG])
        for command in commands:
            if not command._prerequisite:
                continue
            # A throwaway context: prerequisites only touch the schema.
            ctx = VSCtlContext(None, None, None)
            command._prerequisite(ctx, command)
            ctx.done()
    def _do_vsctl(self, idl_, commands):
        """Run *commands* inside a single transaction.

        Returns True when done; False when the transaction must be
        retried (TRY_AGAIN or a command requested a retry).
        """
        txn = idl.Transaction(idl_)
        self.txn = txn
        if self.dry_run:
            txn.dry_run = True
        txn.add_comment('ovs-vsctl')  # TODO:XXX add operation name. args
        ovs_rows = idl_.tables[vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH].rows
        if ovs_rows:
            ovs_ = list(ovs_rows.values())[0]
        else:
            # XXX add verification that table is empty
            ovs_ = txn.insert(
                idl_.tables[vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH])
        if self.wait_for_reload:
            # Bump next_cfg so cur_cfg can be awaited after the commit.
            ovs_.increment(vswitch_idl.OVSREC_OPEN_VSWITCH_COL_NEXT_CFG)
        # TODO:XXX
        # symtab = ovsdb_symbol_table_create()
        ctx = VSCtlContext(idl_, txn, ovs_)
        for command in commands:
            if not command._run:
                continue
            command._run(ctx, command)
            if ctx.try_again:
                return False
        LOG.debug('result:\n%s', [command.result for command in commands])
        ctx.done()
        # TODO:XXX check if created symbols are really created, referenced.
        status = txn.commit_block()
        next_cfg = 0
        if self.wait_for_reload and status == idl.Transaction.SUCCESS:
            next_cfg = txn.get_increment_new_value()
        # TODO:XXX
        # if status in (idl.Transaction.UNCHANGED, idl.Transaction.SUCCESS):
        #     for command in commands:
        #         if not command.post_func:
        #             continue
        #         ctx = VSCtlContext(idl_, txn, self.ovs)
        #         command.post_func(ctx)
        #         ctx.done()
        # Keep a reference for error reporting; clear the instance state.
        txn_ = self.txn
        self.txn = None
        txn = None
        if status in (idl.Transaction.UNCOMMITTED, idl.Transaction.INCOMPLETE):
            not_reached()
        elif status == idl.Transaction.ABORTED:
            vsctl_fatal('transaction aborted')
        elif status == idl.Transaction.UNCHANGED:
            LOG.info('unchanged')
        elif status == idl.Transaction.SUCCESS:
            LOG.info('success')
        elif status == idl.Transaction.TRY_AGAIN:
            return False
        elif status == idl.Transaction.ERROR:
            vsctl_fatal('transaction error: %s' % txn_.get_error())
        elif status == idl.Transaction.NOT_LOCKED:
            vsctl_fatal('database not locked')
        else:
            not_reached()
        if self.wait_for_reload and status != idl.Transaction.UNCHANGED:
            # Wait until ovs-vswitchd reports the new configuration applied.
            while True:
                idl_.run()
                if (ovs_.cur_cfg >= next_cfg):
                    break
                self._idl_block(idl_)
        return True
    def _do_main(self, commands):
        """
        Connect, run prerequisites, then retry the transaction until done.

        :type commands: list of VSCtlCommand
        """
        self._reset()
        self._init_schema_helper()
        self._run_prerequisites(commands)
        idl_ = idl.Idl(self.remote, self.schema_helper)
        seqno = idl_.change_seqno
        while True:
            # Wait for the IDL contents to change before (re)trying.
            self._idl_wait(idl_, seqno)
            seqno = idl_.change_seqno
            if self._do_vsctl(idl_, commands):
                break
            # Retry requested: abort any transaction still in flight.
            if self.txn:
                self.txn.abort()
                self.txn = None
            # TODO:XXX
            # ovsdb_symbol_table_destroy(symtab)
        idl_.close()
    def _run_command(self, commands):
        """
        Wire each command to its (prerequisite, run) handlers and execute.

        :type commands: list of VSCtlCommand
        """
        all_commands = {
            # Open vSwitch commands.
            'init': (None, self._cmd_init),
            'show': (self._pre_cmd_show, self._cmd_show),
            # Bridge commands.
            'add-br': (self._pre_add_br, self._cmd_add_br),
            'del-br': (self._pre_get_info, self._cmd_del_br),
            'list-br': (self._pre_get_info, self._cmd_list_br),
            # Port. commands
            'list-ports': (self._pre_get_info, self._cmd_list_ports),
            'add-port': (self._pre_cmd_add_port, self._cmd_add_port),
            'del-port': (self._pre_get_info, self._cmd_del_port),
            # 'add-bond':
            # 'port-to-br':
            # Interface commands.
            'list-ifaces': (self._pre_get_info, self._cmd_list_ifaces),
            # 'iface-to-br':
            # Controller commands.
            'get-controller': (self._pre_controller, self._cmd_get_controller),
            'del-controller': (self._pre_controller, self._cmd_del_controller),
            'set-controller': (self._pre_controller, self._cmd_set_controller),
            # 'get-fail-mode':
            # 'del-fail-mode':
            # 'set-fail-mode':
            # Manager commands.
            # 'get-manager':
            # 'del-manager':
            # 'set-manager':
            # Switch commands.
            # 'emer-reset':
            # Database commands.
            # 'comment':
            'get': (self._pre_cmd_get, self._cmd_get),
            # 'list':
            'find': (self._pre_cmd_find, self._cmd_find),
            'set': (self._pre_cmd_set, self._cmd_set),
            # 'add':
            'clear': (self._pre_cmd_clear, self._cmd_clear),
            # 'create':
            # 'destroy':
            # 'wait-until':
            'set-qos': (self._pre_cmd_set_qos, self._cmd_set_qos),
            'set-queue': (self._pre_cmd_set_queue, self._cmd_set_queue),
            'del-qos': (self._pre_get_info, self._cmd_del_qos),
            # for quantum_adapter
            'list-ifaces-verbose': (self._pre_cmd_list_ifaces_verbose,
                                    self._cmd_list_ifaces_verbose),
        }
        for command in commands:
            funcs = all_commands[command.command]
            command._prerequisite, command._run = funcs
        self._do_main(commands)

    def run_command(self, commands, timeout_sec=None, exception=None):
        """Execute *commands*, optionally bounded by a hub.Timeout that
        raises *exception* on expiry."""
        if timeout_sec is None:
            self._run_command(commands)
        else:
            with hub.Timeout(timeout_sec, exception):
                self._run_command(commands)
    # commands
    def _cmd_init(self, _ctx, _command):
        # nothing. Just check connection to ovsdb
        pass

    # Tables (and the columns to print) visited by the 'show' command, in
    # display order.  'recurse' doubles as a visited flag at runtime.
    _CMD_SHOW_TABLES = [
        _CmdShowTable(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH, None,
                      [vswitch_idl.OVSREC_OPEN_VSWITCH_COL_MANAGER_OPTIONS,
                       vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES,
                       vswitch_idl.OVSREC_OPEN_VSWITCH_COL_OVS_VERSION],
                      False),
        _CmdShowTable(vswitch_idl.OVSREC_TABLE_BRIDGE,
                      vswitch_idl.OVSREC_BRIDGE_COL_NAME,
                      [vswitch_idl.OVSREC_BRIDGE_COL_CONTROLLER,
                       vswitch_idl.OVSREC_BRIDGE_COL_FAIL_MODE,
                       vswitch_idl.OVSREC_BRIDGE_COL_PORTS],
                      False),
        _CmdShowTable(vswitch_idl.OVSREC_TABLE_PORT,
                      vswitch_idl.OVSREC_PORT_COL_NAME,
                      [vswitch_idl.OVSREC_PORT_COL_TAG,
                       vswitch_idl.OVSREC_PORT_COL_TRUNKS,
                       vswitch_idl.OVSREC_PORT_COL_INTERFACES],
                      False),
        _CmdShowTable(vswitch_idl.OVSREC_TABLE_INTERFACE,
                      vswitch_idl.OVSREC_INTERFACE_COL_NAME,
                      [vswitch_idl.OVSREC_INTERFACE_COL_TYPE,
                       vswitch_idl.OVSREC_INTERFACE_COL_OPTIONS],
                      False),
        _CmdShowTable(vswitch_idl.OVSREC_TABLE_CONTROLLER,
                      vswitch_idl.OVSREC_CONTROLLER_COL_TARGET,
                      [vswitch_idl.OVSREC_CONTROLLER_COL_IS_CONNECTED],
                      False),
        _CmdShowTable(vswitch_idl.OVSREC_TABLE_MANAGER,
                      vswitch_idl.OVSREC_MANAGER_COL_TARGET,
                      [vswitch_idl.OVSREC_MANAGER_COL_IS_CONNECTED],
                      False),
    ]
def _pre_cmd_show(self, _ctx, _command):
schema_helper = self.schema_helper
for show in self._CMD_SHOW_TABLES:
schema_helper.register_table(show.table)
if show.name_column:
schema_helper.register_columns(show.table, [show.name_column])
schema_helper.register_columns(show.table, show.columns)
@staticmethod
def _cmd_show_find_table_by_row(row):
for show in VSCtl._CMD_SHOW_TABLES:
if show.table == row._table.name:
return show
return None
@staticmethod
def _cmd_show_find_table_by_name(name):
for show in VSCtl._CMD_SHOW_TABLES:
if show.table == name:
return show
return None
@staticmethod
def _cmd_show_row(ctx, row, level):
_INDENT_SIZE = 4 # # of spaces per indent
show = VSCtl._cmd_show_find_table_by_row(row)
output = ''
output += ' ' * level * _INDENT_SIZE
if show and show.name_column:
output += '%s ' % show.table
datum = getattr(row, show.name_column)
output += datum
else:
output += str(row.uuid)
output += '\n'
if not show or show.recurse:
return
show.recurse = True
for column in show.columns:
datum = row._data[column]
key = datum.type.key
if (key.type == ovs.db.types.UuidType and key.ref_table_name):
ref_show = VSCtl._cmd_show_find_table_by_name(
key.ref_table_name)
if ref_show:
for atom in datum.values:
ref_row = ctx.idl.tables[ref_show.table].rows.get(
atom.value)
if ref_row:
VSCtl._cmd_show_row(ctx, ref_row, level + 1)
continue
if not datum.is_default():
output += ' ' * (level + 1) * _INDENT_SIZE
output += '%s: %s\n' % (column, datum)
show.recurse = False
return output
def _cmd_show(self, ctx, command):
for row in ctx.idl.tables[
self._CMD_SHOW_TABLES[0].table].rows.values():
output = self._cmd_show_row(ctx, row, 0)
command.result = output
def _pre_get_info(self, _ctx, _command):
schema_helper = self.schema_helper
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
[vswitch_idl.OVSREC_OPEN_VSWITCH_COL_BRIDGES])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_BRIDGE,
[vswitch_idl.OVSREC_BRIDGE_COL_NAME,
vswitch_idl.OVSREC_BRIDGE_COL_CONTROLLER,
vswitch_idl.OVSREC_BRIDGE_COL_FAIL_MODE,
vswitch_idl.OVSREC_BRIDGE_COL_PORTS])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_PORT,
[vswitch_idl.OVSREC_PORT_COL_NAME,
vswitch_idl.OVSREC_PORT_COL_FAKE_BRIDGE,
vswitch_idl.OVSREC_PORT_COL_TAG,
vswitch_idl.OVSREC_PORT_COL_INTERFACES,
vswitch_idl.OVSREC_PORT_COL_QOS])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_INTERFACE,
[vswitch_idl.OVSREC_INTERFACE_COL_NAME])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_QOS,
[vswitch_idl.OVSREC_QOS_COL_QUEUES])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_QUEUE,
[])
def _cmd_list_br(self, ctx, command):
ctx.populate_cache()
command.result = sorted(ctx.bridges.keys())
def _pre_add_br(self, ctx, command):
self._pre_get_info(ctx, command)
schema_helper = self.schema_helper
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_INTERFACE,
[vswitch_idl.OVSREC_INTERFACE_COL_TYPE])
def _cmd_add_br(self, ctx, command):
br_name = command.args[0]
if len(command.args) == 1:
parent_name = None
vlan = 0
elif len(command.args) == 3:
parent_name = command.args[1]
vlan = int(command.args[2])
if vlan < 0 or vlan > 4095:
vsctl_fatal("vlan must be between 0 and 4095 %d" % vlan)
else:
vsctl_fatal('this command takes exactly 1 or 3 argument')
ctx.add_bridge(br_name, parent_name, vlan)
def _del_br(self, ctx, br_name, must_exist=False):
ctx.populate_cache()
br = ctx.find_bridge(br_name, must_exist)
if br:
ctx.del_bridge(br)
def _cmd_del_br(self, ctx, command):
br_name = command.args[0]
self._del_br(ctx, br_name)
def _list_ports(self, ctx, br_name):
ctx.populate_cache()
br = ctx.find_bridge(br_name, True)
if br.br_cfg:
br.br_cfg.verify(vswitch_idl.OVSREC_BRIDGE_COL_PORTS)
else:
br.parent.br_cfg.verify(vswitch_idl.OVSREC_BRIDGE_COL_PORTS)
return [port.port_cfg.name for port in br.ports
if port.port_cfg.name != br.name]
def _cmd_list_ports(self, ctx, command):
br_name = command.args[0]
port_names = self._list_ports(ctx, br_name)
command.result = sorted(port_names)
def _pre_add_port(self, _ctx, columns):
schema_helper = self.schema_helper
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_PORT,
[vswitch_idl.OVSREC_PORT_COL_NAME,
vswitch_idl.OVSREC_PORT_COL_BOND_FAKE_IFACE])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_PORT, columns)
def _pre_cmd_add_port(self, ctx, command):
self._pre_get_info(ctx, command)
columns = [ctx.parse_column_key_value(
self.schema.tables[vswitch_idl.OVSREC_TABLE_PORT], setting)[0]
for setting in command.args[2:]]
self._pre_add_port(ctx, columns)
def _cmd_add_port(self, ctx, command):
may_exist = command.has_option('--may_exist')
br_name = command.args[0]
port_name = command.args[1]
iface_names = [command.args[1]]
settings = [ctx.parse_column_key_value(
self.schema.tables[vswitch_idl.OVSREC_TABLE_PORT], setting)
for setting in command.args[2:]]
ctx.add_port(br_name, port_name, may_exist,
False, iface_names, settings)
def _del_port(self, ctx, br_name=None, target=None,
must_exist=False, with_iface=False):
assert target is not None
ctx.populate_cache()
if not with_iface:
vsctl_port = ctx.find_port(target, must_exist)
else:
vsctl_port = ctx.find_port(target, False)
if not vsctl_port:
vsctl_iface = ctx.find_iface(target, False)
if vsctl_iface:
vsctl_port = vsctl_iface.port()
if must_exist and not vsctl_port:
vsctl_fatal('no port or interface named %s' % target)
if not vsctl_port:
return
if not br_name:
vsctl_bridge = ctx.find_bridge(br_name, True)
if vsctl_port.bridge() != vsctl_bridge:
if vsctl_port.bridge().parent == vsctl_bridge:
vsctl_fatal('bridge %s does not have a port %s (although '
'its parent bridge %s does)' %
(br_name, target, vsctl_bridge.parent.name))
else:
vsctl_fatal('bridge %s does not have a port %s' %
(br_name, target))
ctx.del_port(vsctl_port)
def _cmd_del_port(self, ctx, command):
must_exist = command.has_option('--must-exist')
with_iface = command.has_option('--with-iface')
target = command.args[-1]
br_name = command.args[0] if len(command.args) == 2 else None
self._del_port(ctx, br_name, target, must_exist, with_iface)
def _list_ifaces(self, ctx, br_name):
ctx.populate_cache()
br = ctx.find_bridge(br_name, True)
ctx.verify_ports()
iface_names = set()
for vsctl_port in br.ports:
for vsctl_iface in vsctl_port.ifaces:
iface_name = vsctl_iface.iface_cfg.name
if iface_name != br_name:
iface_names.add(iface_name)
return iface_names
def _cmd_list_ifaces(self, ctx, command):
br_name = command.args[0]
iface_names = self._list_ifaces(ctx, br_name)
command.result = sorted(iface_names)
def _pre_cmd_list_ifaces_verbose(self, ctx, command):
self._pre_get_info(ctx, command)
schema_helper = self.schema_helper
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_BRIDGE,
[vswitch_idl.OVSREC_BRIDGE_COL_DATAPATH_ID])
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_INTERFACE,
[vswitch_idl.OVSREC_INTERFACE_COL_TYPE,
vswitch_idl.OVSREC_INTERFACE_COL_NAME,
vswitch_idl.OVSREC_INTERFACE_COL_EXTERNAL_IDS,
vswitch_idl.OVSREC_INTERFACE_COL_OPTIONS,
vswitch_idl.OVSREC_INTERFACE_COL_OFPORT])
@staticmethod
def _iface_to_dict(iface_cfg):
_ATTRIBUTE = ['name', 'ofport', 'type', 'external_ids', 'options']
attr = dict((key, getattr(iface_cfg, key)) for key in _ATTRIBUTE)
if attr['ofport']:
attr['ofport'] = attr['ofport'][0]
return attr
    def _list_ifaces_verbose(self, ctx, datapath_id, port_name):
        """Return dicts describing interfaces on bridge *datapath_id*.

        When *port_name* is given only interfaces with that name are
        returned (see the note below about ofport).
        """
        ctx.populate_cache()
        br = ctx.find_bridge_by_id(datapath_id, True)
        ctx.verify_ports()
        iface_cfgs = []
        if port_name is None:
            for vsctl_port in br.ports:
                iface_cfgs.extend(self._iface_to_dict(vsctl_iface.iface_cfg)
                                  for vsctl_iface in vsctl_port.ifaces)
        else:
            # When a port is created, the ofport column might be None,
            # so fall back to matching by port name if it happened.
            for vsctl_port in br.ports:
                iface_cfgs.extend(
                    self._iface_to_dict(vsctl_iface.iface_cfg)
                    for vsctl_iface in vsctl_port.ifaces
                    if (vsctl_iface.iface_cfg.name == port_name))
        return iface_cfgs
def _cmd_list_ifaces_verbose(self, ctx, command):
datapath_id = command.args[0]
port_name = None
if len(command.args) >= 2:
port_name = command.args[1]
LOG.debug('command.args %s', command.args)
iface_cfgs = self._list_ifaces_verbose(ctx, datapath_id, port_name)
command.result = sorted(iface_cfgs)
def _verify_controllers(self, ovsrec_bridge):
ovsrec_bridge.verify(vswitch_idl.OVSREC_BRIDGE_COL_CONTROLLER)
for controller in ovsrec_bridge.controller:
controller.verify(vswitch_idl.OVSREC_CONTROLLER_COL_TARGET)
def _pre_controller(self, ctx, command):
self._pre_get_info(ctx, command)
self.schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_CONTROLLER,
[vswitch_idl.OVSREC_CONTROLLER_COL_TARGET])
def _get_controller(self, ctx, br_name):
ctx.populate_cache()
br = ctx.find_bridge(br_name, True)
self._verify_controllers(br.br_cfg)
return set(controller.target for controller in br.br_cfg.controller)
def _cmd_get_controller(self, ctx, command):
br_name = command.args[0]
controller_names = self._get_controller(ctx, br_name)
command.result = sorted(controller_names)
def _delete_controllers(self, ovsrec_controllers):
for controller in ovsrec_controllers:
controller.delete()
def _del_controller(self, ctx, br_name):
ctx.populate_cache()
br = ctx.find_real_bridge(br_name, True)
ovsrec_bridge = br.br_cfg
self._verify_controllers(ovsrec_bridge)
if ovsrec_bridge.controller:
self._delete_controllers(ovsrec_bridge.controller)
ovsrec_bridge.controller = []
def _cmd_del_controller(self, ctx, command):
br_name = command.args[0]
self._del_controller(ctx, br_name)
def _insert_controllers(self, controller_names):
ovsrec_controllers = []
for name in controller_names:
# TODO: check if the name startswith() supported protocols
ovsrec_controller = self.txn.insert(
self.txn.idl.tables[vswitch_idl.OVSREC_TABLE_CONTROLLER])
ovsrec_controller.target = name
ovsrec_controllers.append(ovsrec_controller)
return ovsrec_controllers
def _insert_qos(self):
ovsrec_qos = self.txn.insert(
self.txn.idl.tables[vswitch_idl.OVSREC_TABLE_QOS])
return ovsrec_qos
def _set_controller(self, ctx, br_name, controller_names):
ctx.populate_cache()
ovsrec_bridge = ctx.find_real_bridge(br_name, True).br_cfg
self._verify_controllers(ovsrec_bridge)
self._delete_controllers(ovsrec_bridge.controller)
controllers = self._insert_controllers(controller_names)
ovsrec_bridge.controller = controllers
def _cmd_set_controller(self, ctx, command):
br_name = command.args[0]
controller_names = command.args[1:]
self._set_controller(ctx, br_name, controller_names)
def _del_qos(self, ctx, port_name):
assert port_name is not None
ctx.populate_cache()
vsctl_port = ctx.find_port(port_name, True)
vsctl_qos = vsctl_port.qos
ctx.del_qos(vsctl_qos)
def _cmd_del_qos(self, ctx, command):
port_name = command.args[0]
self._del_qos(ctx, port_name)
def _set_qos(self, ctx, port_name, type, max_rate):
ctx.populate_cache()
vsctl_port = ctx.find_port(port_name, True)
ovsrec_qos = ctx.set_qos(vsctl_port, type, max_rate)
return ovsrec_qos
def _cmd_set_qos(self, ctx, command):
port_name = command.args[0]
type = command.args[1]
max_rate = command.args[2]
result = self._set_qos(ctx, port_name, type, max_rate)
command.result = [result]
def _pre_cmd_set_qos(self, ctx, command):
self._pre_get_info(ctx, command)
schema_helper = self.schema_helper
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_QOS,
[vswitch_idl.OVSREC_QOS_COL_EXTERNAL_IDS,
vswitch_idl.OVSREC_QOS_COL_OTHER_CONFIG,
vswitch_idl.OVSREC_QOS_COL_QUEUES,
vswitch_idl.OVSREC_QOS_COL_TYPE])
def _cmd_set_queue(self, ctx, command):
ctx.populate_cache()
port_name = command.args[0]
queues = command.args[1]
vsctl_port = ctx.find_port(port_name, True)
vsctl_qos = vsctl_port.qos
queue_id = 0
results = []
for queue in queues:
max_rate = queue.get('max-rate', None)
min_rate = queue.get('min-rate', None)
ovsrec_queue = ctx.set_queue(
vsctl_qos, max_rate, min_rate, queue_id)
results.append(ovsrec_queue)
queue_id += 1
command.result = results
def _pre_cmd_set_queue(self, ctx, command):
self._pre_get_info(ctx, command)
schema_helper = self.schema_helper
schema_helper.register_columns(
vswitch_idl.OVSREC_TABLE_QUEUE,
[vswitch_idl.OVSREC_QUEUE_COL_DSCP,
vswitch_idl.OVSREC_QUEUE_COL_EXTERNAL_IDS,
vswitch_idl.OVSREC_QUEUE_COL_OTHER_CONFIG])
_TABLES = [
_VSCtlTable(vswitch_idl.OVSREC_TABLE_BRIDGE,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_BRIDGE,
vswitch_idl.OVSREC_BRIDGE_COL_NAME,
None)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_CONTROLLER,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_BRIDGE,
vswitch_idl.OVSREC_BRIDGE_COL_NAME,
vswitch_idl.OVSREC_BRIDGE_COL_CONTROLLER)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_INTERFACE,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_INTERFACE,
vswitch_idl.OVSREC_INTERFACE_COL_NAME,
None)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_MIRROR,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_MIRROR,
vswitch_idl.OVSREC_MIRROR_COL_NAME,
None)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_MANAGER,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_MANAGER,
vswitch_idl.OVSREC_MANAGER_COL_TARGET,
None)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_NETFLOW,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_BRIDGE,
vswitch_idl.OVSREC_BRIDGE_COL_NAME,
vswitch_idl.OVSREC_BRIDGE_COL_NETFLOW)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
None,
None)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_PORT,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_PORT,
vswitch_idl.OVSREC_PORT_COL_NAME,
None)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_QOS,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_PORT,
vswitch_idl.OVSREC_PORT_COL_NAME,
vswitch_idl.OVSREC_PORT_COL_QOS)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_QUEUE,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_QOS,
None,
vswitch_idl.OVSREC_QOS_COL_QUEUES)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_SSL,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_OPEN_VSWITCH,
None,
vswitch_idl.OVSREC_OPEN_VSWITCH_COL_SSL)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_SFLOW,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_BRIDGE,
vswitch_idl.OVSREC_BRIDGE_COL_NAME,
vswitch_idl.OVSREC_BRIDGE_COL_SFLOW)]),
_VSCtlTable(vswitch_idl.OVSREC_TABLE_FLOW_TABLE,
[_VSCtlRowID(vswitch_idl.OVSREC_TABLE_FLOW_TABLE,
vswitch_idl.OVSREC_FLOW_TABLE_COL_NAME,
None)]),
]
@staticmethod
def _score_partial_match(name, s):
_MAX_SCORE = 0xffffffff
assert len(name) < _MAX_SCORE
s = s[:_MAX_SCORE - 1] # in practice, this doesn't matter
if name == s:
return _MAX_SCORE
name = name.lower().replace('-', '_')
s = s.lower().replace('-', '_')
if s.startswith(name):
return _MAX_SCORE - 1
if name.startswith(s):
return len(s)
return 0
    @staticmethod
    def _get_table(table_name):
        """Resolve a possibly-abbreviated *table_name* to a _VSCtlTable.

        Fatal when the name is unknown or matches several tables.
        """
        best_match = None
        best_score = 0
        for table in VSCtl._TABLES:
            score = VSCtl._score_partial_match(table.table_name, table_name)
            if score > best_score:
                best_match = table
                best_score = score
            elif score == best_score:
                # Tie: clear the match but keep the score so a later,
                # strictly-better candidate can still win; a leftover
                # nonzero score reports ambiguity below.
                best_match = None
        if best_match:
            return best_match
        elif best_score:
            vsctl_fatal('multiple table names match "%s"' % table_name)
        else:
            vsctl_fatal('unknown table "%s"' % table_name)
def _pre_get_table(self, _ctx, table_name):
vsctl_table = self._get_table(table_name)
schema_helper = self.schema_helper
schema_helper.register_table(vsctl_table.table_name)
for row_id in vsctl_table.row_ids:
if row_id.table:
schema_helper.register_table(row_id.table)
if row_id.name_column:
schema_helper.register_columns(row_id.table,
[row_id.name_column])
if row_id.uuid_column:
schema_helper.register_columns(row_id.table,
[row_id.uuid_column])
return vsctl_table
    def _get_column(self, table_name, column_name):
        """Resolve a possibly-abbreviated *column_name* of *table_name*.

        Fatal when the column is unknown or ambiguous.
        """
        best_match = None
        best_score = 0
        columns = self.schema.tables[table_name].columns.keys()
        for column in columns:
            score = VSCtl._score_partial_match(column, column_name)
            if score > best_score:
                best_match = column
                best_score = score
            elif score == best_score:
                # Tie: drop the candidate but keep the score (same
                # pattern as _get_table).
                best_match = None
        if best_match:
            # ovs.db.schema_helper._keep_table_columns() requires that
            # column_name is type of str. Not unicode string
            return str(best_match)
        elif best_score:
            vsctl_fatal('%s contains more than one column whose name '
                        'matches "%s"' % (table_name, column_name))
        else:
            vsctl_fatal('%s does not contain a column whose name matches '
                        '"%s"' % (table_name, column_name))

    def _pre_get_column(self, _ctx, table_name, column):
        """Register the resolved column for the coming transaction."""
        column_name = self._get_column(table_name, column)
        self.schema_helper.register_columns(table_name, [column_name])
def _pre_get(self, ctx, table_name, columns):
vsctl_table = self._pre_get_table(ctx, table_name)
for column in columns:
self._pre_get_column(ctx, vsctl_table.table_name, column)
def _pre_cmd_get(self, ctx, command):
table_name = command.args[0]
table_schema = self.schema.tables[table_name]
columns = [ctx.parse_column_key_value(table_schema, column_key)[0]
for column_key in command.args[2:]]
self._pre_get(ctx, table_name, columns)
    def _get(self, ctx, table_name, record_id, column_keys,
             id_=None, if_exists=False):
        """
        Return the values of the given columns (or map entries) of a row.

        :type column_keys: list of (column, key_string)
                           where column and key are str
        """
        # NOTE(review): if_exists is currently unused -- a missing row is
        # always fatal via must_get_row(); confirm whether --if-exists
        # should suppress that.
        vsctl_table = self._get_table(table_name)
        row = ctx.must_get_row(vsctl_table, record_id)
        if id_:
            raise NotImplementedError()  # TODO:XXX
            # NOTE(review): everything below the raise is unreachable.
            symbol, new = ctx.create_symbol(id_)
            if not new:
                vsctl_fatal('row id "%s" specified on "get" command was used '
                            'before it was defined' % id_)
            symbol.uuid = row.uuid
            symbol.strong_ref = True
        values = []
        for column, key_string in column_keys:
            row.verify(column)
            datum = getattr(row, column)
            if key_string:
                # Keyed access is only meaningful for map columns.
                if type(datum) != dict:
                    vsctl_fatal('cannot specify key to get for non-map column '
                                '%s' % column)
                values.append(datum[key_string])
            else:
                values.append(datum)
        return values
def _cmd_get(self, ctx, command):
id_ = None # TODO:XXX --id
if_exists = command.has_option('--if-exists')
table_name = command.args[0]
record_id = command.args[1]
table_schema = self.schema.tables[table_name]
column_keys = [ctx.parse_column_key_value(table_schema, column_key)[:2]
for column_key in command.args[2:]]
values = self._get(ctx, table_name, record_id, column_keys,
id_, if_exists)
command.result = values
def _pre_cmd_find(self, ctx, command):
table_name = command.args[0]
table_schema = self.schema.tables[table_name]
columns = [ctx.parse_column_key_value(table_schema,
column_key_value)[0]
for column_key_value in command.args[1:]]
LOG.debug('columns %s', columns)
self._pre_get(ctx, table_name, columns)
    def _check_value(self, ovsrec_row, column_key_value):
        """Return True when *ovsrec_row* satisfies one parsed
        ``<column>[:<key>]=<value>`` condition of the find command.

        NOTE(review): the keyed branch matches when the stored entry does
        NOT equal the requested value ('!='), the opposite sense of the
        keyless branch -- confirm against ovs-vsctl 'find' semantics.
        """
        column, key, value_json = column_key_value
        column_schema = ovsrec_row._table.columns[column]
        value = ovs.db.data.Datum.from_json(
            column_schema.type, value_json).to_python(ovs.db.idl._uuid_to_row)
        datum = getattr(ovsrec_row, column)
        if key is None:
            if datum == value:
                return True
        else:
            if datum[key] != value:
                return True
        return False
def _find(self, ctx, table_name, column_key_values):
result = []
for ovsrec_row in ctx.idl.tables[table_name].rows.values():
LOG.debug('ovsrec_row %s', ovsrec_row_to_string(ovsrec_row))
if all(self._check_value(ovsrec_row, column_key_value)
for column_key_value in column_key_values):
result.append(ovsrec_row)
return result
def _cmd_find(self, ctx, command):
table_name = command.args[0]
table_schema = self.schema.tables[table_name]
column_key_values = [ctx.parse_column_key_value(table_schema,
column_key_value)
for column_key_value in command.args[1:]]
command.result = self._find(ctx, table_name, column_key_values)
def _check_mutable(self, table_name, column):
column_schema = self.schema.tables[table_name].columns[column]
if not column_schema.mutable:
vsctl_fatal('cannot modify read-only column %s in table %s' %
(column, table_name))
def _pre_set(self, ctx, table_name, columns):
self._pre_get_table(ctx, table_name)
for column in columns:
self._pre_get_column(ctx, table_name, column)
self._check_mutable(table_name, column)
def _pre_cmd_set(self, ctx, command):
table_name = command.args[0]
table_schema = self.schema.tables[table_name]
columns = [ctx.parse_column_key_value(table_schema,
column_key_value)[0]
for column_key_value in command.args[2:]]
self._pre_set(ctx, table_name, columns)
def _set(self, ctx, table_name, record_id, column_key_values):
"""
:type column_key_values: list of (column, key_string, value_json)
"""
vsctl_table = self._get_table(table_name)
ovsrec_row = ctx.must_get_row(vsctl_table, record_id)
for column, key, value in column_key_values:
ctx.set_column(ovsrec_row, column, key, value)
ctx.invalidate_cache()
def _cmd_set(self, ctx, command):
table_name = command.args[0]
record_id = command.args[1]
# column_key_value: <column>[:<key>]=<value>
table_schema = self.schema.tables[table_name]
column_key_values = [ctx.parse_column_key_value(table_schema,
column_key_value)
for column_key_value in command.args[2:]]
self._set(ctx, table_name, record_id, column_key_values)
def _pre_clear(self, ctx, table_name, column):
self._pre_get_table(ctx, table_name)
self._pre_get_column(ctx, table_name, column)
self._check_mutable(table_name, column)
def _pre_cmd_clear(self, ctx, command):
table_name = command.args[0]
column = command.args[2]
self._pre_clear(ctx, table_name, column)
    def _clear(self, ctx, table_name, record_id, column):
        """Reset *column* of the addressed row to its default datum."""
        vsctl_table = self._get_table(table_name)
        ovsrec_row = ctx.must_get_row(vsctl_table, record_id)
        column_schema = ctx.idl.tables[table_name].columns[column]
        # Columns with a minimum cardinality may not be emptied.
        if column_schema.type.n_min > 0:
            vsctl_fatal('"clear" operation cannot be applied to column %s '
                        'of table %s, which is not allowed to be empty' %
                        (column, table_name))
        # assuming that default datum is empty.
        default_datum = ovs.db.data.Datum.default(column_schema.type)
        setattr(ovsrec_row, column,
                default_datum.to_python(ovs.db.idl._uuid_to_row))
        ctx.invalidate_cache()

    def _cmd_clear(self, ctx, command):
        """clear <table> <record> <column>"""
        table_name = command.args[0]
        record_id = command.args[1]
        column = command.args[2]
        self._clear(ctx, table_name, record_id, column)
#
# Create constants from ovs db schema
#
def schema_print(schema_location, prefix):
    """Print Python constants generated from an OVSDB schema file.

    :param schema_location: path of the OVSDB schema (JSON) file
    :param prefix: prefix for the generated constant names (upper-cased)
    """
    prefix = prefix.upper()
    # Name the parsed document distinctly: binding it to "json" would
    # shadow the stdlib/ovs "json" module name inside this function.
    schema_json = ovs.json.from_file(schema_location)
    schema = ovs.db.schema.DbSchema.from_json(schema_json)
    print('# Do NOT edit.')
    print('# This is automatically generated.')
    print('# created based on version %s' % (schema.version or 'unknown'))
    print('')
    print('')
    print('%s_DB_NAME = \'%s\'' % (prefix, schema.name))
    for table in sorted(schema.tables.values(),
                        key=operator.attrgetter('name')):
        print('')
        print('%s_TABLE_%s = \'%s\'' % (prefix,
                                        table.name.upper(), table.name))
        for column in sorted(table.columns.values(),
                             key=operator.attrgetter('name')):
            print('%s_%s_COL_%s = \'%s\'' % (prefix, table.name.upper(),
                                             column.name.upper(),
                                             column.name))
def main():
    """Entry point: read schema path and prefix from argv, emit constants.

    Exits with status 1 when too few arguments are given.
    """
    if len(sys.argv) <= 2:
        print('Usage: %s <schema file> <prefix>' % sys.argv[0])
        # Bug fix: previously fell through and raised IndexError on
        # sys.argv[1] below when arguments were missing.
        sys.exit(1)
    location = sys.argv[1]
    prefix = sys.argv[2]
    schema_print(location, prefix)
if __name__ == '__main__':
    main()
|
{
"content_hash": "ae6917232f57d3d5320ec45c6d772e0a",
"timestamp": "",
"source": "github",
"line_count": 1894,
"max_line_length": 79,
"avg_line_length": 37.95776135163675,
"alnum_prop": 0.5452345184443331,
"repo_name": "sivaramakrishnansr/ryu",
"id": "8670eb355e265ffd6d39483247e70ae513daa945",
"size": "72576",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "ryu/lib/ovs/vsctl.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "8269"
},
{
"name": "CSS",
"bytes": "306"
},
{
"name": "Erlang",
"bytes": "871862"
},
{
"name": "HTML",
"bytes": "306"
},
{
"name": "JavaScript",
"bytes": "8436"
},
{
"name": "Makefile",
"bytes": "1213"
},
{
"name": "Python",
"bytes": "5239853"
},
{
"name": "Shell",
"bytes": "14253"
}
],
"symlink_target": ""
}
|
# Public re-exports for the TS_V04_01 accelerator model package.
from .lattice import default_optics_mode
from .lattice import energy
from .accelerator import default_vchamber_on
from .accelerator import default_radiation_on
from .accelerator import accelerator_data
from .accelerator import create_accelerator
from .families import get_family_data
from .families import family_mapping
from .families import get_section_name_mapping
# -- default accelerator values for TS_V04_01--
# Lattice version string taken from the accelerator metadata dict.
lattice_version = accelerator_data['lattice_version']
|
{
"content_hash": "eb7aac8e1b24a6ad91b2121ed46edab5",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 53,
"avg_line_length": 33.785714285714285,
"alnum_prop": 0.8160676532769556,
"repo_name": "lnls-fac/sirius",
"id": "0843904ec2c6852ce9c9cc18e0320117f685ef1a",
"size": "474",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymodels/TS_V04_01/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1616466"
}
],
"symlink_target": ""
}
|
from google.cloud import dataform_v1beta1
async def sample_get_compilation_result():
    """Fetch one Dataform compilation result and print it."""
    # Create the async client and build the request in one step each.
    client = dataform_v1beta1.DataformAsyncClient()
    request = dataform_v1beta1.GetCompilationResultRequest(name="name_value")
    # Issue the RPC and show the server's response.
    response = await client.get_compilation_result(request=request)
    print(response)
# [END dataform_v1beta1_generated_Dataform_GetCompilationResult_async]
|
{
"content_hash": "50322ec7a55caa4a1ab253df7a225915",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 70,
"avg_line_length": 26.473684210526315,
"alnum_prop": 0.731610337972167,
"repo_name": "googleapis/python-dataform",
"id": "6e42896d3ba0443b8646f8028fbef2aa4252243b",
"size": "1905",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/dataform_v1beta1_generated_dataform_get_compilation_result_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "951895"
},
{
"name": "Shell",
"bytes": "30666"
}
],
"symlink_target": ""
}
|
from DRMAA import *
import sys, os
#######################
# CREATE JOB TEMPLATE #
#######################
def setup_job_template():
    """Allocate and configure a DRMAA job template running '/bin/ls -l -a'.

    Exits the process on any DRMAA error; returns the job template handle.
    NOTE: this is Python 2 code (print >> syntax).
    """
    args=["-l", "-a"]
    (result, jt, error)=drmaa_allocate_job_template()
    if result != DRMAA_ERRNO_SUCCESS:
        print >> sys.stderr, "drmaa_allocate_job_template() failed: %s" % (error)
        sys.exit(-1)
    # Run the job in the current working directory.
    cwd=os.getcwd()
    (result, error)=drmaa_set_attribute(jt, DRMAA_WD, cwd)
    if result != DRMAA_ERRNO_SUCCESS:
        print >> sys.stderr, "Error setting job template attribute: %s" % (error)
        sys.exit(-1)
    # NOTE(review): the results of the next two calls are not checked;
    # only the vector-attribute call below is verified.
    (result, error)=drmaa_set_attribute(jt, DRMAA_JOB_NAME, "ht2")
    (result, error)=drmaa_set_attribute(jt, DRMAA_REMOTE_COMMAND, "/bin/ls")
    (result, error)=drmaa_set_vector_attribute(jt, DRMAA_V_ARGV, args)
    if result != DRMAA_ERRNO_SUCCESS:
        print >> sys.stderr,"Error setting remote command arguments: %s" % (error)
        sys.exit(-1)
    # stdout/stderr files are suffixed with the GridWay job id.
    (result, error)=drmaa_set_attribute(jt, DRMAA_OUTPUT_PATH,
                ":stdout."+DRMAA_GW_JOB_ID)
    (result, error)=drmaa_set_attribute(jt, DRMAA_ERROR_PATH,
                ":stderr."+DRMAA_GW_JOB_ID)
    return jt
#############
# MAIN CODE #
#############
# Initialize the DRMAA session; abort on failure.
(result, error)=drmaa_init(None)
if result != DRMAA_ERRNO_SUCCESS:
    print >> sys.stderr, "drmaa_init() failed: %s" % (error)
    sys.exit(-1)
else:
    print "drmaa_init() success"
# Build the template and submit the job.
jt=setup_job_template()
(result, job_id, error)=drmaa_run_job(jt)
if result != DRMAA_ERRNO_SUCCESS:
    print >> sys.stderr, "drmaa_run_job() failed: %s" % (error)
    sys.exit(-1)
print >> sys.stderr, "Job successfully submited ID: %s" % (job_id)
# Block until the job finishes and collect its resource usage.
(result, job_id_out, stat, rusage, error)=drmaa_wait(job_id, DRMAA_TIMEOUT_WAIT_FOREVER)
if result != DRMAA_ERRNO_SUCCESS:
    print >> sys.stderr, "drmaa_wait() failed: %s" % (error)
    sys.exit(-1)
(result, stat, error)=drmaa_wexitstatus(stat)
# NOTE(review): this message prints job_id after "usage:" — it looks like
# rusage (or nothing) was intended here; confirm before changing.
print >> sys.stderr, "Job finished with exit code %s, usage: %s" % (stat, job_id)
# Iterate over the rusage attribute list until exhausted.
(result, attr_value)=drmaa_get_next_attr_value(rusage)
while result != DRMAA_ERRNO_NO_MORE_ELEMENTS:
    print >> sys.stderr, "\t%s" % (attr_value)
    (result, attr_value)=drmaa_get_next_attr_value(rusage)
result=drmaa_release_attr_values(rusage)
# ----- Finalize -----
(result, error)=drmaa_delete_job_template(jt)
if result != DRMAA_ERRNO_SUCCESS:
    print >> sys.stderr, "drmaa_delete_job_template() failed: %s" % (error)
    sys.exit(-1)
(result, error)=drmaa_exit()
if result != DRMAA_ERRNO_SUCCESS:
    print >> sys.stderr, "drmaa_exit() failed: %s" % (error)
    sys.exit(-1)
|
{
"content_hash": "c2bd1b13bba90dcdbbaf942d5357e3ad",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 88,
"avg_line_length": 26.51086956521739,
"alnum_prop": 0.6523165231652317,
"repo_name": "oldpatricka/Gridway",
"id": "9453ef63177d8fe6492255ddec842cfcc2ffae69",
"size": "3248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/drmaa/drmaa_python/howto2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2261450"
},
{
"name": "Java",
"bytes": "901118"
},
{
"name": "Perl",
"bytes": "18647"
},
{
"name": "Python",
"bytes": "28476"
},
{
"name": "Ruby",
"bytes": "44793"
},
{
"name": "Shell",
"bytes": "305352"
}
],
"symlink_target": ""
}
|
import collections
import contextlib
import sys
import time
from neutron_lib.agent import constants as agent_consts
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources as local_resources
from neutron_lib import constants
from neutron_lib import context
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_service import service
from oslo_utils import excutils
from osprofiler import profiler
from neutron.agent.l2 import l2_agent_extensions_manager as ext_manager
from neutron.agent import rpc as agent_rpc
from neutron.agent import securitygroups_rpc as agent_sg_rpc
from neutron.api.rpc.callbacks import resources
from neutron.api.rpc.handlers import securitygroups_rpc as sg_rpc
from neutron.common import config as common_config
from neutron.common import topics
from neutron.plugins.ml2.drivers.agent import _agent_manager_base as amb
from neutron.plugins.ml2.drivers.agent import capabilities
from neutron.plugins.ml2.drivers.agent import config as cagt_config # noqa
LOG = logging.getLogger(__name__)
@profiler.trace_cls("rpc")
class CommonAgentLoop(service.Service):
    """Generic L2 agent daemon loop shared by ML2 agent drivers.

    Periodically scans devices via the driver-supplied manager, syncs
    port state with the Neutron plugin over RPC, and reports agent
    health via the state RPC API.
    """
    def __init__(self, manager, polling_interval,
                 quitting_rpc_timeout, agent_type, agent_binary):
        """Constructor.
        :param manager: the manager object containing the impl specifics
        :param polling_interval: interval (secs) to poll DB.
        :param quitting_rpc_timeout: timeout in seconds for rpc calls after
               stop is called.
        :param agent_type: Specifies the type of the agent
        :param agent_binary: The agent binary string
        """
        super(CommonAgentLoop, self).__init__()
        self.mgr = manager
        self._validate_manager_class()
        self.polling_interval = polling_interval
        self.quitting_rpc_timeout = quitting_rpc_timeout
        self.agent_type = agent_type
        self.agent_binary = agent_binary
    def _validate_manager_class(self):
        """Exit the process if the manager is not a CommonAgentManagerBase."""
        if not isinstance(self.mgr,
                          amb.CommonAgentManagerBase):
            LOG.error("Manager class must inherit from "
                      "CommonAgentManagerBase to ensure CommonAgent "
                      "works properly.")
            sys.exit(1)
    def start(self):
        """Set up RPC/extensions, start heartbeats, then run daemon_loop."""
        # stores all configured ports on agent
        self.network_ports = collections.defaultdict(list)
        # flag to do a sync after revival
        self.fullsync = False
        self.context = context.get_admin_context_without_session()
        self.setup_rpc()
        self.init_extension_manager(self.connection)
        configurations = {'extensions': self.ext_manager.names()}
        configurations.update(self.mgr.get_agent_configurations())
        #TODO(mangelajo): optimize resource_versions (see ovs agent)
        self.agent_state = {
            'binary': self.agent_binary,
            'host': cfg.CONF.host,
            'topic': constants.L2_AGENT_TOPIC,
            'configurations': configurations,
            'agent_type': self.agent_type,
            'resource_versions': resources.LOCAL_RESOURCE_VERSIONS,
            'start_flag': True}
        report_interval = cfg.CONF.AGENT.report_interval
        if report_interval:
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)
        capabilities.notify_init_event(self.agent_type, self)
        # The initialization is complete; we can start receiving messages
        self.connection.consume_in_threads()
        self.daemon_loop()
    def stop(self, graceful=True):
        """Stop the agent, optionally bounding outstanding RPC call time."""
        LOG.info("Stopping %s agent.", self.agent_type)
        if graceful and self.quitting_rpc_timeout:
            self.set_rpc_timeout(self.quitting_rpc_timeout)
        super(CommonAgentLoop, self).stop(graceful)
    def reset(self):
        """Re-apply logging configuration (e.g. on SIGHUP)."""
        common_config.setup_logging()
    def _report_state(self):
        """Report agent state to the plugin; request fullsync on revival."""
        try:
            devices = len(self.mgr.get_all_devices())
            self.agent_state.get('configurations')['devices'] = devices
            agent_status = self.state_rpc.report_state(self.context,
                                                       self.agent_state,
                                                       True)
            if agent_status == agent_consts.AGENT_REVIVED:
                LOG.info('%s Agent has just been revived. '
                         'Doing a full sync.',
                         self.agent_type)
                self.fullsync = True
            # we only want to update resource versions on startup
            self.agent_state.pop('resource_versions', None)
            self.agent_state.pop('start_flag', None)
        except Exception:
            LOG.exception("Failed reporting state!")
    def _validate_rpc_endpoints(self):
        """Exit if the first RPC endpoint has the wrong callback base class."""
        if not isinstance(self.endpoints[0],
                          amb.CommonAgentManagerRpcCallBackBase):
            LOG.error("RPC Callback class must inherit from "
                      "CommonAgentManagerRpcCallBackBase to ensure "
                      "CommonAgent works properly.")
            sys.exit(1)
    def setup_rpc(self):
        """Create plugin/security-group RPC clients and consumer connection."""
        self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
        self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
        self.sg_agent = agent_sg_rpc.SecurityGroupAgentRpc(
            self.context, self.sg_plugin_rpc, defer_refresh_firewall=True)
        self.agent_id = self.mgr.get_agent_id()
        LOG.info("RPC agent_id: %s", self.agent_id)
        self.topic = topics.AGENT
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
        # RPC network init
        # Handle updates from service
        self.rpc_callbacks = self.mgr.get_rpc_callbacks(self.context, self,
                                                        self.sg_agent)
        self.endpoints = [self.rpc_callbacks]
        self._validate_rpc_endpoints()
        # Define the listening consumers for the agent
        consumers = self.mgr.get_rpc_consumers()
        self.connection = agent_rpc.create_consumers(self.endpoints,
                                                     self.topic,
                                                     consumers,
                                                     start_listening=False)
    def init_extension_manager(self, connection):
        """Load and initialize the configured L2 agent extensions."""
        ext_manager.register_opts(cfg.CONF)
        self.ext_manager = (
            ext_manager.L2AgentExtensionsManager(cfg.CONF))
        self.ext_manager.initialize(
            connection, self.mgr.get_extension_driver_type())
    def _clean_network_ports(self, device):
        """Remove a device from the per-network port cache.

        Returns the removed port's port_id, or None (implicitly) when the
        device is not cached.
        """
        for netid, ports_list in self.network_ports.items():
            for port_data in ports_list:
                if device == port_data['device']:
                    ports_list.remove(port_data)
                    if ports_list == []:
                        self.network_ports.pop(netid)
                    return port_data['port_id']
    def _update_network_ports(self, network_id, port_id, device):
        """Re-home a device under network_id in the port cache."""
        self._clean_network_ports(device)
        self.network_ports[network_id].append({
            "port_id": port_id,
            "device": device
        })
    def process_network_devices(self, device_info):
        """Process added/updated/removed devices; return True if resync needed."""
        resync_a = False
        resync_b = False
        self.sg_agent.setup_port_filters(device_info.get('added'),
                                         device_info.get('updated'))
        # Updated devices are processed the same as new ones, as their
        # admin_state_up may have changed. The set union prevents duplicating
        # work when a device is new and updated in the same polling iteration.
        devices_added_updated = (set(device_info.get('added'))
                                 | set(device_info.get('updated')))
        if devices_added_updated:
            resync_a = self.treat_devices_added_updated(devices_added_updated)
        if device_info.get('removed'):
            resync_b = self.treat_devices_removed(device_info['removed'])
        # If one of the above operations fails => resync with plugin
        return (resync_a | resync_b)
    def treat_devices_added_updated(self, devices):
        """Fetch details for devices and wire them up; True means resync."""
        try:
            devices_details_list = self.plugin_rpc.get_devices_details_list(
                self.context, devices, self.agent_id, host=cfg.CONF.host)
        except Exception:
            LOG.exception("Unable to get port details for %s", devices)
            # resync is needed
            return True
        for device_details in devices_details_list:
            self._process_device_if_exists(device_details)
        # no resync is needed
        return False
    def _process_device_if_exists(self, device_details):
        """Plug and configure one device, tolerating its disappearance."""
        # ignore exceptions from devices that disappear because they will
        # be handled as removed in the next iteration
        device = device_details['device']
        with self._ignore_missing_device_exceptions(device):
            LOG.debug("Port %s added", device)
            if 'port_id' in device_details:
                LOG.info("Port %(device)s updated. Details: %(details)s",
                         {'device': device, 'details': device_details})
                self.mgr.setup_arp_spoofing_protection(device,
                                                       device_details)
                segment = amb.NetworkSegment(
                    device_details.get('network_type'),
                    device_details['physical_network'],
                    device_details.get('segmentation_id'),
                    device_details.get('mtu')
                )
                network_id = device_details['network_id']
                self.rpc_callbacks.add_network(network_id, segment)
                interface_plugged = self.mgr.plug_interface(
                    network_id, segment,
                    device, device_details['device_owner'])
                # REVISIT(scheuran): Changed the way how ports admin_state_up
                # is implemented.
                #
                # Old lb implementation:
                # - admin_state_up: ensure that tap is plugged into bridge
                # - admin_state_down: remove tap from bridge
                # New lb implementation:
                # - admin_state_up: set tap device state to up
                # - admin_state_down: set tap device state to down
                #
                # However both approaches could result in races with
                # nova/libvirt and therefore to an invalid system state in the
                # scenario, where an instance is booted with a port configured
                # with admin_state_up = False:
                #
                # Libvirt does the following actions in exactly
                # this order (see libvirt virnetdevtap.c)
                # 1) Create the tap device, set its MAC and MTU
                # 2) Plug the tap into the bridge
                # 3) Set the tap online
                #
                # Old lb implementation:
                # A race could occur, if the lb agent removes the tap device
                # right after step 1). Then libvirt will add it to the bridge
                # again in step 2).
                # New lb implementation:
                # The race could occur if the lb-agent sets the taps device
                # state to down right after step 2). In step 3) libvirt
                # might set it to up again.
                #
                # This is not an issue if an instance is booted with a port
                # configured with admin_state_up = True. Libvirt would just
                # set the tap device up again.
                #
                # This refactoring is recommended for the following reasons:
                # 1) An existing race with libvirt caused by the behavior of
                # the old implementation. See Bug #1312016
                # 2) The new code is much more readable
                if interface_plugged:
                    self.mgr.ensure_port_admin_state(
                        device, device_details['admin_state_up'])
                # update plugin about port status if admin_state is up
                if device_details['admin_state_up']:
                    if interface_plugged:
                        self.plugin_rpc.update_device_up(self.context,
                                                         device,
                                                         self.agent_id,
                                                         cfg.CONF.host)
                    else:
                        self.plugin_rpc.update_device_down(self.context,
                                                           device,
                                                           self.agent_id,
                                                           cfg.CONF.host)
                self._update_network_ports(device_details['network_id'],
                                           device_details['port_id'],
                                           device_details['device'])
                self.ext_manager.handle_port(self.context, device_details)
                registry.notify(local_resources.PORT_DEVICE,
                                events.AFTER_UPDATE, self,
                                context=self.context,
                                device_details=device_details)
            else:
                LOG.info("Device %s not defined on plugin", device)
    @contextlib.contextmanager
    def _ignore_missing_device_exceptions(self, device):
        """Swallow any exception iff the device vanished during processing."""
        try:
            yield
        except Exception:
            with excutils.save_and_reraise_exception() as ectx:
                if device not in self.mgr.get_all_devices():
                    ectx.reraise = False
                    LOG.debug("%s was removed during processing.", device)
    def treat_devices_removed(self, devices):
        """Tear down removed devices; return True if a resync is needed."""
        resync = False
        self.sg_agent.remove_devices_filter(devices)
        for device in devices:
            LOG.info("Attachment %s removed", device)
            details = None
            try:
                details = self.plugin_rpc.update_device_down(self.context,
                                                             device,
                                                             self.agent_id,
                                                             cfg.CONF.host)
            except Exception:
                LOG.exception("Error occurred while removing port %s",
                              device)
                resync = True
            if details and details['exists']:
                LOG.info("Port %s updated.", device)
            else:
                LOG.debug("Device %s not defined on plugin", device)
            port_id = self._clean_network_ports(device)
            try:
                self.ext_manager.delete_port(self.context,
                                             {'device': device,
                                              'port_id': port_id})
            except Exception:
                LOG.exception("Error occurred while processing extensions "
                              "for port removal %s", device)
                resync = True
            registry.notify(local_resources.PORT_DEVICE, events.AFTER_DELETE,
                            self, context=self.context, device=device,
                            port_id=port_id)
        self.mgr.delete_arp_spoofing_protection(devices)
        return resync
    @staticmethod
    def _get_devices_locally_modified(timestamps, previous_timestamps):
        """Returns devices with previous timestamps that do not match new.
        If a device did not have a timestamp previously, it will not be
        returned because this means it is new.
        """
        return {device for device, timestamp in timestamps.items()
                if previous_timestamps.get(device) and
                timestamp != previous_timestamps.get(device)}
    def scan_devices(self, previous, sync):
        """Compute added/updated/removed device sets for this iteration."""
        device_info = {}
        updated_devices = self.rpc_callbacks.get_and_clear_updated_devices()
        current_devices = self.mgr.get_all_devices()
        device_info['current'] = current_devices
        if previous is None:
            # This is the first iteration of daemon_loop().
            previous = {'added': set(),
                        'current': set(),
                        'updated': set(),
                        'removed': set(),
                        'timestamps': {}}
            # clear any orphaned ARP spoofing rules (e.g. interface was
            # manually deleted)
            self.mgr.delete_unreferenced_arp_protection(current_devices)
        # check to see if any devices were locally modified based on their
        # timestamps changing since the previous iteration. If a timestamp
        # doesn't exist for a device, this calculation is skipped for that
        # device.
        device_info['timestamps'] = self.mgr.get_devices_modified_timestamps(
            current_devices)
        locally_updated = self._get_devices_locally_modified(
            device_info['timestamps'], previous['timestamps'])
        if locally_updated:
            LOG.debug("Adding locally changed devices to updated set: %s",
                      locally_updated)
            updated_devices |= locally_updated
        if sync:
            # This is the first iteration, or the previous one had a problem.
            # Re-add all existing devices.
            device_info['added'] = current_devices
            # Retry cleaning devices that may not have been cleaned properly.
            # And clean any that disappeared since the previous iteration.
            device_info['removed'] = (previous['removed'] | previous['current']
                                      - current_devices)
            # Retry updating devices that may not have been updated properly.
            # And any that were updated since the previous iteration.
            # Only update devices that currently exist.
            # NOTE(review): '&' binds tighter than '|', so previous['updated']
            # is NOT intersected with current_devices here — confirm intended.
            device_info['updated'] = (previous['updated'] | updated_devices
                                      & current_devices)
        else:
            device_info['added'] = current_devices - previous['current']
            device_info['removed'] = previous['current'] - current_devices
            device_info['updated'] = updated_devices & current_devices
        return device_info
    def _device_info_has_changes(self, device_info):
        """True when any device set in device_info is non-empty."""
        return (device_info.get('added')
                or device_info.get('updated')
                or device_info.get('removed'))
    def daemon_loop(self):
        """Main loop: scan devices each interval and process any changes."""
        LOG.info("%s Agent RPC Daemon Started!", self.agent_type)
        device_info = None
        sync = True
        while True:
            start = time.time()
            if self.fullsync:
                sync = True
                self.fullsync = False
            if sync:
                LOG.info("%s Agent out of sync with plugin!",
                         self.agent_type)
            device_info = self.scan_devices(previous=device_info, sync=sync)
            sync = False
            if (self._device_info_has_changes(device_info)
                    or self.sg_agent.firewall_refresh_needed()):
                LOG.debug("Agent loop found changes! %s", device_info)
                try:
                    sync = self.process_network_devices(device_info)
                except Exception:
                    LOG.exception("Error in agent loop. Devices info: %s",
                                  device_info)
                    sync = True
            # sleep till end of polling interval
            elapsed = (time.time() - start)
            if (elapsed < self.polling_interval):
                time.sleep(self.polling_interval - elapsed)
            else:
                LOG.debug("Loop iteration exceeded interval "
                          "(%(polling_interval)s vs. %(elapsed)s)!",
                          {'polling_interval': self.polling_interval,
                           'elapsed': elapsed})
    def set_rpc_timeout(self, timeout):
        """Apply a timeout to all RPC clients (used during graceful stop)."""
        for rpc_api in (self.plugin_rpc, self.sg_plugin_rpc,
                        self.state_rpc):
            rpc_api.client.timeout = timeout
|
{
"content_hash": "662ae001a75c045f035e08a3947a99ce",
"timestamp": "",
"source": "github",
"line_count": 456,
"max_line_length": 79,
"avg_line_length": 44.800438596491226,
"alnum_prop": 0.5534778990650546,
"repo_name": "eayunstack/neutron",
"id": "5f781481de85149477c55b55a1099e91353adc0c",
"size": "21060",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron/plugins/ml2/drivers/agent/_common_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "10593193"
},
{
"name": "Shell",
"bytes": "8804"
}
],
"symlink_target": ""
}
|
from project.extensions import db
class Problem(db.Model):
    """One problem belonging to a contest, numbered uniquely per contest."""
    __tablename__ = 'problems'
    id = db.Column(db.Integer, primary_key=True)
    # Problem number shown to users; unique within a contest (see table args).
    number = db.Column(db.Integer, nullable=False)
    rate = db.Column(db.SMALLINT)
    time_limit = db.Column(db.SMALLINT, nullable=False)  ## seconds
    space_limit = db.Column(db.SMALLINT, nullable=False)  ## megabytes
    # Problem statement text, capped at 100 KiB.
    body = db.Column(db.String(100 * 1024), nullable=False)
    fk_contest = db.Column(db.Integer, db.ForeignKey('contests.id'), index=True, nullable=False)
    contest = db.relationship('Contest', foreign_keys='Problem.fk_contest')
    # A contest cannot contain two problems with the same number.
    __table_args__ = (db.UniqueConstraint('number', 'fk_contest'), )
|
{
"content_hash": "0c15ee08e2e36833d48c6512ef5e5e52",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 96,
"avg_line_length": 32.04761904761905,
"alnum_prop": 0.6835066864784547,
"repo_name": "ez4u/ijust_backend",
"id": "5fa531eebe31fe2082c1839439235ae94185e4b2",
"size": "716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/models/problem.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31"
},
{
"name": "HTML",
"bytes": "229"
},
{
"name": "Python",
"bytes": "24085"
}
],
"symlink_target": ""
}
|
import json
import codecs
from collections import defaultdict
import re
def make_label_kb(kbfilestr, lang):
    """Map entity ids (Q...) to their label string for one language.

    Lines look like 'Q42 label {en:...}'; only rows whose third field
    starts with '{<lang>:' are kept.
    NOTE: Python 2 only — calls .decode() on an already-unicode slice.
    """
    with codecs.open(kbfilestr, 'r', encoding='utf-8') as kb:
        labs = dict()
        for line in kb:
            lineitem = line.split()
            if lineitem[0].startswith('Q') and lineitem[1].startswith('label') and lineitem[2].startswith('{'+lang+':'):
                # slice off '{en:' (4 chars incl. brace) via [3:] after split
                labs[lineitem[0]] = line.split('{')[1].split('}')[0][3:].decode('utf-8')
        return labs
def make_counts_sitelinks_kb(kbfilestr):
    """Count 'link' rows per entity id in a knowledge-base dump file.

    Returns a defaultdict(int) mapping the first field of each line whose
    second field starts with 'link' to the number of such lines. Lines
    with fewer than two fields are ignored.
    """
    counts = defaultdict(int)
    with codecs.open(kbfilestr, 'r', encoding='utf-8') as dump:
        for row in dump:
            fields = row.split()
            if len(fields) > 1 and fields[1].startswith('link'):
                counts[fields[0]] += 1
    return counts
def make_sitelink_reverse_index(kbfile):
    '''This makes a dict of sitelink targets whose value is a list of qids
    of which it is a part.

    Bug fix: the original body referenced the undefined names `kbfilestr`
    and `slcounts`, contained a no-op bare `re.compile`, and never
    returned anything. It now reads `kbfile`, accumulates qids per link
    target (third field of each 'link' line), and returns the index.
    '''
    reverse_index = defaultdict(list)
    with codecs.open(kbfile, 'r', encoding='utf-8') as kb:
        for line in kb:
            lineitem = line.split()
            try:
                if lineitem[1].startswith('link'):
                    # lineitem[2] is the link target; lineitem[0] the qid
                    reverse_index[lineitem[2]].append(lineitem[0])
            except IndexError:
                # line has fewer than three fields -- skip it
                pass
    return reverse_index
def make_relations(kbfilestr, pid):
    """Collect all triples for property `pid` whose subject is an entity.

    Each matching line yields a dict in mathematical aRb notation:
    {'a': subject, 'r': property, 'b': object}.
    """
    with codecs.open(kbfilestr, 'r', encoding='utf-8') as kb:
        rows = [ln.split() for ln in kb]
    return [{'a': t[0], 'r': t[1], 'b': t[2]}
            for t in rows
            if t[0].startswith('Q') and t[1] == pid]
if __name__ == '__main__':
    # Feature switches: only the enabled stages below run.
    labels = False
    slcount = True
    relations = False
    kbfilestr = '1mkb.txt'
    langname = 'en'
    if relations:
        # Dump P31 (instance-of) triples as JSON and plain text.
        outfile = open('P31relations.json', 'w')
        relations = make_relations(kbfilestr, 'P31')
        json.dump(relations, outfile)
        outfile.close()
        textfile = open('P31relations.txt', 'w')
        text = ''
        for relation in relations:
            line = relation['a'] + ' ' + relation['r'] + ' ' + relation['b'] + '\n'
            text += line
        textfile.write(text)
        textfile.close()
    if labels:
        # Dump per-language labels as JSON (Python 2 print statement below).
        outfile = open(langname+'labels.json', 'w')
        labels = make_label_kb(kbfilestr, langname)
        print labels
        json.dump(labels, outfile)
        outfile.close()
    if slcount:
        # Dump per-entity sitelink counts as JSON.
        outfile = open(langname+'sitelinkcounts.json', 'w')
        slcounts = make_counts_sitelinks_kb(kbfilestr)
        json.dump(slcounts, outfile)
        outfile.close()
        print slcounts
|
{
"content_hash": "8cd4b8388aecfc5f61a8556af00772a8",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 120,
"avg_line_length": 33.47674418604651,
"alnum_prop": 0.5477596387634596,
"repo_name": "notconfusing/wikidata-subclass-chains",
"id": "40354591100d2c9070c6b71f5d0d1c314240ed65",
"size": "2879",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kbutils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12321"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import PySide.QtGui as QtGui
import PySide.QtCore as QtCore
class ModifiedMarkdownHighlighter(QtGui.QSyntaxHighlighter):
    """Syntax highlighter for a Markdown dialect with dates, tags and
    setext-style underlined titles.

    Single-line rules (dates, italics, bold, inline code, tags) are plain
    (pattern, format) pairs; multi-line ``~~~`` code blocks and title
    underlines are handled statefully in highlightBlock.
    """
    def __init__(self, parent=None):
        QtGui.QSyntaxHighlighter.__init__(self, parent)
        # Initialise the highlighting rules
        self.highlightingRules = []
        self.underliningRules = []
        # Date rules
        # Bug fix: the capture group around the dd/mm/yyyy digits was never
        # closed, making the pattern invalid so dates never highlighted.
        dateFormat = QtGui.QTextCharFormat()
        dateFormat.setForeground(QtCore.Qt.darkGray)
        self.highlightingRules.append(
            (QtCore.QRegExp("\*{1,1}(\\d{1,2}/\\d{2,2}/\\d{4,4})\*{1,1}"),
             dateFormat))
        # Italics rules
        italicsFormat = QtGui.QTextCharFormat()
        italicsFormat.setFontItalic(True)
        italicsFormat.setForeground(QtCore.Qt.darkGreen)
        self.highlightingRules.append(
            (QtCore.QRegExp("\*{1,1}([^\n^\*]+)\*{1,1}"), italicsFormat))
        # Bold rules
        boldFormat = QtGui.QTextCharFormat()
        boldFormat.setForeground(QtCore.Qt.darkBlue)
        boldFormat.setFontWeight(QtGui.QFont.Bold)
        self.highlightingRules.append(
            (QtCore.QRegExp("\*{2,2}([^\n^\*]+)\*{2,2}"), boldFormat))
        # Code rules (kept on self: also used for multi-line blocks)
        self.codeFormat = QtGui.QTextCharFormat()
        self.codeFormat.setForeground(QtCore.Qt.darkBlue)
        self.highlightingRules.append(
            (QtCore.QRegExp("`{1,1}([^\n^`]+)`{1,1}"), self.codeFormat))
        # Tags
        tagFormat = QtGui.QTextCharFormat()
        tagFormat.setForeground(QtCore.Qt.darkRed)
        self.highlightingRules.append(
            (QtCore.QRegExp(
                "^#\s?((\w(?:[-\w .]*\w)?)+)(,\s*(\w(?:[-\w .]*\w)?)+)*$"),
                tagFormat))
        # Code blocks (several lines)
        self.blockStartExpression = QtCore.QRegExp("^~~~(\s.*)?$")
        self.blockEndExpression = QtCore.QRegExp("^~~~$")
        # Main title rule
        self.mainTitleUnderlineExpression = QtCore.QRegExp("^={2,}$")
        mainTitleFormat = QtGui.QTextCharFormat()
        mainTitleFormat.setForeground(QtCore.Qt.darkGreen)
        # Sections rule
        self.sectionUnderlineExpression = QtCore.QRegExp("^-{2,}$")
        sectionFormat = QtGui.QTextCharFormat()
        sectionFormat.setForeground(QtCore.Qt.darkBlue)
        self.underliningRules.extend(
            [(self.mainTitleUnderlineExpression, mainTitleFormat),
             (self.sectionUnderlineExpression, sectionFormat)])
    def highlightBlock(self, text):
        """Highlight one text block; block state 1 = inside a ~~~ code
        block, 2 = a title line already highlighted via look-ahead."""
        # Deal first with simple expressions (one line)
        # Note: the _format syntax is there to avoid naming conflict with the
        # restricted word `format`.
        for pattern, _format in self.highlightingRules:
            expression = QtCore.QRegExp(pattern)
            index = expression.indexIn(text)
            while index >= 0:
                length = expression.matchedLength()
                self.setFormat(index, length, _format)
                index = expression.indexIn(text, index + length)
        # Deal with block type highlighting
        self.setCurrentBlockState(0)
        startIndex = 0
        if self.previousBlockState() != 1:
            startIndex = self.blockStartExpression.indexIn(text)
        while startIndex >= 0:
            endIndex = self.blockEndExpression.indexIn(
                text, startIndex)
            if endIndex == -1:
                # No closing fence on this line: stay inside the code block.
                self.setCurrentBlockState(1)
                blockLength = len(text) - startIndex
            elif endIndex == 0:
                # This is a symmetric code block
                if self.previousBlockState() == 0:
                    self.setCurrentBlockState(1)
                    blockLength = len(text) - startIndex
                else:
                    blockLength = endIndex - startIndex + \
                        self.blockEndExpression.matchedLength()
            self.setFormat(startIndex, blockLength,
                           self.codeFormat)
            startIndex = self.blockStartExpression.indexIn(
                text, startIndex + blockLength)
        # Match the underlines
        if startIndex == -1:  # If outside of a block
            if self.previousBlockState() != 2:
                for pattern, _format in self.underliningRules:
                    expression = QtCore.QRegExp(pattern)
                    # Match the next line for underlines
                    index = expression.indexIn(
                        self.currentBlock().next().text())
                    if index >= 0:
                        length = expression.matchedLength()
                        self.setFormat(index, length, _format)
                        # NOTE(review): index is advanced but not re-looped
                        # here, so only the first match is formatted — this
                        # mirrors the original behavior; confirm if intended.
                        index = expression.indexIn(text, index + length)
                        self.setCurrentBlockState(2)
            # If the title has already been highlighted, the state is '2'
            else:
                for pattern, _format in self.underliningRules:
                    expression = QtCore.QRegExp(pattern)
                    # Match the next line for underlines
                    index = expression.indexIn(text)
                    if index >= 0:
                        length = expression.matchedLength()
                        self.setFormat(index, length, _format)
                        index = expression.indexIn(text, index + length)
|
{
"content_hash": "790fcca6a2e9dc4aac6880d5b2f6e7c8",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 77,
"avg_line_length": 41.803149606299215,
"alnum_prop": 0.5705405914484837,
"repo_name": "egolus/NoteOrganiser",
"id": "4935f9a2b606dd09585af37a9bb74e91a232f904",
"size": "5309",
"binary": false,
"copies": "2",
"ref": "refs/heads/devel",
"path": "noteorganiser/syntax.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14284"
},
{
"name": "HTML",
"bytes": "38802"
},
{
"name": "Makefile",
"bytes": "380"
},
{
"name": "Python",
"bytes": "134104"
}
],
"symlink_target": ""
}
|
import os
import sys
from Bio import Seq
from Bio import SeqIO
from Bio import SeqRecord
import pandas as pd
import numpy as np
import ms_module as ms
import re
############################
from Bio import Entrez
from Bio import SeqIO
from StringIO import StringIO
import time
from urllib2 import HTTPError # for Python 2
import argparse
# do some arguments parsing to make the script looks civilized ...
parser = argparse.ArgumentParser()
parser.add_argument("-f","--raw_fetch", help="speicfy input data fname (with fetchid column(!), with/without path)",required=True)
# we don't need spectrum file for downloading proteins, it is too redundant for that purpose ...
parser.add_argument("--verbose", help="verbose output", action="store_true")
parser.add_argument("--prefix", help="specify common part of the path for peptide and spectrum files")
parser.add_argument("--email", help="Provide your email for NCBI servers abuse-feedback")
args = parser.parse_args()
# print args
###############################################
if args.verbose:
print "Verbose output is to follow ...\n\n"
###############################################
if args.prefix is not None:
raw_info_with_fetch_fname = os.path.join( args.prefix, args.raw_fetch )
else:
raw_info_with_fetch_fname = args.raw_fetch
# get the common path for later use ...
raw_path = os.path.dirname(raw_info_with_fetch_fname)
#
# don'r forget to provide you email
Entrez.email = args.email if args.email else "your_email@mail_server.com"
#
# peptides_with_fetch.csv
# raw_info_with_fetch_fname
# raw_info_with_fetch
raw_info_with_fetch = pd.read_csv(raw_info_with_fetch_fname)
assert 'fetchid' in raw_info_with_fetch.columns
############################################
# columns that needs to be delivered ... #
############################################
# A gsites, 1 per line
# B pept, 1 per line
# B1 enzyme, G or T, derive from 'Biological sample category', like this: {'TrypsinSample1':'T','GluC_Sample2':'G'}
# C peptide_start, 1 per line accordingly
# D all_uids, REPLACE WITH col:H
# E prot_seq, try to get those from NCBI, not from UniProt ...
# F protein, ??? sequence, name or what???
# G uid_max, UID for major form instead or something like that ...
# H prot_name, parsed out human-readable name from 'Protein Name'
# H1 gene_name, parsed out GN=xxx from 'Protein Name'
# I uniq_peptide_count, discrad that column ...
# J pept_probability, output number not the string - this would be the criteria
# K gsites_predicted, OK
# L gsites_predicted_number, OK
# M gsite_start, beware of 0 or 1 type of indexing ...
# N,O,P - gsites AAs in separate columns
# M1, NOP combined, gsite sequence basically!
# Q signal, from GeneBank record on the protein, simply Y,N on whether there is a 'Signal' in gb.
# R signal_location, location of the signal from Q
# S tm_span, Y,N just for the fact of having TM span as a protein feature.
#
#
print
print "Posting and fetching genebank records corresponding to the available FetchIDs from the Protein DB ..."
pulled_gb_recs_fname = os.path.join( raw_path, "pulled_proteins.gb" )
batch_size = 60
attempts_limit = 3
# THEN WE'D NEED TO DO POST AND ONLY AFTER EFETCH ...
# there might be some EMPTY fetchids ...
non_empty_fetchids = raw_info_with_fetch['fetchid'][raw_info_with_fetch['fetchid'].notnull()].apply(int)
with_empty_fetchids = raw_info_with_fetch[raw_info_with_fetch['fetchid'].isnull()]
#
print
print "BEWARE! There are %d empty fetchids ..."%with_empty_fetchids.shape[0]
print with_empty_fetchids[['Protein Name','Peptide Sequence']]
print
#
search_results = Entrez.read( Entrez.epost("protein", id=",".join( non_empty_fetchids.apply(str).unique() )) )
webenv = search_results["WebEnv"]
query_key = search_results["QueryKey"]
# download results in batches using history and coockies ....
count, = non_empty_fetchids.unique().shape
out_handle = open(pulled_gb_recs_fname, "w")
for start in range(0, count, batch_size):
end = min(count, start+batch_size)
print("Going to download record %i to %i" % (start+1, end))
attempt = 0
while attempt < attempts_limit:
attempt += 1
try:
fetch_handle = Entrez.efetch(db="protein", rettype="gb", retmode="text",
retstart=start, retmax=batch_size,
webenv=webenv, query_key=query_key)
break # skip subsequent attempts is succeeded ...
except HTTPError as err:
if 500 <= err.code <= 599:
print("Received error from server %s" % err)
print("Attempt %d of %d"%(attempt,attempts_limit))
# attempt += 1
time.sleep(15)
else:
print "oh Shut! %d"%attempt
raise
data = fetch_handle.read()
fetch_handle.close()
out_handle.write(data)
out_handle.close()
#
print "Fetched genebank records are stored in %s."%pulled_gb_recs_fname
print "Check for BioPython gb consistency before processing ..."
print "THE END"
|
{
"content_hash": "58d8cc58a3b92a5518863d06a44fda2e",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 130,
"avg_line_length": 32.33757961783439,
"alnum_prop": 0.6560961197557613,
"repo_name": "sergpolly/GlycoMadness",
"id": "8c31d3cdf9bba7291b24631dadeec7f27c137c31",
"size": "5077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SILAC_stage1b_pull_proteins_NCBI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "277468"
},
{
"name": "Shell",
"bytes": "26340"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from sentry.testutils import AcceptanceTestCase
class AuthTest(AcceptanceTestCase):
    """Acceptance tests covering the login page rendering and outcomes."""

    def enter_auth(self, username, password):
        """Open the login form, fill in both fields and submit it."""
        browser = self.browser
        browser.get('/auth/login/')
        for element_id, value in (('id_username', username),
                                  ('id_password', password)):
            browser.find_element_by_id(element_id).send_keys(value)
        submit = browser.find_element_by_xpath(
            "//button[contains(text(), 'Continue')]")
        submit.click()

    def test_renders(self):
        self.browser.get('/auth/login/')
        self.browser.snapshot(name='login')

    def test_no_credentials(self):
        self.enter_auth('', '')
        self.browser.snapshot(name='login fields required')

    def test_invalid_credentials(self):
        self.enter_auth('bad-username', 'bad-username')
        self.browser.snapshot(name='login fields invalid')

    def test_success(self):
        email = 'dummy@example.com'
        password = 'dummy'
        account = self.create_user(email=email)
        account.set_password(password)
        account.save()
        self.enter_auth(email, password)
        # wait for the post-login page to finish loading before snapshotting
        self.browser.wait_until_not('.loading')
        self.browser.snapshot(name='login success')
|
{
"content_hash": "ef956d4e2bc62554826b9466a395c71b",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 92,
"avg_line_length": 34.970588235294116,
"alnum_prop": 0.6534903280067283,
"repo_name": "gencer/sentry",
"id": "e5b3c913b348af62323fd9680d40b71deee32f58",
"size": "1189",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/acceptance/test_auth.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "318167"
},
{
"name": "HTML",
"bytes": "281885"
},
{
"name": "JavaScript",
"bytes": "2342569"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "8393"
},
{
"name": "Python",
"bytes": "28161647"
},
{
"name": "Ruby",
"bytes": "4233"
},
{
"name": "Shell",
"bytes": "2149"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import gzip
import os
import tempfile
import unittest
import zlib
from io import BytesIO, StringIO
from django.core.files import File
from django.core.files.base import ContentFile
from django.core.files.move import file_move_safe
from django.core.files.temp import NamedTemporaryFile
from django.core.files.uploadedfile import SimpleUploadedFile, UploadedFile
from django.utils import six
from django.utils._os import upath
try:
from PIL import Image
except ImportError:
Image = None
else:
from django.core.files import images
class FileTests(unittest.TestCase):
    """Behavioural tests for django.core.files.File and uploaded files."""

    def test_unicode_uploadedfile_name(self):
        uploaded = UploadedFile(name='¿Cómo?', content_type='text')
        self.assertIs(type(repr(uploaded)), str)

    def test_unicode_file_name(self):
        wrapped = File(None, 'djángö')
        self.assertIs(type(repr(wrapped)), str)

    def test_context_manager(self):
        backing = tempfile.TemporaryFile()
        wrapper = File(backing)
        with wrapper as handle:
            self.assertIs(wrapper, handle)
            self.assertFalse(handle.closed)
        # leaving the context closes both the wrapper and the backing file
        self.assertTrue(handle.closed)
        self.assertTrue(backing.closed)

    def test_namedtemporaryfile_closes(self):
        """
        The symbol django.core.files.NamedTemporaryFile is assigned as
        a different class on different operating systems. In
        any case, the result should minimally mock some of the API of
        tempfile.NamedTemporaryFile from the Python standard library.
        """
        named = NamedTemporaryFile()
        self.assertTrue(hasattr(named, "closed"))
        self.assertFalse(named.closed)
        named.close()
        self.assertTrue(named.closed)

    def test_file_mode(self):
        # Should not set mode to None if it is not present;
        # see #14681 - stdlib gzip module crashes if mode is set to None.
        uploaded = SimpleUploadedFile("mode_test.txt", b"content")
        self.assertFalse(hasattr(uploaded, 'mode'))
        gzip.GzipFile(fileobj=uploaded)

    def test_file_iteration(self):
        """
        File objects should yield lines when iterated over.
        Refs #22107.
        """
        wrapped = File(BytesIO(b'one\ntwo\nthree'))
        self.assertEqual(list(wrapped), [b'one\n', b'two\n', b'three'])

    def test_file_iteration_windows_newlines(self):
        """
        #8149 - File objects with \r\n line endings should yield lines
        when iterated over.
        """
        wrapped = File(BytesIO(b'one\r\ntwo\r\nthree'))
        self.assertEqual(list(wrapped), [b'one\r\n', b'two\r\n', b'three'])

    def test_file_iteration_mac_newlines(self):
        """
        #8149 - File objects with \r line endings should yield lines
        when iterated over.
        """
        wrapped = File(BytesIO(b'one\rtwo\rthree'))
        self.assertEqual(list(wrapped), [b'one\r', b'two\r', b'three'])

    def test_file_iteration_mixed_newlines(self):
        wrapped = File(BytesIO(b'one\rtwo\nthree\r\nfour'))
        self.assertEqual(list(wrapped), [b'one\r', b'two\n', b'three\r\n', b'four'])

    def test_file_iteration_with_unix_newline_at_chunk_boundary(self):
        wrapped = File(BytesIO(b'one\ntwo\nthree'))
        # Chunk size chosen so a chunk ends right after the first \n.
        wrapped.DEFAULT_CHUNK_SIZE = 4
        self.assertEqual(list(wrapped), [b'one\n', b'two\n', b'three'])

    def test_file_iteration_with_windows_newline_at_chunk_boundary(self):
        wrapped = File(BytesIO(b'one\r\ntwo\r\nthree'))
        # Chunk size chosen so a chunk boundary falls between \r and \n.
        wrapped.DEFAULT_CHUNK_SIZE = 4
        self.assertEqual(list(wrapped), [b'one\r\n', b'two\r\n', b'three'])

    def test_file_iteration_with_mac_newline_at_chunk_boundary(self):
        wrapped = File(BytesIO(b'one\rtwo\rthree'))
        # Chunk size chosen so a chunk ends right after the first \r.
        wrapped.DEFAULT_CHUNK_SIZE = 4
        self.assertEqual(list(wrapped), [b'one\r', b'two\r', b'three'])

    def test_file_iteration_with_text(self):
        wrapped = File(StringIO('one\ntwo\nthree'))
        self.assertEqual(list(wrapped), ['one\n', 'two\n', 'three'])
class NoNameFileTestCase(unittest.TestCase):
    """
    Other examples of unnamed files may be tempfile.SpooledTemporaryFile or
    urllib.urlopen()
    """

    def test_noname_file_default_name(self):
        unnamed = File(BytesIO(b'A file with no name'))
        self.assertEqual(unnamed.name, None)

    def test_noname_file_get_size(self):
        unnamed = File(BytesIO(b'A file with no name'))
        self.assertEqual(unnamed.size, 19)
class ContentFileTestCase(unittest.TestCase):
    """Tests for django.core.files.base.ContentFile."""

    def test_content_file_default_name(self):
        self.assertIsNone(ContentFile(b"content").name)

    def test_content_file_custom_name(self):
        """
        Test that the constructor of ContentFile accepts 'name' (#16590).
        """
        custom_name = "I can have a name too!"
        self.assertEqual(ContentFile(b"content", name=custom_name).name,
                         custom_name)

    def test_content_file_input_type(self):
        """
        Test that ContentFile can accept both bytes and unicode and that the
        retrieved content is of the same type.
        """
        self.assertIsInstance(ContentFile(b"content").read(), bytes)
        # On Python 3 text stays text; on Python 2 it is stored as bytes.
        expected_type = six.text_type if six.PY3 else bytes
        self.assertIsInstance(ContentFile("español").read(), expected_type)
class DimensionClosingBug(unittest.TestCase):
    """
    Test that get_image_dimensions() properly closes files (#8817)
    """

    @unittest.skipUnless(Image, "Pillow not installed")
    def test_not_closing_of_files(self):
        """
        Open files passed into get_image_dimensions() should stay opened.
        """
        empty_io = BytesIO()
        try:
            images.get_image_dimensions(empty_io)
        finally:
            self.assertFalse(empty_io.closed)

    @unittest.skipUnless(Image, "Pillow not installed")
    def test_closing_of_filenames(self):
        """
        get_image_dimensions() called with a filename should close the file.
        """
        # Inject a wrapping open() into the images module so we can observe
        # whether the file opened from the filename gets closed again;
        # get_image_dimensions will call tracking_open instead of the builtin.
        class TrackingWrapper(object):
            _closed = []

            def __init__(self, f):
                self.f = f

            def __getattr__(self, name):
                return getattr(self.f, name)

            def close(self):
                self._closed.append(True)
                self.f.close()

        def tracking_open(*args):
            return TrackingWrapper(open(*args))

        images.open = tracking_open
        try:
            images.get_image_dimensions(os.path.join(os.path.dirname(upath(__file__)), "test1.png"))
        finally:
            del images.open
        self.assertTrue(TrackingWrapper._closed)
class InconsistentGetImageDimensionsBug(unittest.TestCase):
    """
    Test that get_image_dimensions() works properly after various calls
    using a file handler (#11158)
    """

    @unittest.skipUnless(Image, "Pillow not installed")
    def test_multiple_calls(self):
        """
        Multiple calls of get_image_dimensions() should return the same size.
        """
        png_path = os.path.join(os.path.dirname(upath(__file__)), "test.png")
        with open(png_path, 'rb') as handle:
            wrapped = images.ImageFile(handle)
            via_pil = Image.open(handle)
            first_size = images.get_image_dimensions(wrapped)
            second_size = images.get_image_dimensions(wrapped)
        self.assertEqual(via_pil.size, first_size)
        self.assertEqual(first_size, second_size)

    @unittest.skipUnless(Image, "Pillow not installed")
    def test_bug_19457(self):
        """
        Regression test for #19457
        get_image_dimensions fails on some pngs, while Image.size is working good on them
        """
        png_path = os.path.join(os.path.dirname(upath(__file__)), "magic.png")
        try:
            dimensions = images.get_image_dimensions(png_path)
        except zlib.error:
            self.fail("Exception raised from get_image_dimensions().")
        with open(png_path, 'rb') as handle:
            self.assertEqual(dimensions, Image.open(handle).size)
class GetImageDimensionsOnInvalidImages(unittest.TestCase):
    @unittest.skipUnless(Image, "Pillow not installed")
    def test_invalid_image(self):
        """
        get_image_dimensions() should return (None, None) for the dimensions of
        invalid images (#24441).
        brokenimg.png is not a valid image and it has been generated by:
        $ echo "123" > brokenimg.png
        """
        broken_path = os.path.join(os.path.dirname(upath(__file__)), "brokenimg.png")
        with open(broken_path, 'rb') as handle:
            dimensions = images.get_image_dimensions(handle)
        self.assertEqual(dimensions, (None, None))
class FileMoveSafeTests(unittest.TestCase):
    """Tests for django.core.files.move.file_move_safe."""

    def test_file_move_overwrite(self):
        handle_a, self.file_a = tempfile.mkstemp()
        handle_b, self.file_b = tempfile.mkstemp()

        def move(overwrite):
            return file_move_safe(self.file_a, self.file_b,
                                  allow_overwrite=overwrite)

        # file_move_safe should raise an IOError exception if destination
        # file exists and allow_overwrite is False ...
        self.assertRaises(IOError, move, False)
        # ... and allow it and continue on if allow_overwrite is True.
        self.assertIsNone(move(True))
        os.close(handle_a)
        os.close(handle_b)
class SpooledTempTests(unittest.TestCase):
    """File wrappers around spooled temporary files report sizes correctly."""

    def test_in_memory_spooled_temp(self):
        with tempfile.SpooledTemporaryFile() as spooled:
            spooled.write(b"foo bar baz quux\n")
            wrapped = File(spooled, name="something.txt")
            self.assertEqual(wrapped.size, 17)

    def test_written_spooled_temp(self):
        # max_size=4 makes the 17-byte payload exceed the in-memory limit.
        with tempfile.SpooledTemporaryFile(max_size=4) as spooled:
            spooled.write(b"foo bar baz quux\n")
            wrapped = File(spooled, name="something.txt")
            self.assertEqual(wrapped.size, 17)
|
{
"content_hash": "f20da37fd2df430be5de1ccfbceb03b7",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 114,
"avg_line_length": 36.1660777385159,
"alnum_prop": 0.6257938446507083,
"repo_name": "RevelSystems/django",
"id": "d5d952d1e04d8bf01ea71eff4e6e9eec5a5e18c7",
"size": "10265",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/files/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "43000"
},
{
"name": "HTML",
"bytes": "171155"
},
{
"name": "JavaScript",
"bytes": "105066"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "10706379"
},
{
"name": "Shell",
"bytes": "3056"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
}
|
import operator
import os
import boto
from boto.exception import S3ResponseError
from pyramid.decorator import reify
from pyramid.events import NewResponse
from pyramid.events import subscriber
from pyramid.renderers import get_renderer
from pyramid.response import FileResponse
from pyramid.response import Response
from pyramid.view import view_config
from six.moves.urllib import parse as urlparse
from ichnaea.content.stats import (
global_stats,
histogram,
leaders,
leaders_weekly,
regions,
)
from ichnaea.internaljson import internal_dumps, internal_loads
from ichnaea.models.content import StatKey
from ichnaea import util
# Filesystem locations of the bundled static icon assets.
HERE = os.path.dirname(__file__)
IMAGE_PATH = os.path.join(HERE, 'static', 'images')
FAVICON_PATH = os.path.join(IMAGE_PATH, 'favicon.ico')
TOUCHICON_PATH = os.path.join(IMAGE_PATH, 'apple-touch-icon.png')
# cache year lookup, needs server restart after new year :)
THIS_YEAR = u'%s' % util.utcnow().year
# Content-Security-Policy building blocks; the {base}/{tiles} placeholders
# are filled in by configure_content() once the tile URL is known.
CSP_BASE = "'self' https://*.cdn.mozilla.net"
CSP_POLICY = """\
default-src 'self' *.tiles.mapbox.com;
font-src {base};
img-src {base} {tiles} *.google-analytics.com *.tiles.mapbox.com data:;
script-src {base} *.google-analytics.com 'unsafe-eval';
style-src {base};
"""
# Collapse the template to the single-line form used in the response header.
CSP_POLICY = CSP_POLICY.replace("\n", ' ').strip()
# Tile URL template pieces; the local base is the development fallback.
LOCAL_TILES_BASE = 'http://127.0.0.1:7001/static/tiles/'
TILES_PATTERN = '{z}/{x}/{y}.png'
LOCAL_TILES = LOCAL_TILES_BASE + TILES_PATTERN
BASE_MAP_KEY = 'mozilla-webprod.map-05ad0a21'
def map_tiles_url(base_url):
    """Return the map tile URL template for the given assets base URL.

    Falls back to the local development tile server when no base URL
    is configured.
    """
    if base_url is None:
        return LOCAL_TILES
    if not base_url.endswith('/'):  # pragma: no cover
        base_url += '/'
    return urlparse.urljoin(base_url, 'tiles/' + TILES_PATTERN)
def configure_content(config):
    """Register content views, static assets and routes on the Pyramid
    configurator, and derive the tile URL and CSP header for the registry."""
    # Icon/robots views are served with a one-day public HTTP cache and
    # excluded from request logging.
    config.add_view(favicon_view, name='favicon.ico',
                    http_cache=(86400, {'public': True}))
    config.registry.skip_logging.add('/favicon.ico')
    config.add_view(robotstxt_view, name='robots.txt',
                    http_cache=(86400, {'public': True}))
    config.registry.skip_logging.add('/robots.txt')
    config.add_view(touchicon_view, name='apple-touch-icon-precomposed.png',
                    http_cache=(86400, {'public': True}))
    config.registry.skip_logging.add('/apple-touch-icon-precomposed.png')
    config.add_static_view(
        name='static', path='ichnaea.content:static', cache_max_age=86400)
    config.add_route('leaders_weekly', '/leaders/weekly')
    config.add_route('leaders', '/leaders')
    config.add_route('stats_regions', '/stats/regions')
    config.add_route('stats', '/stats')
    config.scan('ichnaea.content.views')
    # Compute the tile URL from the configured assets URL (if any) and
    # stash it on the registry for ContentViews._tiles_url().
    assets_url = config.registry.settings.get('assets', {}).get('url', None)
    config.registry.tiles_url = tiles_url = map_tiles_url(assets_url)
    # Only scheme+host of the tile URL go into the CSP img-src directive.
    result = urlparse.urlsplit(tiles_url)
    tiles = urlparse.urlunparse((result.scheme, result.netloc, '', '', '', ''))
    config.registry.csp = CSP_POLICY.format(base=CSP_BASE, tiles=tiles)
@subscriber(NewResponse)
def security_headers(event):
    """Attach the standard security headers to every HTML response."""
    response = event.response
    if response.content_type != 'text/html':
        return
    csp = event.request.registry.csp
    for header, value in (
            ('Strict-Transport-Security', 'max-age=31536000'),
            ('Content-Security-Policy', csp),
            ('X-Content-Type-Options', 'nosniff'),
            ('X-XSS-Protection', '1; mode=block'),
            ('X-Frame-Options', 'DENY')):
        response.headers.add(header, value)
def s3_list_downloads(assets_bucket, assets_url, raven_client):
    """List the export files in the S3 assets bucket, newest name first.

    Returns a list of dicts with name, public path and size in kilobytes;
    returns an empty list on S3 errors (reported to raven).
    """
    if not assets_url.endswith('/'):  # pragma: no cover
        assets_url = assets_url + '/'
    connection = boto.connect_s3()
    bucket = connection.lookup(assets_bucket, validate=False)
    if bucket is None:  # pragma: no cover
        return []
    entries = []
    try:
        for key in bucket.list(prefix='export/'):
            entries.append(dict(
                name=key.name.split('/')[-1],
                path=urlparse.urljoin(assets_url, key.name),
                # round to kilobyte
                size=int(round(key.size / 1024.0, 0)),
            ))
    except S3ResponseError:  # pragma: no cover
        raven_client.captureException()
        return []
    return sorted(entries, key=operator.itemgetter('name'), reverse=True)
class Layout(object):
    """Mixin providing shared template machinery for content views."""

    @reify
    def base_template(self):
        """The 'layout' macro of the base template (cached by @reify)."""
        macros = get_renderer('templates/base.pt').implementation().macros
        return macros['layout']

    @reify
    def base_macros(self):
        """All macros of the base macros template (cached by @reify)."""
        return get_renderer('templates/base_macros.pt').implementation().macros

    @property
    def this_year(self):
        """The current year as a text string."""
        return THIS_YEAR
class ContentViews(Layout):
    """Pyramid view class for the public content pages and JSON endpoints.
    Most views cache their expensive database aggregations in Redis (keys
    from redis_client.cache_keys, serialized with internal_dumps/loads)."""
    def __init__(self, request):
        self.request = request
    def _tiles_url(self):
        # Prefer the URL computed at configuration time; fall back to the
        # local development tile server.
        tiles_url = getattr(self.request.registry, 'tiles_url', None)
        if not tiles_url:
            tiles_url = map_tiles_url(None)
        return tiles_url
    @view_config(renderer='templates/homepage.pt', http_cache=3600)
    def homepage_view(self):
        """Render the homepage with map preview image URLs."""
        tiles_url = self._tiles_url()
        map_url = tiles_url.format(z=0, x=0, y=0)
        # Match the request scheme so the mapbox base layer is http/https
        # consistent with the page itself.
        scheme = urlparse.urlparse(self.request.url).scheme
        map_base_url = '%s://a.tiles.mapbox.com/v3/%s/0/0/0.png' % (
            scheme, BASE_MAP_KEY)
        return {
            'page_title': 'Overview',
            'map_url': map_url,
            'map_url_2': map_url.replace('/0.png', '/0@2x.png'),
            'map_base_url': map_base_url,
        }
    @view_config(renderer='templates/api.pt',
                 name='api', http_cache=3600)
    def api_view(self):
        """Render the static API documentation page."""
        return {'page_title': 'API'}
    @view_config(renderer='templates/apps.pt',
                 name='apps', http_cache=3600)
    def apps_view(self):
        """Render the static client applications page."""
        return {'page_title': 'Client Applications'}
    @view_config(renderer='templates/contact.pt',
                 name='contact', http_cache=3600)
    def contact_view(self):
        """Render the static contact page."""
        return {'page_title': 'Contact Us'}
    @view_config(renderer='templates/downloads.pt',
                 name='downloads', http_cache=3600)
    def downloads_view(self):
        """Render the downloads page, listing export files from S3
        (cached in Redis for 30 minutes)."""
        redis_client = self.request.registry.redis_client
        cache_key = redis_client.cache_keys['downloads']
        cached = redis_client.get(cache_key)
        if cached:
            data = internal_loads(cached)
        else:
            settings = self.request.registry.settings
            assets_bucket = settings['assets']['bucket']
            assets_url = settings['assets']['url']
            raven_client = self.request.registry.raven_client
            data = s3_list_downloads(assets_bucket, assets_url, raven_client)
            # cache the download files
            redis_client.set(cache_key, internal_dumps(data), ex=1800)
        return {'page_title': 'Downloads', 'files': data}
    @view_config(renderer='templates/optout.pt',
                 name='optout', http_cache=3600)
    def optout_view(self):
        """Render the static opt-out page."""
        return {'page_title': 'Opt-Out'}
    @view_config(renderer='templates/privacy.pt',
                 name='privacy', http_cache=3600)
    def privacy_view(self):
        """Render the static privacy notice page."""
        return {'page_title': 'Privacy Notice'}
    @view_config(renderer='templates/leaders.pt',
                 route_name='leaders', http_cache=3600)
    def leaders_view(self):
        """Render the overall leaderboard, split into two columns
        (cached in Redis for 30 minutes)."""
        redis_client = self.request.registry.redis_client
        cache_key = redis_client.cache_keys['leaders']
        cached = redis_client.get(cache_key)
        if cached:
            data = internal_loads(cached)
        else:
            session = self.request.db_ro_session
            # Rank positions are 1-based; nickname doubles as page anchor.
            data = list(enumerate(leaders(session)))
            data = [
                {
                    'pos': l[0] + 1,
                    'num': l[1]['num'],
                    'nickname': l[1]['nickname'],
                    'anchor': l[1]['nickname'],
                } for l in data]
            redis_client.set(cache_key, internal_dumps(data), ex=1800)
        # Split into two halves for a two-column layout; the first column
        # gets the extra entry for odd counts.
        half = len(data) // 2 + len(data) % 2
        leaders1 = data[:half]
        leaders2 = data[half:]
        return {
            'page_title': 'Leaderboard',
            'leaders1': leaders1,
            'leaders2': leaders2,
        }
    @view_config(renderer='templates/leaders_weekly.pt',
                 route_name='leaders_weekly', http_cache=3600)
    def leaders_weekly_view(self):
        """Render the weekly leaderboard per score category
        (cached in Redis for one hour)."""
        redis_client = self.request.registry.redis_client
        cache_key = redis_client.cache_keys['leaders_weekly']
        cached = redis_client.get(cache_key)
        if cached:
            data = internal_loads(cached)
        else:
            session = self.request.db_ro_session
            data = {
                'new_cell': {'leaders1': [], 'leaders2': []},
                'new_wifi': {'leaders1': [], 'leaders2': []},
            }
            for name, value in leaders_weekly(session).items():
                value = [
                    {
                        'pos': l[0] + 1,
                        'num': l[1]['num'],
                        'nickname': l[1]['nickname'],
                    } for l in list(enumerate(value))]
                # Two-column split, extra entry in the first column.
                half = len(value) // 2 + len(value) % 2
                data[name] = {
                    'leaders1': value[:half],
                    'leaders2': value[half:],
                }
            redis_client.set(cache_key, internal_dumps(data), ex=3600)
        return {
            'page_title': 'Weekly Leaderboard',
            'scores': data,
        }
    @view_config(renderer='templates/map.pt', name='map', http_cache=3600)
    def map_view(self):
        """Render the full-screen map page."""
        return {'page_title': 'Map', 'tiles': self._tiles_url()}
    @view_config(
        renderer='json', name='map.json', http_cache=3600)
    def map_json(self):
        """Return the tile base URL (template with the z/x/y part cut off)."""
        tiles_url = self._tiles_url()
        offset = tiles_url.find(TILES_PATTERN)
        base_url = tiles_url[:offset]
        return {'tiles_url': base_url}
    @view_config(
        renderer='json', name='stats_cell.json', http_cache=3600)
    def stats_cell_json(self):
        """Return cell-count histogram series (cached for one hour)."""
        redis_client = self.request.registry.redis_client
        cache_key = redis_client.cache_keys['stats_cell_json']
        cached = redis_client.get(cache_key)
        if cached:
            data = internal_loads(cached)
        else:
            session = self.request.db_ro_session
            mls_data = histogram(session, StatKey.unique_cell)
            ocid_data = histogram(session, StatKey.unique_ocid_cell)
            data = [
                {'title': 'MLS Cells', 'data': mls_data[0]},
                {'title': 'OCID Cells', 'data': ocid_data[0]},
            ]
            redis_client.set(cache_key, internal_dumps(data), ex=3600)
        return {'series': data}
    @view_config(
        renderer='json', name='stats_wifi.json', http_cache=3600)
    def stats_wifi_json(self):
        """Return the wifi-count histogram series (cached for one hour)."""
        redis_client = self.request.registry.redis_client
        cache_key = redis_client.cache_keys['stats_wifi_json']
        cached = redis_client.get(cache_key)
        if cached:
            data = internal_loads(cached)
        else:
            session = self.request.db_ro_session
            data = histogram(session, StatKey.unique_wifi)
            redis_client.set(cache_key, internal_dumps(data), ex=3600)
        return {'series': [{'title': 'MLS WiFi', 'data': data[0]}]}
    @view_config(renderer='templates/stats.pt',
                 route_name='stats', http_cache=3600)
    def stats_view(self):
        """Render the statistics page with global metrics split into two
        groups (cached for one hour)."""
        redis_client = self.request.registry.redis_client
        cache_key = redis_client.cache_keys['stats']
        cached = redis_client.get(cache_key)
        if cached:
            data = internal_loads(cached)
        else:
            session = self.request.db_ro_session
            data = {
                'leaders': [],
                'metrics1': [],
                'metrics2': [],
            }
            metrics = global_stats(session)
            metric_names = [
                (StatKey.unique_cell.name, 'MLS Cells'),
                (StatKey.unique_ocid_cell.name, 'OpenCellID Cells'),
                (StatKey.cell.name, 'MLS Cell Observations'),
                (StatKey.unique_wifi.name, 'Wifi Networks'),
                (StatKey.wifi.name, 'Wifi Observations'),
            ]
            # First three metrics go into the first display group,
            # the remaining two into the second.
            for mid, name in metric_names[:3]:
                data['metrics1'].append({'name': name, 'value': metrics[mid]})
            for mid, name in metric_names[3:]:
                data['metrics2'].append({'name': name, 'value': metrics[mid]})
            redis_client.set(cache_key, internal_dumps(data), ex=3600)
        result = {'page_title': 'Statistics'}
        result.update(data)
        return result
    @view_config(renderer='templates/stats_regions.pt',
                 route_name='stats_regions', http_cache=3600)
    def stats_regions_view(self):
        """Render per-region cell statistics (cached for one hour)."""
        redis_client = self.request.registry.redis_client
        cache_key = redis_client.cache_keys['stats_regions']
        cached = redis_client.get(cache_key)
        if cached:
            data = internal_loads(cached)
        else:
            session = self.request.db_ro_session
            data = regions(session)
            redis_client.set(cache_key, internal_dumps(data), ex=3600)
        return {'page_title': 'Cell Statistics', 'metrics': data}
def favicon_view(request):
    """Serve the site favicon from the static images directory."""
    response = FileResponse(FAVICON_PATH, request=request)
    return response
def touchicon_view(request):
    """Serve the Apple touch icon from the static images directory."""
    response = FileResponse(TOUCHICON_PATH, request=request)
    return response
# Pre-built, shared response for /robots.txt; the body disallows crawling of
# the API endpoints, static assets and operational status pages.
_robots_response = Response(
    content_type='text/plain',
    body='''\
User-agent: *
Disallow: /downloads
Disallow: /leaders
Disallow: /stats/regions
Disallow: /static/
Disallow: /v1/
Disallow: /v2/
Disallow: /__heartbeat__
Disallow: /__monitor__
Disallow: /__version__
'''
)
def robotstxt_view(context, request):
    """Serve the static robots.txt response."""
    return _robots_response
|
{
"content_hash": "6fa34765781b659c0772bd34b1e7b2e4",
"timestamp": "",
"source": "github",
"line_count": 386,
"max_line_length": 79,
"avg_line_length": 35.80051813471503,
"alnum_prop": 0.5910702655763803,
"repo_name": "therewillbecode/ichnaea",
"id": "ef20e0229311cae588104a978e97298f287a11b6",
"size": "13819",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ichnaea/content/views.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "64264"
},
{
"name": "JavaScript",
"bytes": "1621672"
},
{
"name": "Makefile",
"bytes": "6964"
},
{
"name": "Mako",
"bytes": "432"
},
{
"name": "Python",
"bytes": "691003"
},
{
"name": "Shell",
"bytes": "253"
}
],
"symlink_target": ""
}
|
"""Script for unittesting the ssh module"""
import os
import tempfile
import unittest
import shutil
import testutils
import mocks
from ganeti import constants
from ganeti import utils
from ganeti import ssh
from ganeti import errors
class TestKnownHosts(testutils.GanetiTestCase):
  """Test case for function writing the known_hosts file"""

  def setUp(self):
    testutils.GanetiTestCase.setUp(self)
    self.tmpfile = self._CreateTempFile()

  def test(self):
    cfg = mocks.FakeConfig()
    ssh.WriteKnownHostsFile(cfg, self.tmpfile)
    # One RSA and one DSS line are expected, both for the cluster name.
    expected = "%s ssh-rsa %s\n%s ssh-dss %s\n" % (
      cfg.GetClusterName(), mocks.FAKE_CLUSTER_KEY,
      cfg.GetClusterName(), mocks.FAKE_CLUSTER_KEY)
    self.assertFileContent(self.tmpfile, expected)
class TestGetUserFiles(unittest.TestCase):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmpdir)
@staticmethod
def _GetNoHomedir(_):
return None
def _GetTempHomedir(self, _):
return self.tmpdir
def testNonExistantUser(self):
for kind in constants.SSHK_ALL:
self.assertRaises(errors.OpExecError, ssh.GetUserFiles, "example",
kind=kind, _homedir_fn=self._GetNoHomedir)
def testUnknownKind(self):
kind = "something-else"
assert kind not in constants.SSHK_ALL
self.assertRaises(errors.ProgrammerError, ssh.GetUserFiles, "example4645",
kind=kind, _homedir_fn=self._GetTempHomedir)
self.assertEqual(os.listdir(self.tmpdir), [])
def testNoSshDirectory(self):
for kind in constants.SSHK_ALL:
self.assertRaises(errors.OpExecError, ssh.GetUserFiles, "example29694",
kind=kind, _homedir_fn=self._GetTempHomedir)
self.assertEqual(os.listdir(self.tmpdir), [])
def testSshIsFile(self):
utils.WriteFile(os.path.join(self.tmpdir, ".ssh"), data="")
for kind in constants.SSHK_ALL:
self.assertRaises(errors.OpExecError, ssh.GetUserFiles, "example26237",
kind=kind, _homedir_fn=self._GetTempHomedir)
self.assertEqual(os.listdir(self.tmpdir), [".ssh"])
def testMakeSshDirectory(self):
sshdir = os.path.join(self.tmpdir, ".ssh")
self.assertEqual(os.listdir(self.tmpdir), [])
for kind in constants.SSHK_ALL:
ssh.GetUserFiles("example20745", mkdir=True, kind=kind,
_homedir_fn=self._GetTempHomedir)
self.assertEqual(os.listdir(self.tmpdir), [".ssh"])
self.assertEqual(os.stat(sshdir).st_mode & 0777, 0700)
def testFilenames(self):
sshdir = os.path.join(self.tmpdir, ".ssh")
os.mkdir(sshdir)
for kind in constants.SSHK_ALL:
result = ssh.GetUserFiles("example15103", mkdir=False, kind=kind,
_homedir_fn=self._GetTempHomedir)
self.assertEqual(result, [
os.path.join(self.tmpdir, ".ssh", "id_%s" % kind),
os.path.join(self.tmpdir, ".ssh", "id_%s.pub" % kind),
os.path.join(self.tmpdir, ".ssh", "authorized_keys"),
])
self.assertEqual(os.listdir(self.tmpdir), [".ssh"])
self.assertEqual(os.listdir(sshdir), [])
def testNoDirCheck(self):
self.assertEqual(os.listdir(self.tmpdir), [])
for kind in constants.SSHK_ALL:
ssh.GetUserFiles("example14528", mkdir=False, dircheck=False, kind=kind,
_homedir_fn=self._GetTempHomedir)
self.assertEqual(os.listdir(self.tmpdir), [])
def testGetAllUserFiles(self):
result = ssh.GetAllUserFiles("example7475", mkdir=False, dircheck=False,
_homedir_fn=self._GetTempHomedir)
self.assertEqual(result,
(os.path.join(self.tmpdir, ".ssh", "authorized_keys"), {
constants.SSHK_RSA:
(os.path.join(self.tmpdir, ".ssh", "id_rsa"),
os.path.join(self.tmpdir, ".ssh", "id_rsa.pub")),
constants.SSHK_DSA:
(os.path.join(self.tmpdir, ".ssh", "id_dsa"),
os.path.join(self.tmpdir, ".ssh", "id_dsa.pub")),
}))
self.assertEqual(os.listdir(self.tmpdir), [])
def testGetAllUserFilesNoDirectoryNoMkdir(self):
self.assertRaises(errors.OpExecError, ssh.GetAllUserFiles,
"example17270", mkdir=False, dircheck=True,
_homedir_fn=self._GetTempHomedir)
self.assertEqual(os.listdir(self.tmpdir), [])
# Standard Ganeti test entry point: run all tests in this module.
if __name__ == "__main__":
  testutils.GanetiTestProgram()
|
{
"content_hash": "5a86b5f60408e4a930d38d81f8b1559c",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 78,
"avg_line_length": 33.595419847328245,
"alnum_prop": 0.6489434219495569,
"repo_name": "apyrgio/snf-ganeti",
"id": "b520a97cbf875a57645fca00c6ff95d965fb122c",
"size": "5771",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable-2.10-bpo2",
"path": "test/py/ganeti.ssh_unittest.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Haskell",
"bytes": "1557814"
},
{
"name": "Python",
"bytes": "5311638"
},
{
"name": "Shell",
"bytes": "96816"
}
],
"symlink_target": ""
}
|
"""Unit tests for upload_command_stats_unittest.py."""
import os
import sys
sys.path.insert(0, os.path.abspath('%s/../..' % os.path.dirname(__file__)))
from chromite.cbuildbot import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import osutils
from chromite.lib import stats
from chromite.lib import stats_unittest
from chromite.scripts import upload_command_stats
# Fixture: a version-1 build-command stats file, in the key/value format
# the upload script parses.  Content must stay byte-identical to what the
# tests expect.
TEST_FILE = """\
Chromium OS Build Command Stats - Version 1
cpu_count 32
cmd_args --board=lumpy
host typewriter.mtv.corp.google.com
run_time 0
cmd_line ./build_packages --board=lumpy
username monkey@chromium.org
cmd_base build_packages
cpu_type Intel(R) Xeon(R) CPU E5-2690 0 @ 2.90GHz
board lumpy
"""
class RunScriptTest(cros_test_lib.MockTempDirTestCase,
                    cros_test_lib.LoggingTestCase):
  """Test the main functionality."""

  # pylint: disable=W0212

  def setUp(self):
    # Write the sample stats file and patch out the host-domain lookup and
    # the real uploader so that no network traffic ever happens in tests.
    self.upload_file = os.path.join(self.tempdir, 'upload_File')
    osutils.WriteFile(self.upload_file, TEST_FILE)
    self.argv = [self.upload_file]
    self.PatchObject(cros_build_lib, 'GetHostDomain', autospec=True,
                     return_value='noname.com')
    self.StartPatcher(stats_unittest.StatsUploaderMock())

  def testNormalRun(self):
    """Going for code coverage."""
    upload_command_stats.main(self.argv)
    # Exactly one upload attempt for a successful run.
    self.assertEquals(stats.StatsUploader._Upload.call_count, 1)

  def testStatsDebugMsg(self, golo=False):
    """We hide debug messages from stats module when not in golo."""
    stats.StatsUploader._Upload.side_effect = EnvironmentError()
    with cros_test_lib.LoggingCapturer() as logs:
      upload_command_stats.main(self.argv)
      self.AssertLogsContain(
          logs, stats.StatsUploader.ENVIRONMENT_ERROR, inverted=(not golo))

  def testGoloRun(self):
    """Test when running in the golo."""
    cros_build_lib.GetHostDomain.return_value = constants.GOLO_DOMAIN
    upload_command_stats.main(self.argv)
    self.assertEquals(stats.StatsUploader._Upload.call_count, 1)
    # Re-run the debug-message check; inside the golo the message must show.
    self.testStatsDebugMsg(golo=True)

  def LogContainsOnError(self, msg):
    """Verifies a logging.error() message is printed."""
    with cros_test_lib.LoggingCapturer() as logs:
      # main() must exit with code 1, not propagate the exception.
      self.assertRaises2(SystemExit, upload_command_stats.main, self.argv,
                         check_attrs={'code': 1})
      self.AssertLogsContain(logs, msg)

  def testLoadFileErrorIgnore(self):
    """A stats-file load error exits cleanly, logging FILE_LOAD_ERROR."""
    self.PatchObject(
        upload_command_stats.StatsLoader, 'LoadFile',
        side_effect=upload_command_stats.LoadError(), autospec=True)
    self.LogContainsOnError(
        upload_command_stats.FILE_LOAD_ERROR % self.upload_file)

  def testUploadErrorIgnore(self):
    """We don't propagate timeouts during upload."""
    stats.StatsUploader._Upload.side_effect = Exception()
    # Logging level for the error is logging.ERROR.
    self.LogContainsOnError(upload_command_stats.UNCAUGHT_ERROR)
# Standard chromite test entry point.
if __name__ == '__main__':
  cros_test_lib.main()
|
{
"content_hash": "5a9199fef5c1fc4746da4e4d79f560ec",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 75,
"avg_line_length": 36.095238095238095,
"alnum_prop": 0.7133905013192612,
"repo_name": "bpsinc-native/src_third_party_chromite",
"id": "17049d1af161c477df3ff66783d1a270ac00e10b",
"size": "3220",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "scripts/upload_command_stats_unittest.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85"
},
{
"name": "HTML",
"bytes": "2661"
},
{
"name": "Python",
"bytes": "3534807"
},
{
"name": "Shell",
"bytes": "24031"
}
],
"symlink_target": ""
}
|
"""NDArray API of MXNet."""
from __future__ import absolute_import
from __future__ import division
try:
from __builtin__ import slice as py_slice
except ImportError:
from builtins import slice as py_slice
import ctypes
import warnings
import os as _os
import sys as _sys
import operator
import numpy as np
from .base import _LIB, string_types, numeric_types
from .base import c_array, py_str, c_str, mx_real_t, _Null # pylint: disable=unused-import
from .base import mx_uint, NDArrayHandle, check_call, OpHandle
from .base import ctypes2buffer
from .context import Context
from . import _ndarray_internal as _internal
from .ndarray_doc import _build_doc
# Use different verison of SymbolBase
# When possible, use cython to speedup part of computation.
# pylint: disable=unused-import
try:
if int(_os.environ.get("MXNET_ENABLE_CYTHON", True)) == 0:
from ._ctypes.ndarray import NDArrayBase, _set_ndarray_class
from ._ctypes.ndarray import CachedOp, _imperative_invoke
elif _sys.version_info >= (3, 0):
from ._cy3.ndarray import NDArrayBase, _set_ndarray_class, _imperative_invoke
from ._cy3.ndarray import CachedOp, _imperative_invoke
else:
from ._cy2.ndarray import NDArrayBase, _set_ndarray_class, _imperative_invoke
from ._cy2.ndarray import CachedOp, _imperative_invoke
except ImportError:
if int(_os.environ.get("MXNET_ENFORCE_CYTHON", False)) != 0:
raise ImportError("Cython Module cannot be loaded but MXNET_ENFORCE_CYTHON=1")
from ._ctypes.ndarray import NDArrayBase, _set_ndarray_class, _imperative_invoke
from ._ctypes.ndarray import CachedOp, _imperative_invoke
# pylint: enable=unused-import
# pylint: disable= no-member
# Mapping between numpy scalar types and the integer dtype codes used by
# the MXNet C API; _DTYPE_MX_TO_NP is the exact inverse of _DTYPE_NP_TO_MX.
_DTYPE_NP_TO_MX = {
    np.float32 : 0,
    np.float64 : 1,
    np.float16 : 2,
    np.uint8 : 3,
    np.int32 : 4
}
_DTYPE_MX_TO_NP = {
    0 : np.float32,
    1 : np.float64,
    2 : np.float16,
    3 : np.uint8,
    4 : np.int32
}
# pylint: enable= no-member
def _new_empty_handle():
    """Create and return a fresh, value-less `NDArray` handle.

    An empty handle is typically used to receive the result of an
    operation from the backend.

    Returns
    -------
    handle
        A new empty `NDArray` handle.
    """
    handle = NDArrayHandle()
    check_call(_LIB.MXNDArrayCreateNone(ctypes.byref(handle)))
    return handle
def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t):
    """Create a handle describing an array of the given shape and context.

    Parameters
    ----------
    shape : tuple of int
        Shape of the array the handle will describe.
    ctx : Context
        Device context for the allocation.
    delay_alloc : bool
        Whether the backend may defer the actual memory allocation.
    dtype : numpy dtype, optional
        Element type (default is `mx_real_t`).

    Returns
    -------
    handle
        A new `NDArray` handle.
    """
    handle = NDArrayHandle()
    dtype_code = _DTYPE_NP_TO_MX[np.dtype(dtype).type]
    check_call(_LIB.MXNDArrayCreateEx(
        c_array(mx_uint, shape),
        mx_uint(len(shape)),
        ctypes.c_int(ctx.device_typeid),
        ctypes.c_int(ctx.device_id),
        ctypes.c_int(int(delay_alloc)),
        ctypes.c_int(int(dtype_code)),
        ctypes.byref(handle)))
    return handle
def waitall():
    """Block until every asynchronous operation queued in MXNet completes.

    This function is intended for benchmarking only.
    """
    check_call(_LIB.MXNDArrayWaitAll())
class NDArray(NDArrayBase):
    """An array object representing a multidimensional, homogeneous array of
    fixed-size items.
    """
    __slots__ = []
    # pylint: disable= no-member, undefined-variable

    def __repr__(self):
        """Returns a string representation of the array."""
        shape_info = 'x'.join(['%d' % x for x in self.shape])
        return '<%s %s @%s>' % (self.__class__.__name__,
                                shape_info, self.context)

    # --- arithmetic operators -------------------------------------------
    # The in-place variants (__iadd__ etc.) require a writable array and
    # write their result into `self` via the operator's `out=` argument.

    def __add__(self, other):
        """x.__add__(y) <=> x+y <=> mx.nd.add(x, y) """
        return add(self, other)

    def __iadd__(self, other):
        """x.__iadd__(y) <=> x+=y """
        if not self.writable:
            raise ValueError('trying to add to a readonly NDArray')
        if isinstance(other, NDArray):
            return broadcast_add(self, other, out=self)
        elif isinstance(other, numeric_types):
            return _internal._plus_scalar(self, float(other), out=self)
        else:
            raise TypeError('type %s not supported' % str(type(other)))

    def __radd__(self, other):
        return self.__add__(other)

    def __sub__(self, other):
        """x.__sub__(y) <=> x-y <=> mx.nd.subtract(x, y) """
        return subtract(self, other)

    def __isub__(self, other):
        """x.__isub__(y) <=> x-=y """
        if not self.writable:
            raise ValueError('trying to subtract from a readonly NDArray')
        if isinstance(other, NDArray):
            return broadcast_sub(self, other, out=self)
        elif isinstance(other, numeric_types):
            return _internal._minus_scalar(self, float(other), out=self)
        else:
            raise TypeError('type %s not supported' % str(type(other)))

    def __rsub__(self, other):
        """x.__rsub__(y) <=> y-x <=> mx.nd.subtract(y, x) """
        return subtract(other, self)

    def __mul__(self, other):
        """x.__mul__(y) <=> x*y <=> mx.nd.multiply(x, y) """
        return multiply(self, other)

    def __neg__(self):
        """x.__neg__(y) <=> -x """
        return _internal._mul_scalar(self, -1.0)

    def __imul__(self, other):
        """x.__imul__(y) <=> x*=y """
        if not self.writable:
            raise ValueError('trying to multiply to a readonly NDArray')
        if isinstance(other, NDArray):
            return broadcast_mul(self, other, out=self)
        elif isinstance(other, numeric_types):
            return _internal._mul_scalar(self, float(other), out=self)
        else:
            raise TypeError('type %s not supported' % str(type(other)))

    def __rmul__(self, other):
        return self.__mul__(other)

    def __div__(self, other):
        """x.__div__(y) <=> x/y <=> mx.nd.divide(x, y) """
        return divide(self, other)

    def __rdiv__(self, other):
        """x.__rdiv__(y) <=> y/x <=> mx.nd.divide(y, x) """
        return divide(other, self)

    def __idiv__(self, other):
        """x.__idiv__(y) <=> x/=y """
        if not self.writable:
            raise ValueError('trying to divide from a readonly NDArray')
        if isinstance(other, NDArray):
            return broadcast_div(self, other, out=self)
        elif isinstance(other, numeric_types):
            return _internal._div_scalar(self, float(other), out=self)
        else:
            raise TypeError('type %s not supported' % str(type(other)))

    # Python 3 true division delegates to the same implementations.
    def __truediv__(self, other):
        return divide(self, other)

    def __rtruediv__(self, other):
        return divide(other, self)

    def __itruediv__(self, other):
        return self.__idiv__(other)

    def __mod__(self, other):
        """x.__mod__(y) <=> x%y <=> mx.nd.modulo(x, y) """
        return modulo(self, other)

    def __rmod__(self, other):
        """x.__rmod__(y) <=> y%x <=> mx.nd.modulo(y, x) """
        return modulo(other, self)

    def __imod__(self, other):
        """x.__imod__(y) <=> x%=y """
        if not self.writable:
            raise ValueError('trying to take modulo from a readonly NDArray')
        if isinstance(other, NDArray):
            return broadcast_mod(self, other, out=self)
        elif isinstance(other, numeric_types):
            return _internal._mod_scalar(self, float(other), out=self)
        else:
            raise TypeError('type %s not supported' % str(type(other)))

    def __pow__(self, other):
        """x.__pow__(y) <=> x**y <=> mx.nd.power(x,y) """
        return power(self, other)

    def __rpow__(self, other):
        """x.__rpow__(y) <=> y**x <=> mx.nd.power(y,x) """
        return power(other, self)

    # --- comparison operators: each returns an element-wise NDArray -----

    def __eq__(self, other):
        """x.__eq__(y) <=> x==y <=> mx.nd.equal(x, y) """
        return equal(self, other)

    def __ne__(self, other):
        """x.__ne__(y) <=> x!=y <=> mx.nd.not_equal(x, y) """
        return not_equal(self, other)

    def __gt__(self, other):
        """x.__gt__(y) <=> x>y <=> mx.nd.greater(x, y) """
        return greater(self, other)

    def __ge__(self, other):
        """x.__ge__(y) <=> x>=y <=> mx.nd.greater_equal(x, y) """
        return greater_equal(self, other)

    def __lt__(self, other):
        """x.__lt__(y) <=> x<y <=> mx.nd.lesser(x, y) """
        return lesser(self, other)

    def __le__(self, other):
        """x.__le__(y) <=> x<=y <=> mx.nd.less_equal(x, y) """
        return lesser_equal(self, other)

    def __bool__(self):
        # Comparisons return arrays, so truth value is undefined (as in numpy).
        raise ValueError("The truth value of an NDArray with more than one element is ambiguous.")

    __nonzero__ = __bool__

    def __getstate__(self):
        # Pickle support: serialize the array as raw bytes via the C API.
        handle = self.handle
        this = {'handle' : None}
        if handle is not None:
            length = ctypes.c_size_t()
            cptr = ctypes.POINTER(ctypes.c_char)()
            check_call(_LIB.MXNDArraySaveRawBytes(self.handle,
                                                  ctypes.byref(length),
                                                  ctypes.byref(cptr)))
            this['handle'] = ctypes2buffer(cptr, length.value)
        return this

    def __setstate__(self, state):
        # pylint: disable=assigning-non-slot
        # Inverse of __getstate__: rebuild the handle from raw bytes.
        handle = state['handle']
        if handle is not None:
            buf = handle
            handle = NDArrayHandle()
            ptr = (ctypes.c_char * len(buf)).from_buffer(buf)
            length = ctypes.c_size_t(len(buf))
            check_call(_LIB.MXNDArrayLoadFromRawBytes(ptr, length, ctypes.byref(handle)))
            self.handle = handle
        else:
            self.handle = None

    def __setitem__(self, key, value):
        """x.__setitem__(i, y) <=> x[i]=y

        Set self[key] to value.

        Parameters
        ----------
        key : int, slice or tuple
            The indexing key.
        value : scalar, NDArray or numpy.ndarray
            The value to set.

        Examples
        --------
        >>> x = mx.nd.zeros((2,3))
        >>> x[:] = 1
        >>> x.asnumpy()
        array([[ 1., 1., 1.],
        [ 1., 1., 1.]], dtype=float32)
        >>> x.asnumpy()
        array([[ 1., 1., 1.],
        [ 1., 1., 1.]], dtype=float32)
        >>> x[:,1:2] = 2
        >>> x.asnumpy()
        array([[ 1., 2., 1.],
        [ 1., 2., 1.]], dtype=float32)
        >>> x[1:2,1:] = 3
        >>> x.asnumpy()
        array([[ 1., 2., 1.],
        [ 1., 3., 3.]], dtype=float32)
        >>> x[1:,0:2] = mx.nd.zeros((1,2))
        >>> x.asnumpy()
        array([[ 1., 2., 1.],
        [ 0., 0., 3.]], dtype=float32)
        >>> x[1,2] = 4
        >>> x.asnumpy()
        array([[ 1., 2., 1.],
        [ 0., 0., 4.]], dtype=float32)
        """
        # pylint: disable=too-many-branches
        if not self.writable:
            raise ValueError('Failed to assign to a readonly NDArray')
        if isinstance(key, int):
            # Integer key: recursive assignment through a view of row `key`.
            sliced_arr = self._at(key)
            sliced_arr[:] = value
            return
        if isinstance(key, py_slice):
            if key.step is not None:
                raise ValueError('NDArray only supports continuous slicing on axis 0')
            if key.start is not None or key.stop is not None:
                sliced_arr = self._slice(key.start, key.stop)
                sliced_arr[:] = value
                return
            # Full slice x[:] = value: dispatch on the value's type.
            if isinstance(value, NDArray):
                if value.handle is not self.handle:
                    value.copyto(self)
            elif isinstance(value, numeric_types):
                _internal._set_value(float(value), out=self)
            elif isinstance(value, (np.ndarray, np.generic)):
                self._sync_copyfrom(value)
            else:
                raise TypeError('type %s not supported' % str(type(value)))
        if isinstance(key, tuple):
            # multi-dimension indexing
            my_shape = self.shape
            assert len(key) == len(my_shape)
            for slice_i in key:
                assert isinstance(slice_i, (py_slice, int))
            # Compute the [begin, end) box covered by the key.
            begin = [0 for _ in my_shape]
            end = [x for x in my_shape]
            for i, slice_i in enumerate(key):
                if isinstance(slice_i, int):
                    assert slice_i < my_shape[i]
                    begin[i] = slice_i
                    end[i] = slice_i + 1
                if isinstance(slice_i, py_slice):
                    # only support continuous slicing
                    assert slice_i.step is None
                    begin[i] = slice_i.start or 0
                    end[i] = slice_i.stop or my_shape[i]
                    assert begin[i] < end[i]
                    assert end[i] <= my_shape[i]
            begin = tuple(begin)
            end = tuple(end)
            if isinstance(value, NDArray):
                value = value.as_in_context(self.context)
                _internal._crop_assign(self, value, out=self,
                                       begin=begin, end=end)
            elif isinstance(value, numeric_types):
                _internal._crop_assign_scalar(self, out=self,
                                              begin=begin, end=end,
                                              scalar=value)
            elif isinstance(value, (np.ndarray, np.generic)):
                value = array(value, ctx=self.context)
                _internal._crop_assign(self, value, out=self,
                                       begin=begin, end=end)
            else:
                raise TypeError('type %s not supported' % str(type(value)))
        # pylint: enable=too-many-branches

    def __getitem__(self, key):
        """x.__getitem__(i) <=> x[i]

        Returns a sliced view of this array.

        Parameters
        ----------
        key : int or slice
            Indexing key.

        Examples
        --------
        >>> x = mx.nd.arange(0,6).reshape((2,3))
        >>> x.asnumpy()
        array([[ 0., 1., 2.],
        [ 3., 4., 5.]], dtype=float32)
        >>> x[1].asnumpy()
        array([ 3., 4., 5.], dtype=float32)
        >>> y = x[0:1]
        >>> y[:] = 2
        >>> x.asnumpy()
        array([[ 2., 2., 2.],
        [ 3., 4., 5.]], dtype=float32)
        """
        # multi-dimensional slicing is not supported yet
        if isinstance(key, int):
            if key > self.shape[0] - 1:
                raise IndexError(
                    'index {} is out of bounds for axis 0 with size {}'.format(
                        key, self.shape[0]))
            return self._at(key)
        if isinstance(key, py_slice):
            if key.step is not None:
                raise ValueError('NDArray only supports continuous slicing on axis 0')
            if key.start is not None or key.stop is not None:
                return self._slice(key.start, key.stop)
            else:
                return self
        if isinstance(key, tuple):
            raise ValueError('Multi-dimension indexing is not supported')

    def _sync_copyfrom(self, source_array):
        """Performs a synchronized copy from the `source_array` to the current array.

        This is called through ``x[:] = source_array``, where the `source_array`
        is a `numpy.ndarray` or array-like object.
        This function blocks until all the pending read/write operations with respect
        to the current `NDArray` are finished and carry out the copy operation to the
        current NDArray.

        Parameters
        ----------
        source_array : array_like
            The data source we would like to copy from.

        Example
        -------
        >>> a = mx.nd.array([1, 2])
        >>> a.asnumpy()
        array([ 1., 2.], dtype=float32)
        >>> a[:] = np.array([3, 4])
        >>> a.asnumpy()
        array([ 3., 4.], dtype=float32)
        """
        if not isinstance(source_array, np.ndarray):
            try:
                source_array = np.array(source_array, dtype=self.dtype)
            except:
                # NOTE(review): `type(array)` names the module-level array()
                # function, not the offending input — presumably this was
                # meant to be `type(source_array)`; confirm before relying
                # on this error message.
                raise TypeError('array must consist of array-like data,' +
                                'type %s is not supported' % str(type(array)))
        source_array = np.ascontiguousarray(source_array, dtype=self.dtype)
        if source_array.shape != self.shape:
            raise ValueError('Shape inconsistent: expected %s vs got %s'%(
                str(self.shape), str(source_array.shape)))
        check_call(_LIB.MXNDArraySyncCopyFromCPU(
            self.handle,
            source_array.ctypes.data_as(ctypes.c_void_p),
            ctypes.c_size_t(source_array.size)))

    def _slice(self, start, stop):
        """Returns a sliced NDArray that shares memory with the current one.

        This is called through ``x[start:stop]``.

        Parameters
        ----------
        start : int
            Starting inclusive index of slice in the first dim.
        stop : int
            Finishing exclusive index of slice in the first dim.

        Returns
        -------
        `NDArray` sharing the memory with the current one sliced from
        start to stop in the first dim.

        Examples:
        >>> a = mx.nd.array([[1,2], [3, 4], [5, 6], [7, 8]])
        >>> a[1:2].asnumpy()
        array([[ 3., 4.]], dtype=float32)
        >>> a[1:1].asnumpy()
        array([], shape=(0, 2), dtype=float32)
        """
        handle = NDArrayHandle()
        # Missing bounds default to the full extent of axis 0.
        start = mx_uint(start) if start else mx_uint(0)
        stop = mx_uint(stop) if stop else mx_uint(self.shape[0])
        check_call(_LIB.MXNDArraySlice(
            self.handle, start, stop, ctypes.byref(handle)))
        return NDArray(handle=handle, writable=self.writable)

    def _at(self, idx):
        """Returns a view of the array sliced at `idx` in the first dim.

        This is called through ``x[idx]``.

        Parameters
        ----------
        idx : int
            index for slicing the `NDArray` in the first dim.

        Returns
        -------
        NDArray
            `NDArray` sharing the memory with the current one sliced at `idx` in the first dim.

        Examples
        --------
        >>> a = mx.nd.array([[1,2], [3, 4]])
        >>> a[1].asnumpy()
        array([ 3., 4.], dtype=float32)
        >>> b = mx.nd.array([1, 2, 3, 4])
        >>> b[0].asnumpy()
        array([ 1.], dtype=float32)
        """
        handle = NDArrayHandle()
        idx = mx_uint(idx)
        check_call(_LIB.MXNDArrayAt(
            self.handle, idx, ctypes.byref(handle)))
        return NDArray(handle=handle, writable=self.writable)

    def reshape(self, shape):
        """Returns a **view** of this array with a new shape without altering any data.

        Parameters
        ----------
        shape : tuple of int
            The new shape should not change the array size, namely
            ``np.prod(new_shape)`` should be equal to ``np.prod(self.shape)``.
            One dimension can be -1. In this case, the value is inferred
            from the length of the array and remaining dimensions.
            0 Dimensions in shape will be copied from original shape, i.e.
            if x.shape == (3, 4, 5), x.reshape((0, 20)).shape will be (3, 20).

        Returns
        -------
        NDArray
            An array with desired shape that shares data with this array.

        Examples
        --------
        >>> x = mx.nd.arange(0,6).reshape((2,3))
        >>> x.asnumpy()
        array([[ 0., 1., 2.],
        [ 3., 4., 5.]], dtype=float32)
        >>> y = x.reshape((3,2))
        >>> y.asnumpy()
        array([[ 0., 1.],
        [ 2., 3.],
        [ 4., 5.]], dtype=float32)
        >>> y = x.reshape((3,-1))
        >>> y.asnumpy()
        array([[ 0., 1.],
        [ 2., 3.],
        [ 4., 5.]], dtype=float32)
        >>> y[:] = -1
        >>> x.asnumpy()
        array([[-1., -1., -1.],
        [-1., -1., -1.]], dtype=float32)
        """
        handle = NDArrayHandle()
        # Actual reshape
        check_call(_LIB.MXNDArrayReshape(self.handle,
                                         len(shape),
                                         c_array(ctypes.c_int, shape),
                                         ctypes.byref(handle)))
        return NDArray(handle=handle, writable=self.writable)

    # pylint: disable= undefined-variable
    def broadcast_to(self, shape):
        """Broadcasts the input array to a new shape.

        Broadcasting is only allowed on axes with size 1. The new shape cannot change
        the number of dimensions.
        For example, you could broadcast from shape (2, 1) to (2, 3), but not from
        shape (2, 3) to (2, 3, 3).

        Parameters
        ----------
        shape : tuple of int
            The shape of the desired array.

        Returns
        -------
        NDArray
            A NDArray with the desired shape that is not sharing data with this
            array, even if the new shape is the same as ``self.shape``.

        Examples
        --------
        >>> x = mx.nd.arange(0,3).reshape((1,3,1))
        >>> x.asnumpy()
        array([[[ 0.],
        [ 1.],
        [ 2.]]], dtype=float32)
        >>> y = x.broadcast_to((2,3,3))
        >>> y.asnumpy()
        array([[[ 0., 0., 0.],
        [ 1., 1., 1.],
        [ 2., 2., 2.]],
        <BLANKLINE>
        [[ 0., 0., 0.],
        [ 1., 1., 1.],
        [ 2., 2., 2.]]], dtype=float32)
        """
        cur_shape = self.shape
        err_str = 'operands could not be broadcast together with remapped shapes' \
                  '[original->remapped]: {} and requested shape {}'.format(cur_shape, shape)
        if len(shape) < len(cur_shape):
            raise ValueError(err_str)
        # Left-pad the current shape with 1s to match the requested rank.
        cur_shape = (1,) * (len(shape) - len(cur_shape)) + cur_shape
        cur_shape_arr = np.array(cur_shape)
        broadcasting_axes = np.nonzero(cur_shape_arr != np.array(shape))
        # Every axis that changes size must currently have size 1.
        if (cur_shape_arr[broadcasting_axes] != 1).any():
            raise ValueError(err_str)
        if cur_shape != self.shape:
            return broadcast_to(self.reshape(cur_shape), shape=shape)
        else:
            return broadcast_to(self, shape=tuple(shape))
    # pylint: enable= undefined-variable

    def wait_to_read(self):
        """Waits until all previous write operations on the current array are finished.

        This method guarantees that all previous write operations that pushed
        into the backend engine for execution are actually finished.

        Examples
        --------
        >>> import time
        >>> tic = time.time()
        >>> a = mx.nd.ones((1000,1000))
        >>> b = mx.nd.dot(a, a)
        >>> print(time.time() - tic) # doctest: +SKIP
        0.003854036331176758
        >>> b.wait_to_read()
        >>> print(time.time() - tic) # doctest: +SKIP
        0.0893700122833252
        """
        check_call(_LIB.MXNDArrayWaitToRead(self.handle))

    @property
    def ndim(self):
        """Returns the number of dimensions of this array

        Examples
        --------
        >>> x = mx.nd.array([1, 2, 3, 4])
        >>> x.ndim
        1
        >>> x = mx.nd.array([[1, 2], [3, 4]])
        >>> x.ndim
        2
        """
        return len(self.shape)

    @property
    def shape(self):
        """Tuple of array dimensions.

        Examples
        --------
        >>> x = mx.nd.array([1, 2, 3, 4])
        >>> x.shape
        (4L,)
        >>> y = mx.nd.zeros((2, 3, 4))
        >>> y.shape
        (2L, 3L, 4L)
        """
        ndim = mx_uint()
        pdata = ctypes.POINTER(mx_uint)()
        check_call(_LIB.MXNDArrayGetShape(
            self.handle, ctypes.byref(ndim), ctypes.byref(pdata)))
        return tuple(pdata[:ndim.value])

    @property
    def size(self):
        """Number of elements in the array.

        Equivalent to the product of the array’s dimensions.

        Examples
        --------
        >>> import numpy as np
        >>> x = mx.nd.zeros((3, 5, 2))
        >>> x.size
        30
        >>> np.prod(x.shape)
        30
        """
        return np.prod(self.shape)

    @property
    def context(self):
        """Device context of the array.

        Examples
        --------
        >>> x = mx.nd.array([1, 2, 3, 4])
        >>> x.context
        cpu(0)
        >>> type(x.context)
        <class 'mxnet.context.Context'>
        >>> y = mx.nd.zeros((2,3), mx.gpu(0))
        >>> y.context
        gpu(0)
        """
        dev_typeid = ctypes.c_int()
        dev_id = ctypes.c_int()
        check_call(_LIB.MXNDArrayGetContext(
            self.handle, ctypes.byref(dev_typeid), ctypes.byref(dev_id)))
        return Context(Context.devtype2str[dev_typeid.value], dev_id.value)

    @property
    def dtype(self):
        """Data-type of the array’s elements.

        Returns
        -------
        numpy.dtype
            This NDArray's data type.

        Examples
        --------
        >>> x = mx.nd.zeros((2,3))
        >>> x.dtype
        <type 'numpy.float32'>
        >>> y = mx.nd.zeros((2,3), dtype='int32')
        >>> y.dtype
        <type 'numpy.int32'>
        """
        mx_dtype = ctypes.c_int()
        check_call(_LIB.MXNDArrayGetDType(
            self.handle, ctypes.byref(mx_dtype)))
        return _DTYPE_MX_TO_NP[mx_dtype.value]

    @property
    # pylint: disable= invalid-name, undefined-variable
    def T(self):
        """Returns a copy of the array with axes transposed.

        Equivalent to ``mx.nd.transpose(self)`` except that
        self is returned if ``self.ndim < 2``.

        Unlike ``numpy.ndarray.T``, this function returns a copy
        rather than a view of the array unless ``self.ndim < 2``.

        Examples
        --------
        >>> x = mx.nd.arange(0,6).reshape((2,3))
        >>> x.asnumpy()
        array([[ 0., 1., 2.],
        [ 3., 4., 5.]], dtype=float32)
        >>> x.T.asnumpy()
        array([[ 0., 3.],
        [ 1., 4.],
        [ 2., 5.]], dtype=float32)
        """
        if len(self.shape) < 2:
            return self
        return transpose(self)
    # pylint: enable= invalid-name, undefined-variable

    @property
    def _fresh_grad(self):
        """Whether this array's corresponding gradient array
        (registered via `autograd.mark_variables`) has been
        updated by `autograd.backward` since last reset.

        `_fresh_grad` need to be manually set to False
        after consuming gradient (usually after updating this
        array).
        """
        out = ctypes.c_int()
        check_call(_LIB.MXNDArrayGetGradState(self.handle, ctypes.byref(out)))
        return out.value

    @_fresh_grad.setter
    def _fresh_grad(self, state):
        check_call(_LIB.MXNDArraySetGradState(self.handle, ctypes.c_int(state)))

    def asnumpy(self):
        """Returns a ``numpy.ndarray`` object with value copied from this array.

        Examples
        --------
        >>> x = mx.nd.ones((2,3))
        >>> y = x.asnumpy()
        >>> type(y)
        <type 'numpy.ndarray'>
        >>> y
        array([[ 1., 1., 1.],
        [ 1., 1., 1.]], dtype=float32)
        >>> z = mx.nd.ones((2,3), dtype='int32')
        >>> z.asnumpy()
        array([[1, 1, 1],
        [1, 1, 1]], dtype=int32)
        """
        data = np.empty(self.shape, dtype=self.dtype)
        check_call(_LIB.MXNDArraySyncCopyToCPU(
            self.handle,
            data.ctypes.data_as(ctypes.c_void_p),
            ctypes.c_size_t(data.size)))
        return data

    def asscalar(self):
        """Returns a scalar whose value is copied from this array.

        This function is equivalent to ``self.asnumpy()[0]``. This NDArray must have shape (1,).

        Examples
        --------
        >>> x = mx.nd.ones((1,), dtype='int32')
        >>> x.asscalar()
        1
        >>> type(x.asscalar())
        <type 'numpy.int32'>
        """
        if self.shape != (1,):
            raise ValueError("The current array is not a scalar")
        return self.asnumpy()[0]

    def astype(self, dtype):
        """Returns a copy of the array after casting to a specified type.

        Parameters
        ----------
        dtype : numpy.dtype or str
            The type of the returned array.

        Examples
        --------
        >>> x = mx.nd.zeros((2,3), dtype='float32')
        >>> y = x.astype('int32')
        >>> y.dtype
        <type 'numpy.int32'>
        """
        res = empty(self.shape, ctx=self.context, dtype=dtype)
        self.copyto(res)
        return res

    def copyto(self, other):
        """Copies the value of this array to another array.

        If ``other`` is a ``NDArray`` object, then ``other.shape`` and
        ``self.shape`` should be the same. This function copies the value from
        ``self`` to ``other``.

        If ``other`` is a context, a new ``NDArray`` will be first created on
        the target context, and the value of ``self`` is copied.

        Parameters
        ----------
        other : NDArray or Context
            The destination array or context.

        Returns
        -------
        NDArray
            The copied array. If ``other`` is an ``NDArray``, then the return value
            and ``other`` will point to the same ``NDArray``.

        Examples
        --------
        >>> x = mx.nd.ones((2,3))
        >>> y = mx.nd.zeros((2,3), mx.gpu(0))
        >>> z = x.copyto(y)
        >>> z is y
        True
        >>> y.asnumpy()
        array([[ 1., 1., 1.],
        [ 1., 1., 1.]], dtype=float32)
        >>> y.copyto(mx.gpu(0))
        <NDArray 2x3 @gpu(0)>
        """
        if isinstance(other, NDArray):
            if other.handle is self.handle:
                # Self-copy is a no-op (returns None), only a warning.
                warnings.warn('You are attempting to copy an array to itself', RuntimeWarning)
                return
            return _internal._copyto(self, out=other)
        elif isinstance(other, Context):
            hret = NDArray(_new_alloc_handle(self.shape, other, True, self.dtype))
            return _internal._copyto(self, out=hret)
        else:
            raise TypeError('copyto does not support type ' + str(type(other)))

    def copy(self):
        """Makes a copy of this ``NDArray``, keeping the same context.

        Returns
        -------
        NDArray
            The copied array

        Examples
        --------
        >>> x = mx.nd.ones((2,3))
        >>> y = x.copy()
        >>> y.asnumpy()
        array([[ 1., 1., 1.],
        [ 1., 1., 1.]], dtype=float32)
        """
        return self.copyto(self.context)

    def as_in_context(self, context):
        """Returns an array on the target device with the same value as this array.

        If the target context is the same as ``self.context``, then ``self`` is
        returned.  Otherwise, a copy is made.

        Parameters
        ----------
        context : Context
            The target context.

        Returns
        -------
        NDArray
            The target array.

        Examples
        --------
        >>> x = mx.nd.ones((2,3))
        >>> y = x.as_in_context(mx.cpu())
        >>> y is x
        True
        >>> z = x.as_in_context(mx.gpu(0))
        >>> z is x
        False
        """
        if self.context == context:
            return self
        return self.copyto(context)

    def detach(self):
        """Returns a new NDArray, detached from the current graph."""
        hdl = NDArrayHandle()
        check_call(_LIB.MXNDArrayDetach(self.handle, ctypes.byref(hdl)))
        return NDArray(hdl)

    def backward(self, out_grad=None, retain_graph=False):
        """Compute the gradients of this NDArray w.r.t variables.

        Parameters
        ----------
        out_grad: list of NDArray or None
            Gradient with respect to this array's output; when None a
            NULL handle is passed to the backend.
        retain_graph : bool, optional
            Forwarded to the backend as an int flag.
        """
        if out_grad is None:
            # NDArrayHandle(0) is a NULL pointer, signaling "no head gradient".
            ograd_handles = [NDArrayHandle(0)]
        else:
            ograd_handles = [out_grad.handle]
        check_call(_LIB.MXAutogradBackward(
            1, c_array(NDArrayHandle, [self.handle]),
            c_array(NDArrayHandle, ograd_handles),
            ctypes.c_int(retain_graph)))
def onehot_encode(indices, out):
    """One-hot encoding indices into matrix out.

    .. note:: `onehot_encode` is deprecated. Use `one_hot` instead.
    """
    # pylint: disable= no-member, protected-access
    # Thin deprecated wrapper: writes the encoding into `out` in place and
    # returns it.
    return _internal._onehot_encode(indices, out, out=out)
    # pylint: enable= no-member, protected-access
def empty(shape, ctx=None, dtype=mx_real_t):
    """Returns a new array of given shape and type, without initializing entries.

    Parameters
    ----------
    shape : int or tuple of int
        The shape of the empty array.
    ctx : Context, optional
        An optional device context (default is the current default context).
    dtype : str or numpy.dtype, optional
        An optional value type (default is `float32`).

    Returns
    -------
    NDArray
        A created array.

    Examples
    --------
    >>> mx.nd.empty(1)
    <NDArray 1 @cpu(0)>
    >>> mx.nd.empty((1,2), mx.gpu(0))
    <NDArray 1x2 @gpu(0)>
    >>> mx.nd.empty((1,2), mx.gpu(0), 'float16')
    <NDArray 1x2 @gpu(0)>
    """
    # Normalize arguments: a bare int becomes a 1-d shape, a missing
    # context falls back to the global default.
    target_shape = (shape, ) if isinstance(shape, int) else shape
    target_ctx = Context.default_ctx if ctx is None else ctx
    return NDArray(handle=_new_alloc_handle(target_shape, target_ctx, False, dtype))
def zeros(shape, ctx=None, dtype=mx_real_t, **kwargs):
    """Returns a new array filled with all zeros, with the given shape and type.

    Parameters
    ----------
    shape : int or tuple of int
        The shape of the empty array.
    ctx : Context, optional
        An optional device context (default is the current default context).
    dtype : str or numpy.dtype, optional
        An optional value type (default is `float32`).
    out : NDArray, optional
        The output NDArray (default is `None`).

    Returns
    -------
    NDArray
        A created array

    Examples
    --------
    >>> mx.nd.zeros(1).asnumpy()
    array([ 0.], dtype=float32)
    >>> mx.nd.zeros((1,2), mx.gpu(0))
    <NDArray 1x2 @gpu(0)>
    >>> mx.nd.zeros((1,2), mx.gpu(0), 'float16').asnumpy()
    array([[ 0., 0.]], dtype=float16)
    """
    # pylint: disable= unused-argument
    target_ctx = ctx if ctx is not None else Context.default_ctx
    # pylint: disable= no-member, protected-access
    return _internal._zeros(shape=shape, ctx=target_ctx, dtype=dtype, **kwargs)
    # pylint: enable= no-member, protected-access
def ones(shape, ctx=None, dtype=mx_real_t, **kwargs):
    """Returns a new array filled with all ones, with the given shape and type.

    Parameters
    ----------
    shape : int or tuple of int or list of int
        The shape of the empty array.
    ctx : Context, optional
        An optional device context.
        Defaults to the current default context (``mxnet.Context.default_ctx``).
    dtype : str or numpy.dtype, optional
        An optional value type (default is `float32`).
    out : NDArray, optional
        The output NDArray (default is `None`).

    Returns
    -------
    NDArray
        A new array of the specified shape filled with all ones.

    Examples
    --------
    >>> mx.nd.ones(1).asnumpy()
    array([ 1.], dtype=float32)
    >>> mx.nd.ones((1,2), mx.gpu(0))
    <NDArray 1x2 @gpu(0)>
    >>> mx.nd.ones((1,2), dtype='float16').asnumpy()
    array([[ 1., 1.]], dtype=float16)
    """
    # pylint: disable= unused-argument
    target_ctx = ctx if ctx is not None else Context.default_ctx
    # pylint: disable= no-member, protected-access
    return _internal._ones(shape=shape, ctx=target_ctx, dtype=dtype, **kwargs)
    # pylint: enable= no-member, protected-access
def full(shape, val, ctx=None, dtype=mx_real_t, out=None):
    """Create an array of the given shape and type, filled with `val`.

    Parameters
    ----------
    shape : int or tuple of int
        The shape of the new array.
    val : scalar
        Fill value.
    ctx : Context, optional
        Device context (default is the current default context).
        Ignored when `out` is supplied.
    dtype : `str` or `numpy.dtype`, optional
        The data type of the returned `NDArray` (default is `float32`).
        Ignored when `out` is supplied.
    out : NDArray, optional
        The output NDArray (default is `None`).

    Returns
    -------
    NDArray
        `NDArray` filled with `val`, with the given shape, ctx, and dtype.

    Examples
    --------
    >>> mx.nd.full(1, 2.0).asnumpy()
    array([ 2.], dtype=float32)
    >>> mx.nd.full((1, 2), 2.0, mx.gpu(0))
    <NDArray 1x2 @gpu(0)>
    >>> mx.nd.full((1, 2), 2.0, dtype='float16').asnumpy()
    array([[ 2.,  2.]], dtype=float16)
    """
    if out is None:
        out = empty(shape, ctx, dtype)
    out[:] = val
    return out
def array(source_array, ctx=None, dtype=None):
    """Create an array from any object exposing the array interface.

    Parameters
    ----------
    source_array : array_like
        An object exposing the array interface, an object whose `__array__`
        method returns an array, or any (nested) sequence.
    ctx : Context, optional
        Device context (default is the current default context).
    dtype : str or numpy.dtype, optional
        The data type of the output array. The default dtype is
        ``source_array.dtype`` if `source_array` is an `NDArray`,
        `float32` otherwise.

    Returns
    -------
    NDArray
        An `NDArray` with the same contents as the `source_array`.

    Raises
    ------
    TypeError
        If `source_array` cannot be converted to a numpy array.

    Examples
    --------
    >>> import numpy as np
    >>> mx.nd.array([1, 2, 3])
    <NDArray 3 @cpu(0)>
    >>> mx.nd.array([[1, 2], [3, 4]])
    <NDArray 2x2 @cpu(0)>
    >>> mx.nd.array(np.zeros((3, 2)))
    <NDArray 3x2 @cpu(0)>
    >>> mx.nd.array(np.zeros((3, 2)), mx.gpu(0))
    <NDArray 3x2 @gpu(0)>
    """
    if isinstance(source_array, NDArray):
        dtype = source_array.dtype if dtype is None else dtype
    else:
        dtype = mx_real_t if dtype is None else dtype
        if not isinstance(source_array, np.ndarray):
            try:
                source_array = np.array(source_array, dtype=dtype)
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit.
            except Exception:
                raise TypeError('source_array must be array like object')
    # Allocate on the target context/dtype and copy the data in.
    arr = empty(source_array.shape, ctx, dtype)
    arr[:] = source_array
    return arr
def moveaxis(tensor, source, destination):
    """Move the `source` axis into the `destination` position
    while leaving the other axes in their original order.

    Parameters
    ----------
    tensor : mx.nd.array
        The array whose axes should be reordered.
    source : int
        Original position of the axis to move.
    destination : int
        Destination position for the moved axis.

    Returns
    -------
    result : mx.nd.array
        Array with moved axes.

    Raises
    ------
    ValueError
        If `source` or `destination` is outside ``[-tensor.ndim, tensor.ndim)``.

    Examples
    --------
    >>> X = mx.nd.array([[1, 2, 3], [4, 5, 6]])
    >>> mx.nd.moveaxis(X, 0, 1).shape
    (3L, 2L)
    """
    ndim = tensor.ndim
    axes = list(range(ndim))
    try:
        axes.pop(source)
    except IndexError:
        # Original message ran the two fragments together ("…ndimGot 2").
        raise ValueError('Source should verify 0 <= source < tensor.ndim. '
                         'Got %d' % source)
    # list.insert never raises IndexError -- out-of-range positions are
    # silently clamped -- so the destination must be range-checked explicitly
    # instead of relying on a try/except that could never fire.
    if not -ndim <= destination < ndim:
        raise ValueError('Destination should verify 0 <= destination < tensor.ndim. '
                         'Got %d' % destination)
    axes.insert(destination, source)
    return transpose(tensor, axes)
# pylint: disable= no-member, protected-access, too-many-arguments
def arange(start, stop=None, step=1.0, repeat=1, ctx=None, dtype=mx_real_t):
    """Return evenly spaced values within a given interval.

    Values are generated within the half-open interval [`start`, `stop`):
    the interval includes `start` but excludes `stop`. The function is
    similar to the built-in Python function `range` and to `numpy.arange`,
    but returns an `NDArray`.

    Parameters
    ----------
    start : float, optional
        Start of interval. The default start value is 0.
    stop : float
        End of interval.
    step : float, optional
        Spacing between values. The default step size is 1.
    repeat : int, optional
        Number of times to repeat each element. The default repeat count is 1.
    ctx : Context, optional
        Device context. Default context is the current default context.
    dtype : str or numpy.dtype, optional
        The data type of the `NDArray`. The default datatype is `np.float32`.

    Returns
    -------
    NDArray
        `NDArray` of evenly spaced values in the specified range.

    Examples
    --------
    >>> mx.nd.arange(3).asnumpy()
    array([ 0.,  1.,  2.], dtype=float32)
    >>> mx.nd.arange(2, 6).asnumpy()
    array([ 2.,  3.,  4.,  5.], dtype=float32)
    >>> mx.nd.arange(2, 6, step=2).asnumpy()
    array([ 2.,  4.], dtype=float32)
    >>> mx.nd.arange(2, 6, step=1.5, repeat=2).asnumpy()
    array([ 2. ,  2. ,  3.5,  3.5,  5. ,  5. ], dtype=float32)
    >>> mx.nd.arange(2, 6, step=2, repeat=3, dtype='int32').asnumpy()
    array([2, 2, 2, 4, 4, 4], dtype=int32)
    """
    target_ctx = Context.default_ctx if ctx is None else ctx
    # The backend operator expects the context serialized as a string.
    return _internal._arange(start=start, stop=stop, step=step, repeat=repeat,
                             dtype=dtype, ctx=str(target_ctx))
# pylint: enable= no-member, protected-access, too-many-arguments
#pylint: disable= too-many-arguments, no-member, protected-access
def _ufunc_helper(lhs, rhs, fn_array, fn_scalar, lfn_scalar, rfn_scalar=None):
    """Dispatch an element-wise binary operation to the right implementation.

    Performs numpy-like broadcasting when needed and selects among the
    supplied callables based on the operand types.

    Parameters
    ----------
    lhs : NDArray or numeric value
        Left-hand side operand.
    rhs : NDArray or numeric value
        Right-hand side operand.
    fn_array : function
        Called when both `lhs` and `rhs` are of ``NDArray`` type.
    fn_scalar : function
        Called when both `lhs` and `rhs` are numeric values.
    lfn_scalar : function
        Called when `lhs` is an ``NDArray`` and `rhs` is a numeric value.
    rfn_scalar : function, optional
        Called when `lhs` is a numeric value and `rhs` is an ``NDArray``;
        if `None`, the operation is assumed commutative and `lfn_scalar`
        is used instead.

    Returns
    -------
    NDArray
        The result array (or a plain number for the scalar/scalar case).
    """
    if isinstance(lhs, numeric_types):
        if isinstance(rhs, numeric_types):
            return fn_scalar(lhs, rhs)
        if rfn_scalar is None:
            # Commutative operation: swap operands and reuse lfn_scalar.
            return lfn_scalar(rhs, float(lhs))
        return rfn_scalar(rhs, float(lhs))
    if isinstance(rhs, numeric_types):
        return lfn_scalar(lhs, float(rhs))
    if isinstance(rhs, NDArray):
        return fn_array(lhs, rhs)
    raise TypeError('type %s not supported' % str(type(rhs)))
#pylint: enable= too-many-arguments, no-member, protected-access
def add(lhs, rhs):
    """Element-wise sum of the inputs with broadcasting.

    Equivalent to ``lhs + rhs``, ``mx.nd.broadcast_add(lhs, rhs)`` and
    ``mx.nd.broadcast_plus(lhs, rhs)``.

    .. note::
       Two arrays are broadcastable to a common shape when every pair of
       corresponding dimensions is either equal or has size 1.

    Parameters
    ----------
    lhs : scalar or array
        First array to be added.
    rhs : scalar or array
        Second array to be added. If ``lhs.shape != rhs.shape``, they must
        be broadcastable to a common shape.

    Returns
    -------
    NDArray
        The element-wise sum of the input arrays.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = mx.nd.arange(2).reshape((2,1))
    >>> (x+2).asnumpy()
    array([[ 3.,  3.,  3.],
           [ 3.,  3.,  3.]], dtype=float32)
    >>> mx.nd.add(x,y).asnumpy()
    array([[ 1.,  1.,  1.],
           [ 2.,  2.,  2.]], dtype=float32)
    """
    # pylint: disable= no-member, protected-access
    return _ufunc_helper(lhs, rhs, broadcast_add, operator.add,
                         _internal._plus_scalar, None)
    # pylint: enable= no-member, protected-access
def subtract(lhs, rhs):
    """Element-wise difference of the inputs with broadcasting.

    Equivalent to ``lhs - rhs``, ``mx.nd.broadcast_sub(lhs, rhs)`` and
    ``mx.nd.broadcast_minus(lhs, rhs)``.

    .. note::
       Two arrays are broadcastable to a common shape when every pair of
       corresponding dimensions is either equal or has size 1.

    Parameters
    ----------
    lhs : scalar or array
        First array to be subtracted.
    rhs : scalar or array
        Second array to be subtracted. If ``lhs.shape != rhs.shape``, they
        must be broadcastable to a common shape.

    Returns
    -------
    NDArray
        The element-wise difference of the input arrays.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = mx.nd.arange(2).reshape((2,1))
    >>> (x-2).asnumpy()
    array([[-1., -1., -1.],
           [-1., -1., -1.]], dtype=float32)
    >>> mx.nd.subtract(x,y).asnumpy()
    array([[ 1.,  1.,  1.],
           [ 0.,  0.,  0.]], dtype=float32)
    """
    # pylint: disable= no-member, protected-access
    return _ufunc_helper(lhs, rhs, broadcast_sub, operator.sub,
                         _internal._minus_scalar, _internal._rminus_scalar)
    # pylint: enable= no-member, protected-access
def multiply(lhs, rhs):
    """Element-wise product of the inputs with broadcasting.

    Equivalent to ``lhs * rhs`` and ``mx.nd.broadcast_mul(lhs, rhs)``.

    .. note::
       Two arrays are broadcastable to a common shape when every pair of
       corresponding dimensions is either equal or has size 1.

    Parameters
    ----------
    lhs : scalar or array
        First array to be multiplied.
    rhs : scalar or array
        Second array to be multiplied. If ``lhs.shape != rhs.shape``, they
        must be broadcastable to a common shape.

    Returns
    -------
    NDArray
        The element-wise multiplication of the input arrays.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = mx.nd.arange(2).reshape((2,1))
    >>> (x*2).asnumpy()
    array([[ 2.,  2.,  2.],
           [ 2.,  2.,  2.]], dtype=float32)
    >>> mx.nd.multiply(x, y).asnumpy()
    array([[ 0.,  0.,  0.],
           [ 1.,  1.,  1.]], dtype=float32)
    """
    # pylint: disable= no-member, protected-access
    return _ufunc_helper(lhs, rhs, broadcast_mul, operator.mul,
                         _internal._mul_scalar, None)
    # pylint: enable= no-member, protected-access
def divide(lhs, rhs):
    """Element-wise division of the inputs with broadcasting.

    Equivalent to ``lhs / rhs`` and ``mx.nd.broadcast_div(lhs, rhs)``.

    .. note::
       Two arrays are broadcastable to a common shape when every pair of
       corresponding dimensions is either equal or has size 1.

    Parameters
    ----------
    lhs : scalar or array
        First array in division.
    rhs : scalar or array
        Second array in division. If ``lhs.shape != rhs.shape``, they must
        be broadcastable to a common shape.

    Returns
    -------
    NDArray
        The element-wise division of the input arrays.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))*6
    >>> y = mx.nd.ones((2,1))*2
    >>> (x/3).asnumpy()
    array([[ 2.,  2.,  2.],
           [ 2.,  2.,  2.]], dtype=float32)
    >>> mx.nd.divide(x,y).asnumpy()
    array([[ 3.,  3.,  3.],
           [ 3.,  3.,  3.]], dtype=float32)
    """
    # pylint: disable= no-member, protected-access
    return _ufunc_helper(lhs, rhs, broadcast_div, operator.truediv,
                         _internal._div_scalar, _internal._rdiv_scalar)
    # pylint: enable= no-member, protected-access
def modulo(lhs, rhs):
    """Element-wise modulo of the inputs with broadcasting.

    Equivalent to ``lhs % rhs`` and ``mx.nd.broadcast_mod(lhs, rhs)``.

    .. note::
       Two arrays are broadcastable to a common shape when every pair of
       corresponding dimensions is either equal or has size 1.

    Parameters
    ----------
    lhs : scalar or array
        First array in modulo.
    rhs : scalar or array
        Second array in modulo. If ``lhs.shape != rhs.shape``, they must
        be broadcastable to a common shape.

    Returns
    -------
    NDArray
        The element-wise modulo of the input arrays.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))*6
    >>> y = mx.nd.ones((2,1))*4
    >>> (x%5).asnumpy()
    array([[ 1.,  1.,  1.],
           [ 1.,  1.,  1.]], dtype=float32)
    >>> mx.nd.modulo(x,y).asnumpy()
    array([[ 2.,  2.,  2.],
           [ 2.,  2.,  2.]], dtype=float32)
    """
    # pylint: disable= no-member, protected-access
    return _ufunc_helper(lhs, rhs, broadcast_mod, operator.mod,
                         _internal._mod_scalar, _internal._rmod_scalar)
    # pylint: enable= no-member, protected-access
def power(base, exp):
    """Element-wise exponentiation with broadcasting: raises the elements
    of `base` to the powers given in `exp`.

    Equivalent to ``base ** exp`` and ``mx.nd.broadcast_power(lhs, rhs)``.

    .. note::
       Two arrays are broadcastable to a common shape when every pair of
       corresponding dimensions is either equal or has size 1.

    Parameters
    ----------
    base : scalar or NDArray
        The base array.
    exp : scalar or NDArray
        The exponent array. If ``base.shape != exp.shape``, they must be
        broadcastable to a common shape.

    Returns
    -------
    NDArray
        The bases raised to the corresponding exponents, element-wise.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))*2
    >>> y = mx.nd.arange(1,3).reshape((2,1))
    >>> (x**2).asnumpy()
    array([[ 4.,  4.,  4.],
           [ 4.,  4.,  4.]], dtype=float32)
    >>> mx.nd.power(x,y).asnumpy()
    array([[ 2.,  2.,  2.],
           [ 4.,  4.,  4.]], dtype=float32)
    """
    # pylint: disable= no-member, protected-access
    return _ufunc_helper(base, exp, broadcast_power, operator.pow,
                         _internal._power_scalar, _internal._rpower_scalar)
    # pylint: enable= no-member, protected-access
def maximum(lhs, rhs):
    """Element-wise maximum of the inputs with broadcasting.

    Equivalent to ``mx.nd.broadcast_maximum(lhs, rhs)``.

    .. note::
       Two arrays are broadcastable to a common shape when every pair of
       corresponding dimensions is either equal or has size 1.

    Parameters
    ----------
    lhs : scalar or array
        First array to be compared.
    rhs : scalar or array
        Second array to be compared. If ``lhs.shape != rhs.shape``, they
        must be broadcastable to a common shape.

    Returns
    -------
    NDArray
        The element-wise maximum of the input arrays.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = mx.nd.arange(2).reshape((2,1))
    >>> mx.nd.maximum(x, 2).asnumpy()
    array([[ 2.,  2.,  2.],
           [ 2.,  2.,  2.]], dtype=float32)
    >>> mx.nd.maximum(x, y).asnumpy()
    array([[ 1.,  1.,  1.],
           [ 1.,  1.,  1.]], dtype=float32)
    """
    # pylint: disable= no-member, protected-access
    # The scalar/scalar lambda is kept as an explicit conditional (not
    # builtin max) to preserve its exact tie/NaN behavior.
    return _ufunc_helper(lhs, rhs, broadcast_maximum,
                         lambda x, y: x if x > y else y,
                         _internal._maximum_scalar, None)
    # pylint: enable= no-member, protected-access
def minimum(lhs, rhs):
    """Element-wise minimum of the inputs with broadcasting.

    Equivalent to ``mx.nd.broadcast_minimum(lhs, rhs)``.

    .. note::
       Two arrays are broadcastable to a common shape when every pair of
       corresponding dimensions is either equal or has size 1.

    Parameters
    ----------
    lhs : scalar or array
        First array to be compared.
    rhs : scalar or array
        Second array to be compared. If ``lhs.shape != rhs.shape``, they
        must be broadcastable to a common shape.

    Returns
    -------
    NDArray
        The element-wise minimum of the input arrays.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = mx.nd.arange(2).reshape((2,1))
    >>> mx.nd.minimum(x, 2).asnumpy()
    array([[ 1.,  1.,  1.],
           [ 1.,  1.,  1.]], dtype=float32)
    >>> mx.nd.minimum(x, y).asnumpy()
    array([[ 0.,  0.,  0.],
           [ 1.,  1.,  1.]], dtype=float32)
    """
    # pylint: disable= no-member, protected-access
    # The scalar/scalar lambda is kept as an explicit conditional (not
    # builtin min) to preserve its exact tie/NaN behavior.
    return _ufunc_helper(lhs, rhs, broadcast_minimum,
                         lambda x, y: x if x < y else y,
                         _internal._minimum_scalar, None)
    # pylint: enable= no-member, protected-access
def equal(lhs, rhs):
    """Element-wise **equal to** (==) comparison with broadcasting.

    For each element pair, returns 1 (true) if the elements are the same,
    otherwise 0 (false).

    Equivalent to ``lhs == rhs`` and ``mx.nd.broadcast_equal(lhs, rhs)``.

    .. note::
       Two arrays are broadcastable to a common shape when every pair of
       corresponding dimensions is either equal or has size 1.

    Parameters
    ----------
    lhs : scalar or array
        First array to be compared.
    rhs : scalar or array
        Second array to be compared. If ``lhs.shape != rhs.shape``, they
        must be broadcastable to a common shape.

    Returns
    -------
    NDArray
        Output array of 0/1 values.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = mx.nd.arange(2).reshape((2,1))
    >>> (x == 1).asnumpy()
    array([[ 1.,  1.,  1.],
           [ 1.,  1.,  1.]], dtype=float32)
    >>> mx.nd.equal(x,y).asnumpy()
    array([[ 0.,  0.,  0.],
           [ 1.,  1.,  1.]], dtype=float32)
    """
    # pylint: disable= no-member, protected-access
    return _ufunc_helper(lhs, rhs, broadcast_equal,
                         lambda x, y: int(x == y),
                         _internal._equal_scalar, None)
    # pylint: enable= no-member, protected-access
def not_equal(lhs, rhs):
    """Element-wise **not equal to** (!=) comparison with broadcasting.

    For each element pair, returns 1 (true) if the elements differ,
    otherwise 0 (false).

    Equivalent to ``lhs != rhs`` and ``mx.nd.broadcast_not_equal(lhs, rhs)``.

    .. note::
       Two arrays are broadcastable to a common shape when every pair of
       corresponding dimensions is either equal or has size 1.

    Parameters
    ----------
    lhs : scalar or array
        First array to be compared.
    rhs : scalar or array
        Second array to be compared. If ``lhs.shape != rhs.shape``, they
        must be broadcastable to a common shape.

    Returns
    -------
    NDArray
        Output array of 0/1 values.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = mx.nd.arange(2).reshape((2,1))
    >>> (x != 1).asnumpy()
    array([[ 0.,  0.,  0.],
           [ 0.,  0.,  0.]], dtype=float32)
    >>> mx.nd.not_equal(x, y).asnumpy()
    array([[ 1.,  1.,  1.],
           [ 0.,  0.,  0.]], dtype=float32)
    """
    # pylint: disable= no-member, protected-access
    return _ufunc_helper(lhs, rhs, broadcast_not_equal,
                         lambda x, y: int(x != y),
                         _internal._not_equal_scalar, None)
    # pylint: enable= no-member, protected-access
def greater(lhs, rhs):
    """Element-wise **greater than** (>) comparison with broadcasting.

    For each element pair, returns 1 (true) if the `lhs` element is greater
    than the `rhs` element, otherwise 0 (false).

    Equivalent to ``lhs > rhs`` and ``mx.nd.broadcast_greater(lhs, rhs)``.

    .. note::
       Two arrays are broadcastable to a common shape when every pair of
       corresponding dimensions is either equal or has size 1.

    Parameters
    ----------
    lhs : scalar or array
        First array to be compared.
    rhs : scalar or array
        Second array to be compared. If ``lhs.shape != rhs.shape``, they
        must be broadcastable to a common shape.

    Returns
    -------
    NDArray
        Output array of 0/1 values.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = mx.nd.arange(2).reshape((2,1))
    >>> (x > 1).asnumpy()
    array([[ 0.,  0.,  0.],
           [ 0.,  0.,  0.]], dtype=float32)
    >>> mx.nd.greater(x, y).asnumpy()
    array([[ 1.,  1.,  1.],
           [ 0.,  0.,  0.]], dtype=float32)
    """
    # pylint: disable= no-member, protected-access
    # For scalar lhs: `s > x` is rewritten as `x < s`, hence _lesser_scalar.
    return _ufunc_helper(lhs, rhs, broadcast_greater,
                         lambda x, y: int(x > y),
                         _internal._greater_scalar, _internal._lesser_scalar)
    # pylint: enable= no-member, protected-access
def greater_equal(lhs, rhs):
    """Element-wise **greater than or equal to** (>=) comparison with
    broadcasting.

    For each element pair, returns 1 (true) if the `lhs` element is greater
    than or equal to the `rhs` element, otherwise 0 (false).

    Equivalent to ``lhs >= rhs`` and ``mx.nd.broadcast_greater_equal(lhs, rhs)``.

    .. note::
       Two arrays are broadcastable to a common shape when every pair of
       corresponding dimensions is either equal or has size 1.

    Parameters
    ----------
    lhs : scalar or array
        First array to be compared.
    rhs : scalar or array
        Second array to be compared. If ``lhs.shape != rhs.shape``, they
        must be broadcastable to a common shape.

    Returns
    -------
    NDArray
        Output array of 0/1 values.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = mx.nd.arange(2).reshape((2,1))
    >>> (x >= 1).asnumpy()
    array([[ 1.,  1.,  1.],
           [ 1.,  1.,  1.]], dtype=float32)
    >>> mx.nd.greater_equal(x, y).asnumpy()
    array([[ 1.,  1.,  1.],
           [ 1.,  1.,  1.]], dtype=float32)
    """
    # pylint: disable= no-member, protected-access
    # For scalar lhs: `s >= x` is rewritten as `x <= s`, hence _lesser_equal_scalar.
    return _ufunc_helper(lhs, rhs, broadcast_greater_equal,
                         lambda x, y: int(x >= y),
                         _internal._greater_equal_scalar,
                         _internal._lesser_equal_scalar)
    # pylint: enable= no-member, protected-access
def lesser(lhs, rhs):
    """Element-wise **lesser than** (<) comparison with broadcasting.

    For each element pair, returns 1 (true) if the `lhs` element is less
    than the `rhs` element, otherwise 0 (false).

    Equivalent to ``lhs < rhs`` and ``mx.nd.broadcast_lesser(lhs, rhs)``.

    .. note::
       Two arrays are broadcastable to a common shape when every pair of
       corresponding dimensions is either equal or has size 1.

    Parameters
    ----------
    lhs : scalar or array
        First array to be compared.
    rhs : scalar or array
        Second array to be compared. If ``lhs.shape != rhs.shape``, they
        must be broadcastable to a common shape.

    Returns
    -------
    NDArray
        Output array of 0/1 values.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = mx.nd.arange(2).reshape((2,1))
    >>> (x < 1).asnumpy()
    array([[ 0.,  0.,  0.],
           [ 0.,  0.,  0.]], dtype=float32)
    >>> mx.nd.lesser(x, y).asnumpy()
    array([[ 0.,  0.,  0.],
           [ 0.,  0.,  0.]], dtype=float32)
    """
    # pylint: disable= no-member, protected-access
    # For scalar lhs: `s < x` is rewritten as `x > s`, hence _greater_scalar.
    return _ufunc_helper(lhs, rhs, broadcast_lesser,
                         lambda x, y: int(x < y),
                         _internal._lesser_scalar, _internal._greater_scalar)
    # pylint: enable= no-member, protected-access
def lesser_equal(lhs, rhs):
    """Element-wise **lesser than or equal to** (<=) comparison with
    broadcasting.

    For each element pair, returns 1 (true) if the `lhs` element is less
    than or equal to the `rhs` element, otherwise 0 (false).

    Equivalent to ``lhs <= rhs`` and ``mx.nd.broadcast_lesser_equal(lhs, rhs)``.

    .. note::
       Two arrays are broadcastable to a common shape when every pair of
       corresponding dimensions is either equal or has size 1.

    Parameters
    ----------
    lhs : scalar or array
        First array to be compared.
    rhs : scalar or array
        Second array to be compared. If ``lhs.shape != rhs.shape``, they
        must be broadcastable to a common shape.

    Returns
    -------
    NDArray
        Output array of 0/1 values.

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> y = mx.nd.arange(2).reshape((2,1))
    >>> (x <= 1).asnumpy()
    array([[ 1.,  1.,  1.],
           [ 1.,  1.,  1.]], dtype=float32)
    >>> mx.nd.lesser_equal(x, y).asnumpy()
    array([[ 0.,  0.,  0.],
           [ 1.,  1.,  1.]], dtype=float32)
    """
    # pylint: disable= no-member, protected-access
    # For scalar lhs: `s <= x` is rewritten as `x >= s`, hence _greater_equal_scalar.
    return _ufunc_helper(lhs, rhs, broadcast_lesser_equal,
                         lambda x, y: int(x <= y),
                         _internal._lesser_equal_scalar,
                         _internal._greater_equal_scalar)
    # pylint: enable= no-member, protected-access
def true_divide(lhs, rhs):
    """Alias of :meth:`divide`: element-wise division with broadcasting."""
    return divide(lhs, rhs)
def negative(arr):
    """Numerical negative, element-wise. Equivalent to ``-arr``.

    Parameters
    ----------
    arr : NDArray
        The input array.

    Returns
    -------
    NDArray
        ``-arr``

    Examples
    --------
    >>> x = mx.nd.ones((2,3))
    >>> (-x).asnumpy()
    array([[-1., -1., -1.],
           [-1., -1., -1.]], dtype=float32)
    """
    # Implemented as a scalar multiplication by -1.
    return multiply(arr, -1.0)
def load(fname):
    """Load array(s) from a file previously written with ``save``.

    See ``save`` for details on supported filenames.

    Parameters
    ----------
    fname : str
        The filename.

    Returns
    -------
    list of NDArray or dict of str to NDArray
        A list when the file was saved from a list, a name->array dict
        when it was saved from a dict.
    """
    if not isinstance(fname, string_types):
        raise TypeError('fname required to be a string')
    out_size = mx_uint()
    out_name_size = mx_uint()
    handles = ctypes.POINTER(NDArrayHandle)()
    names = ctypes.POINTER(ctypes.c_char_p)()
    check_call(_LIB.MXNDArrayLoad(c_str(fname),
                                  ctypes.byref(out_size),
                                  ctypes.byref(handles),
                                  ctypes.byref(out_name_size),
                                  ctypes.byref(names)))
    num_arrays = out_size.value
    arrays = [NDArray(NDArrayHandle(handles[i])) for i in range(num_arrays)]
    # A zero name count means the file held an anonymous list of arrays.
    if out_name_size.value == 0:
        return arrays
    assert out_name_size.value == num_arrays
    return dict((py_str(names[i]), arr) for i, arr in enumerate(arrays))
def save(fname, data):
    """Save a list of arrays or a dict of str->array to file.

    Examples of filenames:

    - ``/path/to/file``
    - ``s3://my-bucket/path/to/file`` (if compiled with AWS S3 supports)
    - ``hdfs://path/to/file`` (if compiled with HDFS supports)

    Parameters
    ----------
    fname : str
        The filename.
    data : list of ``NDArray`` or dict of str to ``NDArray``
        The data to save.

    Examples
    --------
    >>> x = mx.nd.zeros((2,3))
    >>> y = mx.nd.ones((1,4))
    >>> mx.nd.save('my_list', [x,y])
    >>> mx.nd.save('my_dict', {'x':x, 'y':y})
    >>> mx.nd.load('my_list')
    [<NDArray 2x3 @cpu(0)>, <NDArray 1x4 @cpu(0)>]
    >>> mx.nd.load('my_dict')
    {'y': <NDArray 1x4 @cpu(0)>, 'x': <NDArray 2x3 @cpu(0)>}
    """
    handles = []
    keys = None
    if isinstance(data, dict):
        names = []
        for key, val in data.items():
            if not isinstance(key, string_types) or not isinstance(val, NDArray):
                raise TypeError('save only accept dict str->NDArray or list of NDArray')
            names.append(c_str(key))
            handles.append(val.handle)
        keys = c_array(ctypes.c_char_p, names)
    else:
        for val in data:
            if not isinstance(val, NDArray):
                raise TypeError('save only accept dict str->NDArray or list of NDArray')
            handles.append(val.handle)
    # keys stays NULL for the anonymous-list form.
    check_call(_LIB.MXNDArraySave(c_str(fname),
                                  mx_uint(len(handles)),
                                  c_array(NDArrayHandle, handles),
                                  keys))
def concatenate(arrays, axis=0, always_copy=True):
    """DEPRECATED, use ``concat`` instead

    Concatenates a list of arrays along `axis` into one new array.

    Parameters
    ----------
    arrays : list of `NDArray`
        Arrays to be concatenate. They must have identical shape except
        the first dimension. They also must have the same data type.
    axis : int
        The axis along which to concatenate.
    always_copy : bool
        Default `True`. When not `True`, if the arrays only contain one
        `NDArray`, that element will be returned directly, avoid copying.

    Returns
    -------
    NDArray
        An `NDArray` that lives on the same context as `arrays[0].context`.
    """
    assert isinstance(arrays, list)
    assert len(arrays) > 0
    assert isinstance(arrays[0], NDArray)

    # Zero-copy fast path: a single input can be returned as-is.
    if not always_copy and len(arrays) == 1:
        return arrays[0]

    # Accumulate the total extent along `axis` while checking that every
    # array agrees on the remaining dimensions and the dtype.
    shape_axis = arrays[0].shape[axis]
    shape_rest1 = arrays[0].shape[0:axis]       # dims before `axis`
    shape_rest2 = arrays[0].shape[axis+1:]      # dims after `axis`
    dtype = arrays[0].dtype
    for arr in arrays[1:]:
        shape_axis += arr.shape[axis]
        assert shape_rest1 == arr.shape[0:axis]
        assert shape_rest2 == arr.shape[axis+1:]
        assert dtype == arr.dtype
    ret_shape = shape_rest1 + (shape_axis,) + shape_rest2
    ret = empty(ret_shape, ctx=arrays[0].context, dtype=dtype)

    # Copy each input into its slice of the output. `idx` is the running
    # offset along `axis`; `begin`/`end` are reused across iterations, with
    # only the `axis` entry updated each time — keep the mutation order.
    idx = 0
    begin = [0 for _ in ret_shape]
    end = list(ret_shape)
    for arr in arrays:
        if axis == 0:
            # Leading-axis case can use plain slice assignment.
            ret[idx:idx+arr.shape[0]] = arr
        else:
            # General case: in-place crop-assign into the [begin, end) window.
            begin[axis] = idx
            end[axis] = idx+arr.shape[axis]
            # pylint: disable=no-member,protected-access
            _internal._crop_assign(ret, arr, out=ret,
                                   begin=tuple(begin),
                                   end=tuple(end))
            # pylint: enable=no-member,protected-access
        idx += arr.shape[axis]
    return ret
def imdecode(str_img, clip_rect=(0, 0, 0, 0), out=None, index=0, channels=3, mean=None):
    """DEPRECATED, use mx.img instead

    Decodes a binary image into an NDArray.

    Parameters
    ----------
    str_img : str
        Binary image data
    clip_rect : iterable of 4 int
        Clip decoded image to rectangle (x0, y0, x1, y1).
    out : NDArray
        Output buffer. Can be 3 dimensional (c, h, w) or 4 dimensional (n, c, h, w).
    index : int
        Output decoded image to i-th slice of 4 dimensional buffer.
    channels : int
        Number of channels to output. Decode to grey scale when channels = 1.
    mean : NDArray
        Subtract mean from decode image before outputing.
    """
    # pylint: disable= no-member, protected-access, too-many-arguments
    if mean is None:
        # Empty handle signals "no mean subtraction" to the backend.
        mean = NDArray(_new_empty_handle())
    # Forward `out` only when the caller supplied a buffer, so the
    # operator allocates its own output otherwise.
    call_kwargs = {'str_img': str_img}
    if out is not None:
        call_kwargs['out'] = out
    return _internal._imdecode(mean, index,
                               clip_rect[0],
                               clip_rect[1],
                               clip_rect[2],
                               clip_rect[3],
                               channels,
                               len(str_img),
                               **call_kwargs)
# pylint: disable=too-many-locals, invalid-name
def _make_ndarray_function(handle, name):
    """Create a NDArray function from the FunctionHandle.

    Queries the C API for the operator's signature and documentation, then
    generates and `exec`s a thin Python wrapper that forwards its arguments
    to `_imperative_invoke`.
    """
    # Query the backend for the operator's metadata (name, doc, arguments).
    real_name = ctypes.c_char_p()
    desc = ctypes.c_char_p()
    num_args = mx_uint()
    arg_names = ctypes.POINTER(ctypes.c_char_p)()
    arg_types = ctypes.POINTER(ctypes.c_char_p)()
    arg_descs = ctypes.POINTER(ctypes.c_char_p)()
    key_var_num_args = ctypes.c_char_p()
    ret_type = ctypes.c_char_p()
    check_call(_LIB.MXSymbolGetAtomicSymbolInfo(
        handle, ctypes.byref(real_name), ctypes.byref(desc),
        ctypes.byref(num_args),
        ctypes.byref(arg_names),
        ctypes.byref(arg_types),
        ctypes.byref(arg_descs),
        ctypes.byref(key_var_num_args),
        ctypes.byref(ret_type)))
    narg = int(num_args.value)
    arg_names = [py_str(arg_names[i]) for i in range(narg)]
    arg_types = [py_str(arg_types[i]) for i in range(narg)]
    func_name = name
    key_var_num_args = py_str(key_var_num_args.value)
    ret_type = py_str(ret_type.value) if ret_type.value is not None else ''
    doc_str = _build_doc(func_name,
                         py_str(desc.value),
                         arg_names,
                         arg_types,
                         [py_str(arg_descs[i]) for i in range(narg)],
                         key_var_num_args,
                         ret_type)
    # Sort the operator's arguments into three kinds:
    #  - ndsignature/ndarg_names: NDArray/Symbol inputs,
    #  - kwarg_names: scalar attributes (default sentinel `_Null`),
    #  - dtype_name: the special 'dtype' argument, normalized via np.dtype.
    dtype_name = None
    arr_name = None
    ndsignature = []
    signature = []
    ndarg_names = []
    kwarg_names = []
    for i in range(narg):
        name, atype = arg_names[i], arg_types[i]
        if name == 'dtype':
            dtype_name = name
            signature.append('%s=_Null'%name)
        elif atype.startswith('NDArray') or atype.startswith('Symbol'):
            assert not arr_name, \
                "Op can only have one argument with variable " \
                "size and it must be the last argument."
            if atype.endswith('[]'):
                # Variable-length tensor input: becomes *args in the wrapper.
                ndsignature.append('*%s'%name)
                arr_name = name
            else:
                ndsignature.append('%s=None'%name)
                ndarg_names.append(name)
        else:
            signature.append('%s=_Null'%name)
            kwarg_names.append(name)
    #signature.append('is_train=False')
    signature.append('out=None')
    signature.append('name=None')
    signature.append('**kwargs')
    signature = ndsignature + signature
    # Generate the wrapper source; the embedded strings below are the body of
    # the function that will be exec'd, so their indentation is significant.
    code = []
    if arr_name:
        code.append("""
def %s(*%s, **kwargs):"""%(func_name, arr_name))
        code.append("""
    ndargs = []
    for i in {}:
        assert isinstance(i, NDArrayBase), \\
            "Positional arguments must have NDArray type, " \\
            "but got %s"%str(i)
        ndargs.append(i)""".format(arr_name))
        if dtype_name is not None:
            code.append("""
    if '%s' in kwargs:
        kwargs['%s'] = np.dtype(kwargs['%s']).name"""%(
            dtype_name, dtype_name, dtype_name))
        code.append("""
    _ = kwargs.pop('name', None)
    out = kwargs.pop('out', None)
    keys = list(kwargs.keys())
    vals = list(kwargs.values())""")
    else:
        code.append("""
def %s(%s):
    ndargs = []
    keys = list(kwargs.keys())
    vals = list(kwargs.values())"""%(func_name, ', '.join(signature)))
        # NDArray args
        for name in ndarg_names: # pylint: disable=redefined-argument-from-local
            code.append("""
    if {name} is not None:
        assert isinstance({name}, NDArrayBase), \\
            "Argument {name} must have NDArray type, but got %s"%str({name})
        ndargs.append({name})""".format(name=name))
        # kwargs
        for name in kwarg_names: # pylint: disable=redefined-argument-from-local
            code.append("""
    if %s is not _Null:
        keys.append('%s')
        vals.append(%s)"""%(name, name, name))
        # dtype
        if dtype_name is not None:
            code.append("""
    if %s is not _Null:
        keys.append('%s')
        vals.append(np.dtype(%s).name)"""%(dtype_name, dtype_name, dtype_name))
    code.append("""
    return _imperative_invoke(%d, ndargs, keys, vals, out)"""%(
        handle.value))
    # Compile the generated wrapper and attach the real name/doc/module.
    local = {}
    exec(''.join(code), None, local)  # pylint: disable=exec-used
    ndarray_function = local[func_name]
    ndarray_function.__name__ = func_name
    ndarray_function.__doc__ = doc_str
    ndarray_function.__module__ = 'mxnet.ndarray'
    return ndarray_function
# pylint: enable=too-many-locals, invalid-name
def _init_ndarray_module(ndarray_class, root_namespace):
    """List and add all the ndarray functions to current module."""
    _set_ndarray_class(ndarray_class)

    # Ask the backend for the full list of registered operator names.
    plist = ctypes.POINTER(ctypes.c_char_p)()
    size = ctypes.c_uint()
    check_call(_LIB.MXListAllOpNames(ctypes.byref(size),
                                     ctypes.byref(plist)))
    op_names = [py_str(plist[idx]) for idx in range(size.value)]

    # Target modules: public ops, internal (underscore-prefixed) ops,
    # and contrib (experimental) ops.
    module_obj = _sys.modules["%s.ndarray" % root_namespace]
    module_internal = _sys.modules["%s._ndarray_internal" % root_namespace]
    module_contrib = _sys.modules["%s.contrib.ndarray" % root_namespace]

    for op_name in op_names:
        hdl = OpHandle()
        check_call(_LIB.NNGetOpHandle(c_str(op_name), ctypes.byref(hdl)))
        function = _make_ndarray_function(hdl, op_name)
        fname = function.__name__
        if fname.startswith('_contrib_'):
            # Strip the '_contrib_' prefix and expose under contrib.ndarray.
            function.__name__ = fname[len('_contrib_'):]
            function.__module__ = 'mxnet.contrib.ndarray'
            setattr(module_contrib, function.__name__, function)
        elif fname.startswith('_'):
            setattr(module_internal, fname, function)
        else:
            setattr(module_obj, fname, function)
# Register every backend operator on the mxnet.ndarray modules at import time.
_init_ndarray_module(NDArray, "mxnet")
# from .base import add_fileline_to_docstring
# add_fileline_to_docstring(__name__)
|
{
"content_hash": "64fc07db92fc44db953a68ed74c08c7a",
"timestamp": "",
"source": "github",
"line_count": 2514,
"max_line_length": 99,
"avg_line_length": 31.933174224343677,
"alnum_prop": 0.5297334329845541,
"repo_name": "coder-james/mxnet",
"id": "31b7d7cfb944b1a8174ef78da7425931be8d0034",
"size": "80441",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "python/mxnet/ndarray.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "10619"
},
{
"name": "C",
"bytes": "89222"
},
{
"name": "C++",
"bytes": "3174603"
},
{
"name": "CMake",
"bytes": "47349"
},
{
"name": "Cuda",
"bytes": "566663"
},
{
"name": "Java",
"bytes": "2868"
},
{
"name": "Jupyter Notebook",
"bytes": "1229390"
},
{
"name": "Makefile",
"bytes": "39572"
},
{
"name": "Matlab",
"bytes": "30187"
},
{
"name": "Perl",
"bytes": "589576"
},
{
"name": "Perl 6",
"bytes": "21768"
},
{
"name": "Protocol Buffer",
"bytes": "77256"
},
{
"name": "Python",
"bytes": "2734324"
},
{
"name": "R",
"bytes": "256944"
},
{
"name": "Scala",
"bytes": "853194"
},
{
"name": "Shell",
"bytes": "108857"
}
],
"symlink_target": ""
}
|
"""
The values are set to run the application either on localhost or
within docker using docker-compose. If you would like to run it
a different waay fell free to change the appropriate values.
"""
import os
try:
MONGO_HOST = os.environ['ANCHOR_DB_1_PORT_27017_TCP_ADDR']
except:
MONGO_HOST = 'localhost'
MONGO_PORT = 27017
MONGO_KWARGS = {'tz_aware': True}
MONGO_DATABASE = 'anchor'
ADMIN_USERNAME = 'cloud_username'
ADMIN_NAME = 'Admin Full Name'
ADMIN_EMAIL = 'Admin email'
SECRET_KEY = 'secret_key_for_cookie'
|
{
"content_hash": "7842d5fc2d4caf7c574898b0f7164612",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 68,
"avg_line_length": 21.68,
"alnum_prop": 0.7047970479704797,
"repo_name": "oldarmyc/anchor",
"id": "ff7d745384bb3282e72e2dabee9e5a4692af5f03",
"size": "1117",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "anchor/config/config.example.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2978"
},
{
"name": "HTML",
"bytes": "23641"
},
{
"name": "JavaScript",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "131316"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import re
from math import floor, log10
from benchexec.tablegenerator import util
# BUG FIX: this was a single string 'Column, ColumnType, ColumnMeasureType',
# which breaks `from ... import *` — it must be one string per exported name.
__all__ = ['Column', 'ColumnType', 'ColumnMeasureType']

DEFAULT_TIME_PRECISION = 3
DEFAULT_TOOLTIP_PRECISION = 2

# Compile regular expression only once for later uses.
# Raw string so backslash escapes like \d reach the regex engine verbatim.
REGEX_SIGNIFICANT_DIGITS = re.compile(r'([-\+])?(\d+)\.?(0*(\d+))?([eE]([-\+])(\d+))?')
# Indices of the capture groups in REGEX_SIGNIFICANT_DIGITS.
GROUP_SIGN = 1
GROUP_INT_PART = 2
GROUP_DEC_PART = 3
GROUP_SIG_DEC_DIGITS = 4
GROUP_EXP = 5
GROUP_EXP_SIGN = 6
GROUP_EXP_VAL = 7
POSSIBLE_FORMAT_TARGETS = ['html', 'html_cell', 'tooltip', 'tooltip_stochastic', 'csv']
def enum(**enums):
    """Build an ad-hoc enumeration class whose attributes are *enums*."""
    enum_cls = type('Enum', (), enums)
    return enum_cls
class ColumnEnumType(object):
    """A named singleton-style column type identified by a numeric tag.

    Two instances compare equal iff their numeric `_type` tags are equal;
    comparison against any object lacking a `_type` attribute is False.
    """

    def __init__(self, _type, name):
        self._type = _type  # numeric tag used for equality
        self.name = name    # human-readable name, returned by str()

    @property
    def type(self):
        return self

    def __str__(self):
        return self.name

    def __eq__(self, other):
        # Narrowed from a bare `except:` — we only expect `other` to lack
        # a `_type` attribute; anything else should propagate.
        try:
            return self._type == other._type
        except AttributeError:
            return False

    def __ne__(self, other):
        # Needed for Python 2, where `!=` does not fall back to `__eq__`.
        return not self.__eq__(other)
class ColumnType(object):
    # Namespace of the supported column types; each attribute below is the
    # shared ColumnEnumType instance used throughout the table generator.
    column_types = enum(text=1, count=2, measure=3, status=4, main_status=5)
    text = ColumnEnumType(column_types.text, 'text')
    count = ColumnEnumType(column_types.count, 'count')
    measure = ColumnEnumType(column_types.measure, 'measure')
    status = ColumnEnumType(column_types.status, 'status')
    main_status = ColumnEnumType(column_types.main_status, 'main_status')
class ColumnMeasureType(object):
    """
    Column type 'Measure': behaves like ColumnType.measure but additionally
    records the largest number of digits after the decimal point seen in the
    column's values.
    """

    def __init__(self, max_decimal_digits):
        self._type = ColumnType.measure
        self._max_decimal_digits = max_decimal_digits

    @property
    def max_decimal_digits(self):
        """Largest count of decimal digits among the column's values."""
        return self._max_decimal_digits

    @property
    def type(self):
        """The underlying ColumnType.measure instance."""
        return self._type

    def __str__(self):
        return "{}({})".format(self._type, self._max_decimal_digits)
class Column(object):
    """
    The class Column contains title, pattern (to identify a line in log_file),
    number_of_significant_digits of a column, the type of the column's values,
    their unit, a scale factor to apply to all values of the column (mostly to fit the unit)
    and href (to create a link to a resource).
    It does NOT contain the value of a column.

    The following conditions must be kept, but cannot be checked in the constructor.
    If they are violated, they may lead to errors in other parts of the program.
    * If 'scale_factor' is a value other than the default, 'unit' must be set.
    * If 'unit' and 'scale_factor' are set, 'source_unit' must be set, or the column's cells must not have a
      source unit, i.e. the source unit "".
    * If set, 'source_unit' must fit the source unit of the column's cells.
    * In addition, if 'unit' and 'source_unit' are set and of different values,
      'scale_factor' must be a value other than 'None'.
    """
    def __init__(self, title, pattern, num_of_digits, href, col_type=None,
                 unit=None, source_unit=None, scale_factor=None, relevant_for_diff=None, display_title=None):
        # If scaling on the variables is performed, a display unit must be defined, explicitly
        if scale_factor is not None and scale_factor != 1 and unit is None:
            raise util.TableDefinitionError("Scale factor is defined, but display unit is not (in column {})"
                                            .format(title))

        self.title = title
        self.pattern = pattern
        self.number_of_significant_digits = int(num_of_digits) if num_of_digits else None
        self.type = col_type
        self.unit = unit
        self.source_unit = source_unit
        self.scale_factor = float(scale_factor) if scale_factor else scale_factor
        self.href = href
        # relevant_for_diff arrives from XML as the string "true"/"false" or None.
        if relevant_for_diff is None:
            self.relevant_for_diff = False
        else:
            self.relevant_for_diff = relevant_for_diff.lower() == "true"
        self.display_title = display_title

    def is_numeric(self):
        """Return True if this column holds measures or counts."""
        return self.type.type == ColumnType.measure or self.type.type == ColumnType.count

    def format_title(self):
        """Return the display title, appending the unit for numeric columns."""
        title = self.display_title or self.title
        if self.is_numeric() and (self.unit or self.source_unit):
            used_unit = self.unit or self.source_unit
            return "{} ({})".format(title, used_unit)
        else:
            return title

    def format_value(self, value, isToAlign=False, format_target="html"):
        """
        Format a value nicely for human-readable output (including rounding).

        @param value: the value to format
        @param isToAlign: if True, spaces will be added to the returned String representation to align it to all
            other values in this column, correctly
        @param format_target the target the value should be formatted for
        @return: a formatted String representation of the given value.
        """
        if format_target not in POSSIBLE_FORMAT_TARGETS:
            raise ValueError('Unknown format target')

        if value is None:
            return ''

        # If the number ends with "s" or another unit, remove it.
        # Units should not occur in table cells, but in the table head.
        number_str = util.remove_unit(str(value).strip())
        try:
            number = float(number_str)
        except ValueError:  # If value is no float, don't format it.
            return value

        # Apply the scale factor to the value
        if self.scale_factor is not None:
            number *= self.scale_factor

        number_of_significant_digits = self.number_of_significant_digits
        max_dec_digits = 0
        # BUG FIX: the original compared format_target with `is`/`is not`,
        # which tests object identity and only worked through CPython's
        # string interning; string values must be compared with ==/!=.
        if number_of_significant_digits is None and format_target == "tooltip_stochastic":
            return str(round(number, DEFAULT_TOOLTIP_PRECISION))
        elif self.type.type == ColumnType.measure:
            if number_of_significant_digits is None and format_target != "csv":
                number_of_significant_digits = DEFAULT_TIME_PRECISION
            max_dec_digits = self.type.max_decimal_digits

        if number_of_significant_digits is not None:
            current_significant_digits = _get_significant_digits(number_str)
            return _format_number(number, current_significant_digits, number_of_significant_digits, max_dec_digits, isToAlign, format_target)
        else:
            if number == float(number_str):
                # TODO remove as soon as scaled values are handled correctly
                return number_str
            if int(number) == number:
                number = int(number)
            return str(number)

    def __str__(self):
        return "{}(title={}, pattern={}, num_of_digits={}, href={}, col_type={}, unit={}, scale_factor={})".format(
            self.__class__.__name__, self.title, self.pattern, self.number_of_significant_digits, self.href, self.type, self.unit, self.scale_factor)
def _format_number_align(formattedValue, max_number_of_dec_digits, format_target="html"):
alignment = max_number_of_dec_digits
if formattedValue.find('.') >= 0:
# Subtract spaces for digits after the decimal point.
alignment -= len(formattedValue) - formattedValue.find('.') - 1
elif max_number_of_dec_digits > 0 and format_target.startswith('html'):
# Add punctuation space.
formattedValue += ' '
if format_target.startswith('html'):
whitespace = ' '
else:
whitespace = ' '
formattedValue += whitespace * alignment
return formattedValue
def _get_significant_digits(value):
    """Count the significant digits of the decimal string *value*.

    The module-level regex splits the value into multiple groups:

      Group GROUP_SIGN:           optional sign of the value
      Group GROUP_INT_PART:       digits in front of the decimal point
      Group GROUP_DEC_PART:       optional digits after the decimal point
      Group GROUP_SIG_DEC_DIGITS: digits after the decimal point, starting
                                  at the first value not 0
      Group GROUP_EXP:            optional exponent part (e.g. 'e-5')
      Group GROUP_EXP_SIGN:       optional sign of the exponent part
      Group GROUP_EXP_VAL:        value of the exponent part (e.g. '5')
    """
    match = REGEX_SIGNIFICANT_DIGITS.match(value)
    is_zero = float(value) == 0
    int_part = match.group(GROUP_INT_PART)
    dec_part = match.group(GROUP_DEC_PART)

    if is_zero:
        # Only zeros: the 0 in front of the decimal point does not count,
        # but any written decimal zeros do.
        sig_digits = len(dec_part) if dec_part else 0
    elif int(int_part) == 0:
        # Value below 1: leading zeros after the point are not significant.
        sig_digits = len(match.group(GROUP_SIG_DEC_DIGITS))
    else:
        sig_digits = len(int_part)
        if dec_part:
            sig_digits += len(dec_part)
    return sig_digits
def _format_number(number, initial_value_sig_digits, number_of_significant_digits, max_digits_after_decimal, isToAlign, format_target):
    """
    If the value is a number (or number followed by a unit),
    this function returns a string-representation of the number
    with the specified number of significant digits,
    optionally aligned at the decimal point.
    """
    assert format_target in POSSIBLE_FORMAT_TARGETS, "Invalid format " + format_target

    # Round to the given amount of significant digits
    intended_digits = min(initial_value_sig_digits, number_of_significant_digits)

    if number != 0:
        # round() to the decimal position that keeps exactly
        # number_of_significant_digits significant digits.
        float_value = round(number, - int(floor(log10(abs(number)))) + (number_of_significant_digits - 1))
        if not format_target.startswith('tooltip'):
            max_digits_to_display = max_digits_after_decimal
        else:
            max_digits_to_display = len(str(float_value))  # This value may be too big, but extra digits will be cut below
        formatted_value = "{0:.{1}f}".format(float_value, max_digits_to_display)
        # Get the number of intended significant digits and the number of current significant digits.
        # If we have not enough digits due to rounding, 0's have to be re-added.
        # If we have too many digits due to conversion of integers to float (e.g. 1234.0), the decimals have to be cut
        current_sig_digits = _get_significant_digits(formatted_value)
        digits_to_add = intended_digits - current_sig_digits
        if digits_to_add > 0:
            if '.' not in formatted_value:
                # Cannot append significant zeros without a decimal point.
                raise AssertionError(
                    "Unexpected string '{}' after rounding '{}' to '{}' with {} significant digits and {} decimal digits for format '{}'"
                    .format(formatted_value, number, float_value, intended_digits, max_digits_to_display, format_target))
            formatted_value += "".join(['0'] * digits_to_add)
        elif digits_to_add < 0:
            if '.' in formatted_value[:digits_to_add]:
                # Drop surplus decimal digits from the right.
                formatted_value = formatted_value[:digits_to_add]
            else:
                # Surplus digits come from the integer part; fall back to a
                # plain rounded representation without a trailing point.
                formatted_value = str(round(float_value))
                if formatted_value.endswith('.'):
                    formatted_value = formatted_value[:-1]
    else:
        formatted_value = '0'
        if max_digits_after_decimal > 0 and initial_value_sig_digits > 0:
            formatted_value += '.' + '0' * min(max_digits_after_decimal, initial_value_sig_digits)

    # Cut the 0 in front of the decimal point for values < 1.
    # Example: 0.002 => .002
    if _is_to_cut(formatted_value, format_target, isToAlign):
        assert formatted_value[0] == '0'
        formatted_value = formatted_value[1:]

    # Alignment
    if isToAlign:
        formatted_value = _format_number_align(formatted_value, max_digits_after_decimal, format_target)
    return formatted_value
def _is_to_cut(value, format_target, is_to_align):
correct_target = format_target == "html_cell" or (format_target == 'csv' and is_to_align)
return correct_target and '.' in value and 1 > float(value) >= 0
|
{
"content_hash": "9c92f46045b11a95217a2b45436eb199",
"timestamp": "",
"source": "github",
"line_count": 293,
"max_line_length": 149,
"avg_line_length": 40.9419795221843,
"alnum_prop": 0.6401300433477826,
"repo_name": "IljaZakharov/benchexec",
"id": "5d7dae28edcd06f001485baa564bdd93c9d247ee",
"size": "12717",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "benchexec/tablegenerator/columns.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gnuplot",
"bytes": "3882"
},
{
"name": "HTML",
"bytes": "63337"
},
{
"name": "PHP",
"bytes": "4704"
},
{
"name": "Python",
"bytes": "790249"
},
{
"name": "Roff",
"bytes": "3321"
},
{
"name": "Shell",
"bytes": "5516"
},
{
"name": "TeX",
"bytes": "6538"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from leonardo.module.media.fields import FileField
from leonardo.module.web.models import Widget
from leonardo.module.web.widgets.forms import WidgetUpdateForm
class FileForm(WidgetUpdateForm):
    # Editor form for the download-item widget: lets the user search for an
    # existing media file or upload a new one via the media admin form.
    file = FileField(
        help_text=_("Type to search file or upload new one."),
        cls_name='media.file',
        form_cls='leonardo.module.media.admin.fileadmin.FileAdminChangeFrom')
class DownloadItemWidget(Widget):
    # Abstract widget that renders a single downloadable media file.
    feincms_item_editor_form = FileForm

    icon = "fa fa-download"

    # The file offered for download; the %(app_label)s_%(class)s pattern
    # keeps the reverse accessor unique per concrete subclass.
    file = models.ForeignKey("media.File", verbose_name=_("file"),
                             related_name="%(app_label)s_%(class)s_files")

    class Meta:
        abstract = True
        verbose_name = _("download item")
        verbose_name_plural = _('download items')
|
{
"content_hash": "35ea77404bdf8299d75aa98000ec335f",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 77,
"avg_line_length": 30.642857142857142,
"alnum_prop": 0.682983682983683,
"repo_name": "django-leonardo/django-leonardo",
"id": "a3187d2a0642783f1a7e217bb0a66902d9a7e324",
"size": "883",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "leonardo/module/media/widget/downloaditem/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "33187"
},
{
"name": "Dockerfile",
"bytes": "835"
},
{
"name": "HTML",
"bytes": "323851"
},
{
"name": "JavaScript",
"bytes": "264957"
},
{
"name": "Makefile",
"bytes": "1390"
},
{
"name": "Python",
"bytes": "705902"
},
{
"name": "SCSS",
"bytes": "68482"
},
{
"name": "Shell",
"bytes": "5569"
}
],
"symlink_target": ""
}
|
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, String
from sqlalchemy.orm import mapper, create_session, polymorphic_union

metadata = MetaData()

# Concrete-table inheritance: one table per concrete class, each repeating
# the shared columns instead of referencing a common base table.
managers_table = Table('managers', metadata,
    Column('employee_id', Integer, primary_key=True),
    Column('name', String(50)),
    Column('manager_data', String(40))
    )

engineers_table = Table('engineers', metadata,
    Column('employee_id', Integer, primary_key=True),
    Column('name', String(50)),
    Column('engineer_info', String(40))
    )

# In-memory SQLite database; echo=True logs every emitted SQL statement.
engine = create_engine('sqlite:///', echo=True)
metadata.create_all(engine)
class Employee(object):
    """Base class of the concrete-inheritance example; stores only a name."""

    def __init__(self, name):
        self.name = name

    def __repr__(self):
        return "%s %s" % (self.__class__.__name__, self.name)
class Manager(Employee):
    """Employee with manager-specific data, mapped to its own table."""

    def __init__(self, name, manager_data):
        self.name = name
        self.manager_data = manager_data

    def __repr__(self):
        return " ".join((self.__class__.__name__, self.name, self.manager_data))
class Engineer(Employee):
    """Employee with engineer-specific data, mapped to its own table."""

    def __init__(self, name, engineer_info):
        self.name = name
        self.engineer_info = engineer_info

    def __repr__(self):
        return " ".join((self.__class__.__name__, self.name, self.engineer_info))
# Combine both concrete tables into a polymorphic union so a query on
# Employee returns Managers and Engineers alike.
pjoin = polymorphic_union({
    'manager':managers_table,
    'engineer':engineers_table
}, 'type', 'pjoin')

employee_mapper = mapper(Employee, pjoin, polymorphic_on=pjoin.c.type)
manager_mapper = mapper(Manager, managers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='manager')
engineer_mapper = mapper(Engineer, engineers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='engineer')

session = create_session(bind=engine)

# Persist one manager and two engineers.
m1 = Manager("pointy haired boss", "manager1")
e1 = Engineer("wally", "engineer1")
e2 = Engineer("dilbert", "engineer2")
session.add(m1)
session.add(e1)
session.add(e2)
session.flush()

employees = session.query(Employee)
# Parenthesized form prints identically under Python 2 and also works on
# Python 3 (the original `print [...]` statement is Python-2-only syntax).
print([e for e in employees])
|
{
"content_hash": "99ab198f2b17b900ba6c07de1a31fc49",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 125,
"avg_line_length": 30.166666666666668,
"alnum_prop": 0.6670015067805123,
"repo_name": "obeattie/sqlalchemy",
"id": "c50513b55559ad14d7e600d352a65c7576dfd3f9",
"size": "1991",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "examples/inheritance/concrete.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "29642"
},
{
"name": "JavaScript",
"bytes": "190"
},
{
"name": "Python",
"bytes": "4600520"
}
],
"symlink_target": ""
}
|
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup, find_packages
from hyde.version import __version__
from distutils.util import convert_path
from fnmatch import fnmatchcase
import os
import sys
PROJECT = 'hyde'

# Read the long description from the README; fall back to an empty string
# when the file is missing (e.g. installing from a stripped-down sdist).
# Using a context manager guarantees the file handle is closed (the original
# `open(...).read()` leaked it).
try:
    with open('README.rst', 'rt') as readme_file:
        long_description = readme_file.read()
except IOError:
    long_description = ''

################################################################################
# find_package_data is an Ian Bicking creation.

# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
                                './dist', 'EGG-INFO', '*.egg-info')
def find_package_data(
    where='.', package='',
    exclude=standard_exclude,
    exclude_directories=standard_exclude_directories,
    only_in_packages=True,
    show_ignored=False):
    """
    Return a dictionary suitable for use in ``package_data``
    in a distutils ``setup.py`` file.

    The dictionary looks like::

        {'package': [files]}

    Where ``files`` is a list of all the files in that package that
    don't match anything in ``exclude``.

    If ``only_in_packages`` is true, then top-level directories that
    are not packages won't be included (but directories under packages
    will).

    Directories matching any pattern in ``exclude_directories`` will
    be ignored; by default directories with leading ``.``, ``CVS``,
    and ``_darcs`` will be ignored.

    If ``show_ignored`` is true, then all the files that aren't
    included in package data are shown on stderr (for debugging
    purposes).

    Note patterns use wildcards, or can be exact paths (including
    leading ``./``), and all searching is case-insensitive.

    This function is by Ian Bicking.
    """
    out = {}
    # Breadth-first walk; each entry carries the directory, the path prefix
    # relative to its package, the package name, and the only_in_packages flag.
    stack = [(convert_path(where), '', package, only_in_packages)]
    while stack:
        where, prefix, package, only_in_packages = stack.pop(0)
        for name in os.listdir(where):
            fn = os.path.join(where, name)
            if os.path.isdir(fn):
                bad_name = False
                for pattern in exclude_directories:
                    if (fnmatchcase(name, pattern)
                        or fn.lower() == pattern.lower()):
                        bad_name = True
                        if show_ignored:
                            # NOTE: Python-2 print-to-stderr syntax.
                            print >> sys.stderr, (
                                "Directory %s ignored by pattern %s"
                                % (fn, pattern))
                        break
                if bad_name:
                    continue
                if os.path.isfile(os.path.join(fn, '__init__.py')):
                    # Subdirectory is itself a package: recurse with a fresh
                    # prefix and the dotted package name.
                    if not package:
                        new_package = name
                    else:
                        new_package = package + '.' + name
                    stack.append((fn, '', new_package, False))
                else:
                    # Plain data directory: keep the current package, extend
                    # the relative prefix.
                    stack.append((fn, prefix + name + '/', package, only_in_packages))
            elif package or not only_in_packages:
                # is a file
                bad_name = False
                for pattern in exclude:
                    if (fnmatchcase(name, pattern)
                        or fn.lower() == pattern.lower()):
                        bad_name = True
                        if show_ignored:
                            print >> sys.stderr, (
                                "File %s ignored by pattern %s"
                                % (fn, pattern))
                        break
                if bad_name:
                    continue
                out.setdefault(package, []).append(prefix+name)
    return out
################################################################################
# Package metadata; find_package_data collects non-Python data files so they
# ship inside the installed package.
setup(name=PROJECT,
      version=__version__,
      description='hyde is a static website generator',
      long_description = long_description,
      author='Lakshmi Vyas',
      author_email='lakshmi.vyas@gmail.com',
      url='http://hyde.github.com',
      packages=find_packages(),
      install_requires=(
         'argparse',
         'commando',
         'jinja2',
         'pyYAML',
         'markdown',
         'smartypants',
         'pygments',
         'typogrify'
      ),
      tests_require=(
        'nose',
      ),
      test_suite='nose.collector',
      include_package_data = True,
      # Scan the input for package information
      # to grab any data files (text, images, etc.)
      # associated with sub-packages.
      package_data = find_package_data(PROJECT,
                                       package=PROJECT,
                                       only_in_packages=False,
                                       ),
      entry_points={
        'console_scripts': [
            'hyde = hyde.main:main'
        ]
      },
      license='MIT',
      classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: End Users/Desktop',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: MIT License',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Operating System :: Microsoft :: Windows',
        'Programming Language :: Python',
        'Topic :: Software Development',
        'Topic :: Software Development :: Build Tools',
        'Topic :: Software Development :: Code Generators',
        'Topic :: Internet',
        'Topic :: Internet :: WWW/HTTP :: Site Management',
      ],
      zip_safe=False,
)
|
{
"content_hash": "4dbef22f08532a1993db92f463b066f8",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 86,
"avg_line_length": 35.52760736196319,
"alnum_prop": 0.5050941115524089,
"repo_name": "Valloric/hyde",
"id": "2e0a53b461f042d60096aa89c664e64a1cd9d6d6",
"size": "5830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "73"
},
{
"name": "Python",
"bytes": "389945"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a manual ordering field to Topic.

    Adds ``Topic.order`` (indexed, non-editable, default 0) and switches the
    model's default ordering to that field.
    """

    dependencies = [
        ('core', '0003_auto_20160321_0954'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='topic',
            options={'ordering': ['order']},
        ),
        migrations.AddField(
            model_name='topic',
            name='order',
            field=models.PositiveIntegerField(db_index=True, default=0, editable=False),
        ),
    ]
|
{
"content_hash": "68659d8924101b81529d228df3e12df7",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 88,
"avg_line_length": 24.045454545454547,
"alnum_prop": 0.5746691871455577,
"repo_name": "anehx/anonboard-backend",
"id": "6442fb1b9ac4dc2bf216667ea9acd036504c47ce",
"size": "601",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/migrations/0004_auto_20160322_0821.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "371"
},
{
"name": "Python",
"bytes": "27653"
}
],
"symlink_target": ""
}
|
import pandas
import pandasql
def avg_min_temperature(filename):
    '''
    Run a SQL query on a dataframe of weather data: return the average
    minimum temperature on rainy days where the minimum temperature is
    greater than 55 degrees.

    Interpreting numbers as integers or floats may not work initially,
    because the CSV columns can be read as text. To get around this issue,
    cast the numbers as integers by writing cast(column as integer).
    For example, to compare the maxtempi column as an integer, write
    where cast(maxtempi as integer) = 76 instead of where maxtempi = 76.

    You can see the weather data that we are passing in below:
    https://www.dropbox.com/s/7sf0yqc9ykpq3w8/weather_underground.csv

    @param filename: path to the weather CSV file.
    @return: one-row DataFrame holding the average minimum temperature.
    '''
    weather_data = pandas.read_csv(filename)

    q = """
    SELECT avg(cast (mintempi as integer))
    FROM weather_data
    WHERE rain == 1 and mintempi > 55
    """

    # Execute the SQL command against the pandas frame; pandasql looks up
    # `weather_data` in locals(), so that name must match the FROM clause.
    avg_min_temp = pandasql.sqldf(q.lower(), locals())
    return avg_min_temp
|
{
"content_hash": "bea757e398c8885488a9e532c012a49c",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 93,
"avg_line_length": 39.32258064516129,
"alnum_prop": 0.7095980311730927,
"repo_name": "KellyChan/python-examples",
"id": "f9f7d7a0744e55d27e726f44c886e1f93a02f535",
"size": "1219",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/data_science/NYC/wrangle03_avg_min_temp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "86277"
},
{
"name": "HTML",
"bytes": "320182"
},
{
"name": "JavaScript",
"bytes": "154998"
},
{
"name": "Jupyter Notebook",
"bytes": "30660"
},
{
"name": "Python",
"bytes": "238130"
}
],
"symlink_target": ""
}
|
"""Contains the base Layer class, from which all layers inherit."""
import copy
import warnings
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.engine import base_layer_utils
from tensorflow.python.keras.legacy_tf_layers import variable_scope_shim
from tensorflow.python.keras.mixed_precision import policy
from tensorflow.python.keras.utils import tf_contextlib
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.trackable import base as trackable
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
from tensorflow.python.util.tf_export import tf_export
# Avoid breaking users who directly import this symbol from this file.
# TODO(fchollet): remove this.
InputSpec = base_layer.InputSpec # pylint: disable=invalid-name

# Module-level flag toggled by keras_style_scope()/set_keras_style(); when
# True, tf.layers and tf RNN cells use Keras-style variable management.
_KERAS_STYLE_SCOPE = False
@keras_export(
    v1=['keras.__internal__.legacy.layers.experimental.keras_style_scope'])
@tf_export(v1=['layers.experimental.keras_style_scope'])
@tf_contextlib.contextmanager
def keras_style_scope():
  """Use Keras-style variable management.

  All tf.layers and tf RNN cells created inside this scope use Keras-style
  variable management: creating such layers with a scope= argument is
  disallowed, and reuse=True is disallowed.

  The purpose of this scope is to let users of existing layers transition
  gradually to a Keras layers API without breaking existing functionality.
  One example is using TensorFlow's RNN classes with Keras Models or
  Networks: because Keras models do not properly set variable scopes, users
  of RNNs may either accidentally share scopes between two different models,
  or get errors about variables that already exist.

  Example:

  ```python
  class RNNModel(tf.keras.Model):

    def __init__(self, name):
      super(RNNModel, self).__init__(name=name)
      self.rnn = tf.compat.v1.nn.rnn_cell.MultiRNNCell(
        [tf.compat.v1.nn.rnn_cell.LSTMCell(64) for _ in range(2)])

    def call(self, input, state):
      return self.rnn(input, state)

  model_1 = RNNModel("model_1")
  model_2 = RNNModel("model_2")

  # OK
  output_1, next_state_1 = model_1(input, state)
  # Raises an error about trying to create an already existing variable.
  output_2, next_state_2 = model_2(input, state)
  ```

  The solution is to wrap the model construction and execution in a
  keras-style scope:

  ```python
  with keras_style_scope():
    model_1 = RNNModel("model_1")
    model_2 = RNNModel("model_2")

    # model_1 and model_2 are guaranteed to create their own variables.
    output_1, next_state_1 = model_1(input, state)
    output_2, next_state_2 = model_2(input, state)

  assert len(model_1.weights) > 0
  assert len(model_2.weights) > 0
  assert(model_1.weights != model_2.weights)
  ```

  Yields:
    A keras layer style scope.
  """
  global _KERAS_STYLE_SCOPE
  # Remember the previous value so nested scopes restore it correctly.
  previous_value = _KERAS_STYLE_SCOPE
  _KERAS_STYLE_SCOPE = True
  try:
    yield
  finally:
    _KERAS_STYLE_SCOPE = previous_value
@keras_export(
    v1=['keras.__internal__.legacy.layers.experimental.set_keras_style'])
@tf_export(v1=['layers.experimental.set_keras_style'])
def set_keras_style():
  """Use Keras-style variable management.
  All tf.layers and tf RNN cells created after keras style has been enabled
  use Keras-style variable management. Creating such layers with a
  scope= argument is disallowed, and reuse=True is disallowed.
  The purpose of this function is to allow users of existing layers to
  slowly transition to Keras layers API without breaking existing
  functionality.
  For more details, see the documentation for `keras_style_scope`.
  Note, once keras style has been set, it is set globally for the entire
  program and cannot be unset.
  Example:
  ```python
  set_keras_style()
  model_1 = RNNModel(name="model_1")
  model_2 = RNNModel(name="model_2")
  # model_1 and model_2 are guaranteed to create their own variables.
  output_1, next_state_1 = model_1(input, state)
  output_2, next_state_2 = model_2(input, state)
  assert len(model_1.weights) > 0
  assert len(model_2.weights) > 0
  assert(model_1.weights != model_2.weights)
  ```
  """
  global _KERAS_STYLE_SCOPE
  # Unlike `keras_style_scope`, this is a one-way, process-wide switch.
  _KERAS_STYLE_SCOPE = True
def _is_in_keras_style_scope():
  """Return True while Keras-style variable management is active."""
  # Reading a module-level name needs no `global` declaration.
  return _KERAS_STYLE_SCOPE
@keras_export(v1=['keras.__internal__.legacy.layers.Layer'])
@tf_export(v1=['layers.Layer'])
class Layer(base_layer.Layer):
  """Base layer class.
  It is considered legacy, and we recommend the use of `tf.keras.layers.Layer`
  instead.
  Args:
    trainable: Boolean, whether the layer's variables should be trainable.
    name: String name of the layer.
    dtype: Default dtype of the layer's weights (default of `None` means use the
      type of the first input).
  Read-only properties:
    name: The name of the layer (string).
    dtype: Default dtype of the layer's weights (default of `None` means use the
      type of the first input).
    trainable_variables: List of trainable variables.
    non_trainable_variables: List of non-trainable variables.
    variables: List of all variables of this layer, trainable and
      non-trainable.
    updates: List of update ops of this layer.
    losses: List of losses added by this layer.
    trainable_weights: List of variables to be included in backprop.
    non_trainable_weights: List of variables that should not be
      included in backprop.
    weights: The concatenation of the lists trainable_weights and
      non_trainable_weights (in this order).
  Mutable properties:
    trainable: Whether the layer should be trained (boolean).
    input_spec: Optional (list of) `InputSpec` object(s) specifying the
      constraints on inputs that can be accepted by the layer.
  """

  def __init__(self, trainable=True, name=None, dtype=None,
               **kwargs):
    # For backwards compatibility, legacy layers do not use `ResourceVariable`
    # by default.
    self._use_resource_variables = False
    # `_scope` / `_reuse` are private kwargs; popped here so they are not
    # forwarded to the Keras base-class constructor below.
    scope = kwargs.pop('_scope', None)
    self._reuse = kwargs.pop('_reuse', None)
    # Avoid an incorrect lint error
    self._trainable_weights = []
    self.built = False
    if dtype is None:
      # Indicates to infer dtype from inputs. When the V2 dtype behavior is
      # enabled, Keras layers default their dtype to floatx instead, so we pass
      # an "_infer" policy to keep the old V1 behavior.
      dtype = policy.Policy('_infer')
    if 'autocast' not in kwargs:
      kwargs['autocast'] = False
    # Mark that legacy layers should not be instrumented as Keras usage
    self._disable_keras_instrumentation = True
    super(Layer, self).__init__(trainable=trainable, name=name, dtype=dtype,
                                **kwargs)
    # Under keras-style management, scope/reuse handling is delegated entirely
    # to Keras, so passing either argument is an error.
    if _is_in_keras_style_scope():
      if scope is not None:
        raise ValueError(
            'scope argument not allowed when keras style layers are enabled, '
            'but saw: {}'.format(scope))
      if self._reuse is not None:
        raise ValueError(
            'reuse argument not allowed when keras style layers are enabled, '
            'but saw: {}'.format(self._reuse))
      self._keras_style = True
    else:
      self._keras_style = False
    self._call_has_scope_arg = 'scope' in self._call_fn_args
    if scope:
      # Capture the fully-resolved VariableScope object (not just the name).
      with vs.variable_scope(scope) as captured_scope:
        self._scope = captured_scope
    else:
      self._scope = None
    self._current_scope = None

  # We no longer track graph in tf.layers layers. This property is only kept to
  # maintain API backward compatibility.
  @property
  def graph(self):
    # NOTE(review): `warnings` and `context` are not imported in this chunk;
    # presumably imported near the top of the file — verify.
    warnings.warn('`Layer.graph` is deprecated and '
                  'will be removed in a future version. '
                  'Please stop using this property because tf.layers layers no '
                  'longer track their graph.')
    if context.executing_eagerly():
      raise RuntimeError('Layer.graph not supported when executing eagerly.')
    return None

  def _init_set_name(self, name):
    """Determine `self._name`/`self._base_name` from a string or VariableScope."""
    # Determine layer name (non-unique).
    if isinstance(name, vs.VariableScope):
      base_name = name.name
      self._name, _ = self._make_unique_name()
    else:
      base_name = name
      self._name = name
    if not name:
      # No name given: derive both from the snake_cased class name.
      self._name, base_name = self._make_unique_name()
    self._base_name = base_name

  def _make_unique_name(self, name_uid_map=None, avoid_names=None,
                        namespace='', zero_based=False):
    """Return a (unique_name, base_name) pair derived from the class name."""
    base_name = base_layer.to_snake_case(self.__class__.__name__)
    name = backend.unique_object_name(
        base_name,
        name_uid_map=name_uid_map,
        avoid_names=avoid_names,
        namespace=namespace,
        zero_based=zero_based)
    return (name, base_name)

  @property
  def scope_name(self):
    # The scope is only captured on first use (see `_set_scope`), hence the
    # explicit error for layers that have not been called yet.
    if not self._scope:
      raise ValueError('No name available for layer scope because the layer "' +
                       self._name + '" has not been used yet. The scope name ' +
                       ' is determined the first time the layer instance is ' +
                       'called. You must therefore call the layer before ' +
                       'querying `scope_name`.')
    return self._scope.name

  def add_loss(self, losses, inputs=None):
    """Add loss tensor(s); in graph mode also mirror any newly created
    losses (including evaluated regularizers) into the
    REGULARIZATION_LOSSES graph collection."""
    previous_losses_length = len(self._losses)
    previous_callable_losses_length = len(self._callable_losses)
    super(Layer, self).add_loss(losses, inputs=inputs)
    if not context.executing_eagerly():
      # TODO(fchollet): deprecate collection below.
      new_losses = self._losses[previous_losses_length:]
      new_callable_losses = self._callable_losses[
          previous_callable_losses_length:]
      for regularizer in new_callable_losses:
        loss_tensor = regularizer()
        if loss_tensor is not None:
          new_losses.append(loss_tensor)
      _add_elements_to_collection(
          new_losses,
          ops.GraphKeys.REGULARIZATION_LOSSES)

  def _name_scope(self):  # pylint: disable=method-hidden
    """Determines op naming for the Layer."""
    if self._keras_style:
      return super(Layer, self)._name_scope()
    # Legacy path: reuse the name scope captured with the variable scope.
    return self._current_scope.original_name_scope

  def _set_scope(self, scope=None):
    """Lazily capture `self._scope` the first time the layer is used."""
    if self._scope is None:
      # If constructed with _scope=None, lazy setting of scope.
      if self._reuse:
        # Reusing: enter the named scope directly (no uniquified default).
        with vs.variable_scope(
            scope if scope is not None else self._base_name) as captured_scope:
          self._scope = captured_scope
      else:
        with vs.variable_scope(
            scope, default_name=self._base_name) as captured_scope:
          self._scope = captured_scope

  def add_weight(self,
                 name,
                 shape,
                 dtype=None,
                 initializer=None,
                 regularizer=None,
                 trainable=None,
                 constraint=None,
                 use_resource=None,
                 synchronization=vs.VariableSynchronization.AUTO,
                 aggregation=vs.VariableAggregation.NONE,
                 partitioner=None,
                 **kwargs):
    """Adds a new variable to the layer, or gets an existing one; returns it.
    Args:
      name: variable name.
      shape: variable shape.
      dtype: The type of the variable. Defaults to `self.dtype` or `float32`.
      initializer: initializer instance (callable).
      regularizer: regularizer instance (callable).
      trainable: whether the variable should be part of the layer's
        "trainable_variables" (e.g. variables, biases)
        or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
        Note, if the current variable scope is marked as non-trainable
        then this parameter is ignored and any added variables are also
        marked as non-trainable. `trainable` defaults to `True` unless
        `synchronization` is set to `ON_READ`.
      constraint: constraint instance (callable).
      use_resource: Whether to use `ResourceVariable`.
      synchronization: Indicates when a distributed a variable will be
        aggregated. Accepted values are constants defined in the class
        `tf.VariableSynchronization`. By default the synchronization is set to
        `AUTO` and the current `DistributionStrategy` chooses
        when to synchronize. If `synchronization` is set to `ON_READ`,
        `trainable` must not be set to `True`.
      aggregation: Indicates how a distributed variable will be aggregated.
        Accepted values are constants defined in the class
        `tf.VariableAggregation`.
      partitioner: (optional) partitioner instance (callable). If
        provided, when the requested variable is created it will be split
        into multiple partitions according to `partitioner`. In this case,
        an instance of `PartitionedVariable` is returned. Available
        partitioners include `tf.compat.v1.fixed_size_partitioner` and
        `tf.compat.v1.variable_axis_size_partitioner`. For more details, see
        the documentation of `tf.compat.v1.get_variable` and the "Variable
        Partitioners and Sharding" section of the API guide.
      **kwargs: Additional keyword arguments.
    Returns:
      The created variable. Usually either a `Variable` or `ResourceVariable`
      instance. If `partitioner` is not `None`, a `PartitionedVariable`
      instance is returned.
    Raises:
      RuntimeError: If called with partitioned variable regularization and
        eager execution is enabled.
      ValueError: When trainable has been set to True with synchronization
        set as `ON_READ`.
    """
    for kwarg in kwargs:
      if kwarg != 'experimental_autocast':
        raise TypeError('Unknown keyword argument:', kwarg)
    if self._keras_style:
      # NOTE(review): on this path the caller's `synchronization`/`aggregation`
      # arguments are replaced with AUTO/NONE — confirm this is deliberate.
      return super(Layer, self).add_weight(
          name=name,
          shape=shape,
          dtype=dtype,
          initializer=initializer,
          regularizer=regularizer,
          trainable=trainable and self.trainable,
          constraint=constraint,
          use_resource=use_resource,
          synchronization=vs.VariableSynchronization.AUTO,
          aggregation=vs.VariableAggregation.NONE,
          partitioner=partitioner,
          **kwargs)
    if synchronization == vs.VariableSynchronization.ON_READ:
      if trainable:
        raise ValueError(
            'Synchronization value can be set to '
            'VariableSynchronization.ON_READ only for non-trainable variables. '
            'You have specified trainable=True and '
            'synchronization=VariableSynchronization.ON_READ.')
      else:
        # Set trainable to be false when variable is to be synced on read.
        trainable = False
    elif trainable is None:
      trainable = True

    def _should_add_regularizer(variable, existing_variable_set):
      # For partitioned variables, regularize only if no partition was
      # already present (i.e. the variable was newly created).
      if base_layer_utils.is_split_variable(variable):
        for var in variable:
          if var in existing_variable_set:
            return False
        return True
      else:
        return variable not in existing_variable_set

    init_graph = None
    if not context.executing_eagerly():
      default_graph = ops.get_default_graph()
      if default_graph.building_function:
        with ops.init_scope():
          # Retrieve the variables from the graph into which variables
          # will be lifted; if initialization ops will be lifted into
          # the eager context, then there is nothing to retrieve, since variable
          # collections are not supported when eager execution is enabled.
          if not context.executing_eagerly():
            init_graph = ops.get_default_graph()
            existing_variables = set(tf_variables.global_variables())
      else:
        # Initialization ops will not be lifted out of the default graph.
        init_graph = default_graph
        existing_variables = set(tf_variables.global_variables())
    if dtype is None:
      dtype = self.dtype or dtypes.float32
    self._set_scope(None)
    reuse = self.built or self._reuse
    prev_len_trainable = len(self._trainable_weights)
    with vs.variable_scope(
        self._scope, reuse=reuse, auxiliary_name_scope=False) as scope:
      self._current_scope = scope
      with backend.name_scope(self._name_scope()):  # pylint: disable=not-callable
        # Scope-level settings take precedence over the layer default.
        use_resource = (use_resource or
                        self._use_resource_variables or
                        scope.use_resource)
        if initializer is None:
          initializer = scope.initializer
        variable = super(Layer, self).add_weight(
            name,
            shape,
            dtype=dtypes.as_dtype(dtype),
            initializer=initializer,
            trainable=trainable and self.trainable,
            constraint=constraint,
            partitioner=partitioner,
            use_resource=use_resource,
            synchronization=synchronization,
            aggregation=aggregation,
            getter=vs.get_variable,
            **kwargs)
        if regularizer:
          if (ops.executing_eagerly_outside_functions()
              or _should_add_regularizer(variable, existing_variables)):
            self._handle_weight_regularization(name, variable, regularizer)
          var_store = vs._get_default_variable_store()  # pylint: disable=protected-access
          # When the shim to get variable scope working in TF2 is used,
          # We need to explicitly make the shim track the regularization
          # losses as the collections will not be accessible.
          if hasattr(var_store, 'add_regularizer'):
            var_store.add_regularizer(variable, regularizer)
        if init_graph is not None:
          # Handle edge case where a custom getter has overridden `trainable`.
          # There is one known occurrence of this, in unit test
          # testBasicRNNCellNotTrainable in
          # contrib.rnn.python.kernel_tests.core_rnn_cell_test
          with init_graph.as_default():
            trainable_variables = tf_variables.trainable_variables()
          if (trainable and self.trainable and
              variable not in trainable_variables):
            # A custom getter / variable scope overrode the trainable flag.
            extra_trainable_vars = self._trainable_weights[prev_len_trainable:]
            self._trainable_weights = self._trainable_weights[
                :prev_len_trainable]
            self._non_trainable_weights += extra_trainable_vars
    return variable

  def __call__(self, inputs, *args, **kwargs):
    """Wraps `call`, applying pre- and post-processing steps.
    Args:
      inputs: input tensor(s).
      *args: additional positional arguments to be passed to `self.call`.
      **kwargs: additional keyword arguments to be passed to `self.call`.
        **Note**: kwarg `scope` is reserved for use by the layer.
    Returns:
      Output tensor(s).
    Note:
      - If the layer's `call` method takes a `scope` keyword argument,
        this argument will be automatically set to the current variable scope.
      - If the layer's `call` method takes a `mask` argument (as some Keras
        layers do), its default value will be set to the mask generated
        for `inputs` by the previous layer (if `input` did come from
        a layer that generated a corresponding mask, i.e. if it came from
        a Keras layer with masking support.
    Raises:
      ValueError: if the layer's `call` method returns None (an invalid value).
    """
    scope = kwargs.pop('scope', None)
    if self._keras_style:
      if scope is not None:
        raise ValueError(
            'scope argument not allowed when keras style layers are enabled, '
            'but saw: {}'.format(scope))
      return super(Layer, self).__call__(inputs, *args, **kwargs)
    self._set_scope(scope)
    if self.built:
      try:
        # Some classes which inherit from Layer do not use its constructor, so
        # rather than initializing to None we check for an AttributeError.
        scope_context_manager = self._always_reuse_variable_scope  # pylint: disable=access-member-before-definition
      except AttributeError:
        scope_context_manager = None
      if scope_context_manager is None:
        # From this point we will always set reuse=True, so create a "final"
        # variable scope with this setting. We avoid re-creating variable scopes
        # after this point as an optimization.
        scope_context_manager = vs.variable_scope(
            self._scope, reuse=True, auxiliary_name_scope=False)
        # Do not cache variable scopes if Eager mode is enabled. If Eager mode
        # is enabled then we don't want to reuse scopes because the cached scope
        # might be from a FuncGraph or Eager scope we are no longer in.
        if not ops.executing_eagerly_outside_functions():
          self._always_reuse_variable_scope = scope_context_manager
    else:
      scope_context_manager = vs.variable_scope(
          self._scope, reuse=self._reuse, auxiliary_name_scope=False)
    with scope_context_manager as scope:
      self._current_scope = scope
      try:
        call_has_scope_arg = self._call_has_scope_arg
      except AttributeError:
        # Lazily compute and cache the call-signature inspection.
        self._call_fn_args = variable_scope_shim.fn_args(self.call)
        self._call_has_scope_arg = 'scope' in self._call_fn_args
        call_has_scope_arg = self._call_has_scope_arg
      if call_has_scope_arg:
        kwargs['scope'] = scope
      # Actually call layer
      outputs = super(Layer, self).__call__(inputs, *args, **kwargs)
    if not context.executing_eagerly():
      # Update global default collections.
      _add_elements_to_collection(self.updates, ops.GraphKeys.UPDATE_OPS)
    return outputs

  def __deepcopy__(self, memo):
    """Deep-copy the layer, sharing graph/lock state, shallow-copying scopes,
    and aliasing (not copying) tensors.
    NOTE(review): relies on the `copy` module — presumably imported near the
    top of the file; verify."""
    no_copy = set(['_graph', '_thread_local', '_metrics_lock'])
    shallow_copy = set(['_scope', '_always_reuse_variable_scope'])
    cls = self.__class__
    result = cls.__new__(cls)
    memo[id(self)] = result
    for k, v in self.__dict__.items():
      if k in no_copy:
        setattr(result, k, v)
      elif k in shallow_copy:
        setattr(result, k, copy.copy(v))
      elif base_layer.is_tensor_or_tensor_list(v):
        setattr(result, k, v)
      else:
        setattr(result, k, copy.deepcopy(v, memo))
    return result

  def __setattr__(self, value, name):
    # NOTE(review): the parameter names are swapped relative to the standard
    # `__setattr__(self, name, value)` signature — Python passes the attribute
    # name first, so `value` actually receives the name and `name` the value.
    # Arguments are forwarded positionally, so behavior is still correct.
    # By-pass the automatic dependency tracking performed by the parent Layer.
    super(trackable.Trackable, self).__setattr__(value, name)  # pylint: disable=bad-super-call

  @property
  def _is_legacy_layer(self):
    """Used by keras to check compatibility. This should not be overridden."""
    return True
def _add_elements_to_collection(elements, collection_list):
  """Append each element to every named graph collection, skipping entries
  already present (by identity). Graph mode only."""
  if context.executing_eagerly():
    raise RuntimeError('Using collections from Layers not supported in Eager '
                       'mode. Tried to add %s to %s' % (elements,
                                                        collection_list))
  flat_elements = nest.flatten(elements)
  for collection_name in nest.flatten(collection_list):
    collection = ops.get_collection_ref(collection_name)
    seen_ids = set(id(existing) for existing in collection)
    for element in flat_elements:
      if id(element) in seen_ids:
        continue
      collection.append(element)
|
{
"content_hash": "8d7c257d2e00505f7f91c98f37b83e2f",
"timestamp": "",
"source": "github",
"line_count": 599,
"max_line_length": 116,
"avg_line_length": 39.13856427378965,
"alnum_prop": 0.6612779389182734,
"repo_name": "yongtang/tensorflow",
"id": "8fba78fa4ca557ea8e06396b0567d0c63a6de31d",
"size": "24176",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tensorflow/python/keras/legacy_tf_layers/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1368342"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "125162438"
},
{
"name": "CMake",
"bytes": "179878"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416133"
},
{
"name": "Go",
"bytes": "2118448"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792868"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11205807"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "172666"
},
{
"name": "Objective-C++",
"bytes": "300198"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42642473"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "621427"
},
{
"name": "Smarty",
"bytes": "89545"
},
{
"name": "SourcePawn",
"bytes": "14607"
},
{
"name": "Starlark",
"bytes": "7577804"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
__description__ = 'Facts Listener'
__author__ = 'fla'
from flask import Flask, request, Response, json
from facts.myredis import myredis
from facts.queue import myqueue
from facts.jsoncheck import jsoncheck
from gevent.pywsgi import WSGIServer
from keystoneclient.exceptions import NotFound
from facts.config import fact_attributes, __version__
from facts import cloto_db_client
from facts.constants import CONTENT_HEADER, JSON_TYPE, REMOTE_ADDR, REMOTE_PORT, CONTEXT_ATTRIBUTES, \
CONTEXT_RESPONSES, CONTEXT_ATTRIBUTES_NAME, CONTEXT_ATTRIBUTES_VALUE, CONTEXT_ELEMENT
import logging.config
import sys
import datetime
import gevent.monkey
import os
import httplib
import gevent
# Version tuple derived from the dotted version string, e.g. '1.2.3' -> (1, 2, 3).
__version_info__ = tuple([int(num) for num in __version__.split('.')])
# Patch the stdlib (sockets, threading, ...) for cooperative gevent scheduling.
gevent.monkey.patch_all()
# All HTTP responses from this service are emitted as application/json.
content_type = JSON_TYPE
from facts.config import config, cfg_filename, cfg_defaults
"""Flask server initialization.
Uses Redis server as a message queue and server exchange with the fiware-cloto.
"""
app = Flask(__name__)
"""
Initialize the redis connection library
"""
mredis = myredis()
"""
Initialize the mysql connection library
"""
myClotoDBClient = cloto_db_client.cloto_db_client()
"""
Initialize the pid of the process
"""
pid = 0
# Flask/Gevent server need to send {'serverId': 'serverId', 'cpu': 80, 'mem': 80, 'time': '2014-03-24 16:21:29.384631'}
# to the topic
@app.route('/v1.0', methods=['GET'])
def factsinfo():
    """Health-check endpoint: report that the service is up (keep-alive)."""
    keepalive_body = "{\"fiware-facts\":\"Up and running...\"}\n"
    return Response(response=keepalive_body,
                    content_type=content_type,
                    status=httplib.OK)
@app.route('/v1.0/<tenantid>/servers/<serverid>', methods=['POST'])
def facts(tenantid, serverid):
    """API endpoint for receiving data from Monitoring system.

    :param string tenantid: the id of the tenant
    :param string serverid: the id of the monitored instance (server)
    :return: status code 400 - unsupported Content-Type, malformed JSON,
             missing attribute, invalid value or other processing error
    :return: status code 404 - unknown tenant (NotFound from processing)
    :return: status code 500 - RabbitMQ could not be contacted
    :return: status code 200 - successful submission
    """
    # Ensure post's Content-Type is supported (only application/json is valid)
    if request.headers[CONTENT_HEADER] == content_type:
        try:
            # Ensure that received data is a valid JSON
            user_submission = json.loads(request.data)  # @UnusedVariable
        except ValueError:
            # Data is not a well-formed json. Log the offending payload.
            # Bug fix: this used to interpolate the imported `json` module
            # object instead of the received request body.
            message = "[{}] received {} from ip {}:{}"\
                .format("-", request.data, request.environ[REMOTE_ADDR], request.environ[REMOTE_PORT])
            logging.warning(message)
            return Response(response="{\"error\":\"Bad request. The payload is not well-defined json format\"}\n",
                            status=httplib.BAD_REQUEST,
                            content_type=content_type)
        # It is a valid payload and we start to process it
        try:
            result = process_request(request, tenantid, serverid)
        except NotFound as ex:
            return Response(response=ex.message, status=ex.http_status, content_type=content_type)
        except UnboundLocalError as ex:
            # Raised by process_request when one of the expected NGSI
            # attributes (cpu/mem/hdd/net) was absent from the payload.
            return Response(response="{\"error\":\"Some attribute is missing: " + ex.message + "\"}\n",
                            status=httplib.BAD_REQUEST, content_type=content_type)
        except ValueError as ex:
            # Raised by verify_values for out-of-range or non-numeric values.
            return Response(response="{\"error\":\"" + ex.message + "\"}\n",
                            status=httplib.BAD_REQUEST, content_type=content_type)
        except Exception as ex:
            return Response(response="{\"error\": \"" + ex.message + "\"}\n",
                            status=httplib.BAD_REQUEST, content_type=content_type)
        if result:
            return Response(status=httplib.OK)
        else:
            return Response(response="{\"error\":\"Internal Server Error. "
                                     "Unable to contact with RabbitMQ process\"}\n",
                            status=httplib.INTERNAL_SERVER_ERROR,
                            content_type=content_type)
    # User submitted an unsupported Content-Type (only is valid application/json)
    else:
        return Response(response="{\"error\":\"Bad request. Content-type is not application/json\"}\n",
                        status=httplib.BAD_REQUEST,
                        content_type=content_type)
def process_request(request, tenantid, serverid):
    """Get the parsed contents of the form data and push a new fact.

    Extracts cpu/mem/hdd/net percentages from the NGSI payload, stores the
    resulting fact in redis, and, once enough facts are queued (window size),
    publishes the averaged fact to RabbitMQ.

    :param string request: The information of the received request
    :param string tenantid: the id of the tenant
    :param string serverid: the id of the monitored instance (server)
    :return: True
    """
    # NOTE: shadows the `json` module imported from flask, within this function.
    json = request.json
    if request.remote_addr:
        message = "[{}] received {} from ip {}:{}"\
            .format("-", json, request.environ[REMOTE_ADDR], request.environ[REMOTE_PORT])
    else:
        message = "[{}] received {} from test client"\
            .format("-", json)
    logging.info(message)
    key = ['contextResponses', 'contextElement', 'attributes']
    # Check that it contains the previous keys
    try:
        jsoncheck.checkit(json, key, 0)
    except NotFound as err:
        logging.error(err)
        raise err
    # Extract the list of attributes from the NGSI message
    attrlist = request.json[CONTEXT_RESPONSES][0][CONTEXT_ELEMENT][CONTEXT_ATTRIBUTES]
    data = list()
    for item in attrlist:
        name = item[CONTEXT_ATTRIBUTES_NAME]
        value = item[CONTEXT_ATTRIBUTES_VALUE]
        # Obtain the information of used memory and cpu
        if name == 'usedMemPct':
            verify_values(name, value)
            mem = float(value)
        elif name == 'cpuLoadPct':
            verify_values(name, value)
            cpu = float(value)
        elif name == 'netLoadPct':
            verify_values(name, value)
            net = float(value)
        elif name == 'freeSpacePct':
            verify_values(name, value)
            hdd = float(value)
    # If any of the four attributes was missing above, the reads below raise
    # UnboundLocalError, which the caller (`facts`) maps to a 400 response.
    data.insert(len(data), cpu)
    data.insert(len(data), mem)
    data.insert(len(data), hdd)
    data.insert(len(data), net)
    # fix the first value of the list with the server identity
    data.insert(0, str(serverid))
    # fix the last value with the current date and time
    data.insert(len(fact_attributes) - 1, datetime.datetime.today().isoformat())
    # Check data coherency of time stamps
    # +1 is needed because the fact is not already added to the queue.
    # It checks that queue will have at least 2 facts.
    if len(mredis.range(tenantid, serverid)) + 1 >= 2:
        mredis.check_time_stamps(tenantid, serverid, mredis.range(tenantid, serverid), data)
    # Get the windowsize for the tenant from a redis queue
    windowsize = mredis.get_windowsize(tenantid)
    if windowsize == []:
        # Not cached in redis: fall back to the cloto MySQL DB and cache it.
        windowsize = myClotoDBClient.get_window_size(tenantid)
        mredis.insert_window_size(tenantid, windowsize)
    # Insert the result into the queue system
    mredis.insert(tenantid, serverid, data)
    logging.info(data)
    # If the queue has the number of facts defined by the windows size, it returns the
    # last window-size values (range) and calculates the media of them (in terms of memory and cpu)
    lo = mredis.media(mredis.range(tenantid, serverid), windowsize)
    # If the number of facts is lt window size, the previous operation returns a null lists
    if len(lo) != 0:
        try:
            rabbit = myqueue()
            if len(lo) == 1:
                lo.data = lo.data[0]
            # lo.data[0] is the quoted serverid string; [1:-1] strips the quotes.
            message = "{\"serverId\": \"%s\", \"cpu\": %s, \"mem\": %s, \"hdd\": %s, \"net\": %s, \"time\": \"%s\"}" \
                      % (lo.data[0][1:-1], lo.data[1], lo.data[2], lo.data[3], lo.data[4], lo.data[5])
            logging_message = "[{}] sending message {}".format("-", message)
            logging.info(logging_message)
            # Send the message to the RabbitMQ components.
            result = rabbit.publish_message(tenantid, message)  # @UnusedVariable
        except Exception as ex:
            raise ex
    return True
def verify_values(name, value):
    """Validate that a monitored attribute value is a float in [0.0, 100.0].

    :param str name: attribute name (used only in the error message)
    :param value: attribute value to validate (string or number)
    :raises ValueError: if `value` cannot be parsed as a float or lies
        outside the range 0..100. NaN is rejected as well.
    """
    myfloat = float(value)
    # Chained comparison: for NaN both `0.0 <= x` and `x <= 100.0` are False,
    # so NaN is rejected too (the old `x < 0 or x > 100` check let NaN pass).
    if not 0.0 <= myfloat <= 100.0:
        raise ValueError("Invalid value received for %s" % name)
def info(port):
    """Log identifying details about this process: service name and version,
    run mode, listening port, PID and project URL."""
    service_name = config.get('common', 'name')
    process_id = os.getpid()
    logging.info("{} {}\n".format(service_name, __version__))
    logging.info("Running in stand alone mode")
    logging.info("Port: {}".format(port))
    logging.info("PID: {}\n".format(process_id))
    logging.info("https://github.com/telefonicaid/fiware-facts\n\n\n")
def check_config_file():
    """Warn when the settings file lacks the cloto MySQL user.

    If the mysql `user` option is empty, log errors explaining where a valid
    settings file should be placed (the template lives in facts_conf) and
    that an alternative location can be given via FACTS_SETTINGS_FILE.
    """
    if config.get('mysql', 'user') != '':
        # MySQL credentials are present; nothing to report.
        return
    logging.error("Cloto's Mysql data is empty. You should provide this information in the configuration file")
    logging.error("Please create a configuration file and add cloto MySql data to %s",
                  config.get('common', 'cfg_file_path'))
    logging.error("You can provide a Settings file in other locations using an environment variable called"
                  " FACTS_SETTINGS_FILE")
# process configuration file (if exists) and setup logging
if config.read(cfg_filename):
    logging.config.fileConfig(cfg_filename)
else:
    # No config file found: fall back to stdout logging with the defaults.
    logging.basicConfig(stream=sys.stdout, level=cfg_defaults['logLevel'], format=cfg_defaults['logFormat'])
# Define the port of our server, by default 5000
port = config.getint('common', 'brokerPort')
# execute the flask server, WSGI server
http = WSGIServer(('', port), app)
# show general information about the execution of the process
info(port)
check_config_file()
def windowsize_updater():
    """Background consumer of window-size updates from RabbitMQ.

    Binds an exclusive, auto-named queue to the "windowsizes" direct exchange
    and, for each message of the form "<tenantid> <windowsize>", stores the
    new window size in redis. Runs inside a gevent greenlet (see the
    `gevent.spawn` call below).
    """
    try:
        import pika
        # Initialized to None so the `finally` block can tell whether the
        # connection attempt below succeeded.
        connection = None
        connection = pika.BlockingConnection(pika.ConnectionParameters(
            host=config.get('common', 'rabbitMQ')))
        channel = connection.channel()
        channel.exchange_declare(exchange="windowsizes",
                                 exchange_type='direct')
        # Exclusive queue: deleted automatically when this consumer goes away.
        # NOTE(review): positional `basic_consume(callback, ...)` and
        # `queue_declare(exclusive=True)` match the pre-1.0 pika API — verify
        # the pinned pika version.
        result = channel.queue_declare(exclusive=True)
        queue_name = result.method.queue
        channel.queue_bind(exchange="windowsizes",
                           queue=queue_name,
                           routing_key="windowsizes")
        logging.info('Waiting for windowsizes')

        def callback(ch, method, properties, body):
            # Expected body format: "<tenantid> <windowsize>".
            try:
                logging.info("received windowsize: %s" % body)
                tenantid = body.split(" ")[0]
                windowsize = body.split(" ")[1]
                mredis.insert_window_size(tenantid, int(windowsize))
            except ValueError:
                # Non-integer window size.
                logging.info("receiving an invalid body: " + body)
            except Exception as ex:
                logging.info("ERROR UPDATING WINDOWSIZE: " + ex.message)
        channel.basic_consume(callback,
                              queue=queue_name,
                              no_ack=True)
        # Blocks this greenlet until the connection drops.
        channel.start_consuming()
    except Exception as ex:
        if ex.message:
            logging.error("Error %s:" % ex.message)
    finally:
        # `connection` is still None only when the initial connect failed.
        if connection == None:
            logging.error("There is no connection with RabbitMQ. Please, check if it is alive")
        else:
            connection.close()
# Run the consumer concurrently with the WSGI server.
gevent.spawn(windowsize_updater)
def start_server():
    """Run the gevent WSGI server until interrupted (blocking call)."""
    http.serve_forever()
if __name__ == '__main__':
    start_server()
|
{
"content_hash": "8b4c050d98de7d5bf1a119b1c6f6e490",
"timestamp": "",
"source": "github",
"line_count": 334,
"max_line_length": 119,
"avg_line_length": 35.68263473053892,
"alnum_prop": 0.6221681490182916,
"repo_name": "Fiware/cloud.Facts",
"id": "d02af79150ef29d3ce27b78eba596b5b453e109d",
"size": "12738",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "facts/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "API Blueprint",
"bytes": "12835"
},
{
"name": "Cucumber",
"bytes": "33954"
},
{
"name": "Python",
"bytes": "167098"
},
{
"name": "Ruby",
"bytes": "1891"
},
{
"name": "Shell",
"bytes": "6076"
}
],
"symlink_target": ""
}
|
"""Asserts and Boolean Checks.
See the [Asserts and
checks](https://tensorflow.org/api_guides/python/check_ops) guide.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# Set of dtypes this module treats as numeric.
NUMERIC_TYPES = frozenset(
    [dtypes.float32, dtypes.float64, dtypes.int8, dtypes.int16, dtypes.int32,
     dtypes.int64, dtypes.uint8, dtypes.qint8, dtypes.qint32, dtypes.quint8,
     dtypes.complex64])

# Public API of this module.
__all__ = [
    'assert_negative',
    'assert_positive',
    'assert_proper_iterable',
    'assert_non_negative',
    'assert_non_positive',
    'assert_equal',
    'assert_none_equal',
    'assert_near',
    'assert_integer',
    'assert_less',
    'assert_less_equal',
    'assert_greater',
    'assert_greater_equal',
    'assert_rank',
    'assert_rank_at_least',
    'assert_rank_in',
    'assert_same_float_dtype',
    'assert_scalar',
    'assert_type',
    'is_non_decreasing',
    'is_numeric_tensor',
    'is_strictly_increasing',
]
def _maybe_constant_value_string(t):
  """Render `t` as a string, folding in its statically-known value if any.

  Non-tensors are stringified directly; tensors whose value can be resolved
  at graph-construction time are replaced by that constant's string; tensors
  with no static value are returned unchanged.
  """
  if not isinstance(t, ops.Tensor):
    return str(t)
  static_value = tensor_util.constant_value(t)
  return t if static_value is None else str(static_value)
def _assert_static(condition, data):
  """Raises an InvalidArgumentError built from `data` if `condition` is falsy.

  Each entry of `data` is rendered via `_maybe_constant_value_string` so that
  statically-known tensor values appear in the message.
  """
  if condition:
    return
  rendered = '\n'.join(_maybe_constant_value_string(x) for x in data)
  raise errors.InvalidArgumentError(node_def=None, op=None, message=rendered)
def _shape_and_dtype_str(tensor):
"""Returns a string containing tensor's shape and dtype."""
return 'shape=%s dtype=%s' % (tensor.shape, tensor.dtype.name)
@tf_export(
    'debugging.assert_proper_iterable',
    v1=['debugging.assert_proper_iterable', 'assert_proper_iterable'])
@deprecation.deprecated_endpoints('assert_proper_iterable')
def assert_proper_iterable(values):
  """Static assert that values is a "proper" iterable.

  `Ops` expecting iterables of `Tensor` can call this to validate input.
  `Tensor`, `ndarray`, and byte/text types are iterable themselves, so
  passing one of them where a collection is expected is almost certainly a
  caller mistake — this rejects them explicitly.

  Args:
    values: Object to be checked.

  Raises:
    TypeError: If `values` is not iterable or is one of
      `Tensor`, `SparseTensor`, `np.array`, `tf.compat.bytes_or_text_types`.
  """
  # Types that iterate but should be treated as single values here.
  single_value_types = (
      (ops.Tensor, sparse_tensor.SparseTensor, np.ndarray)
      + compat.bytes_or_text_types)
  if isinstance(values, single_value_types):
    raise TypeError(
        'Expected argument "values" to be a "proper" iterable. Found: %s' %
        type(values))
  if not hasattr(values, '__iter__'):
    raise TypeError(
        'Expected argument "values" to be iterable. Found: %s' % type(values))
@tf_export(
    'debugging.assert_negative',
    v1=['debugging.assert_negative', 'assert_negative'])
@deprecation.deprecated_endpoints('assert_negative')
def assert_negative(x, data=None, summarize=None, message=None, name=None):
  """Assert the condition `x < 0` holds element-wise.

  Delegates to `assert_less(x, 0)`. If `x` is empty this is trivially
  satisfied.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_negative(x)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_negative".

  Returns:
    Op raising `InvalidArgumentError` unless `x` is all negative.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_negative', [x, data]):
    x = ops.convert_to_tensor(x, name='x')
    if data is None:
      # Eager tensors have no stable `.name`; describe shape/dtype instead.
      name = (_shape_and_dtype_str(x) if context.executing_eagerly()
              else x.name)
      data = [
          message,
          'Condition x < 0 did not hold element-wise:',
          'x (%s) = ' % name, x]
    zero = ops.convert_to_tensor(0, dtype=x.dtype)
    return assert_less(x, zero, data=data, summarize=summarize)
@tf_export(
    'debugging.assert_positive',
    v1=['debugging.assert_positive', 'assert_positive'])
@deprecation.deprecated_endpoints('assert_positive')
def assert_positive(x, data=None, summarize=None, message=None, name=None):
  """Assert the condition `x > 0` holds element-wise.

  Delegates to `assert_less(0, x)`. If `x` is empty this is trivially
  satisfied.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_positive(x)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_positive".

  Returns:
    Op raising `InvalidArgumentError` unless `x` is all positive.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_positive', [x, data]):
    x = ops.convert_to_tensor(x, name='x')
    if data is None:
      # Eager tensors have no stable `.name`; describe shape/dtype instead.
      name = (_shape_and_dtype_str(x) if context.executing_eagerly()
              else x.name)
      data = [
          message, 'Condition x > 0 did not hold element-wise:',
          'x (%s) = ' % name, x]
    zero = ops.convert_to_tensor(0, dtype=x.dtype)
    return assert_less(zero, x, data=data, summarize=summarize)
@tf_export(
    'debugging.assert_non_negative',
    v1=['debugging.assert_non_negative', 'assert_non_negative'])
@deprecation.deprecated_endpoints('assert_non_negative')
def assert_non_negative(x, data=None, summarize=None, message=None, name=None):
  """Assert the condition `x >= 0` holds element-wise.

  Delegates to `assert_less_equal(0, x)`. If `x` is empty this is trivially
  satisfied.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_non_negative(x)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).
      Defaults to "assert_non_negative".

  Returns:
    Op raising `InvalidArgumentError` unless `x` is all non-negative.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_non_negative', [x, data]):
    x = ops.convert_to_tensor(x, name='x')
    if data is None:
      # Eager tensors have no stable `.name`; describe shape/dtype instead.
      name = (_shape_and_dtype_str(x) if context.executing_eagerly()
              else x.name)
      data = [
          message,
          'Condition x >= 0 did not hold element-wise:',
          'x (%s) = ' % name, x]
    zero = ops.convert_to_tensor(0, dtype=x.dtype)
    return assert_less_equal(zero, x, data=data, summarize=summarize)
@tf_export(
    'debugging.assert_non_positive',
    v1=['debugging.assert_non_positive', 'assert_non_positive'])
@deprecation.deprecated_endpoints('assert_non_positive')
def assert_non_positive(x, data=None, summarize=None, message=None, name=None):
  """Assert the condition `x <= 0` holds element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_non_positive(x)]):
    output = tf.reduce_sum(x)
  ```

  Non-positive means, for every element `x[i]` of `x`, we have `x[i] <= 0`.
  If `x` is empty this is trivially satisfied.

  Args:
    x: Numeric `Tensor`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).
      Defaults to "assert_non_positive".

  Returns:
    Op raising `InvalidArgumentError` unless `x` is all non-positive.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_non_positive', [x, data]):
    x = ops.convert_to_tensor(x, name='x')
    if data is None:
      if context.executing_eagerly():
        name = _shape_and_dtype_str(x)
      else:
        name = x.name
      # BUG FIX: a comma was missing after the condition string below, so
      # Python's implicit string-literal concatenation merged it with
      # 'x (%s) = ' and applied the %-format to the combined literal,
      # yielding one malformed data entry instead of two (every sibling
      # assert_* builds these as separate entries).
      data = [
          message,
          'Condition x <= 0 did not hold element-wise:',
          'x (%s) = ' % name, x]
    zero = ops.convert_to_tensor(0, dtype=x.dtype)
    return assert_less_equal(x, zero, data=data, summarize=summarize)
@tf_export('debugging.assert_equal', 'assert_equal')
def assert_equal(x, y, data=None, summarize=None, message=None, name=None):
  """Assert the condition `x == y` holds element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_equal(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] == y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_equal".

  Returns:
    Op that raises `InvalidArgumentError` if `x == y` is False.
    @compatibility{eager} returns None

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x == y` is False. The check can be performed immediately during eager
      execution or if `x` and `y` are statically known.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_equal', [x, y, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')
    if context.executing_eagerly():
      # Eager path: evaluate now and raise immediately with a rich
      # diagnostic message (no Assert op is created).
      eq = math_ops.equal(x, y)
      condition = math_ops.reduce_all(eq)
      if not condition:
        # Prepare a message with first elements of x and y.
        summary_msg = ''
        # Default to printing 3 elements like control_flow_ops.Assert (used
        # by graph mode) does.
        summarize = 3 if summarize is None else summarize
        if summarize:
          # reshape((-1,)) is the fastest way to get a flat array view.
          x_np = x.numpy().reshape((-1,))
          y_np = y.numpy().reshape((-1,))
          x_sum = min(x_np.size, summarize)
          y_sum = min(y_np.size, summarize)
          summary_msg = ('First %d elements of x:\n%s\n'
                         'First %d elements of y:\n%s\n' %
                         (x_sum, x_np[:x_sum],
                          y_sum, y_np[:y_sum]))
        index_and_values_str = ''
        if x.shape == y.shape and x.shape.as_list():
          # If the shapes of x and y are the same (and not scalars),
          # Get the values that actually differed and their indices.
          # If shapes are different this information is more confusing
          # than useful.
          mask = math_ops.logical_not(eq)
          indices = array_ops.where(mask)
          indices_np = indices.numpy()
          x_vals = array_ops.boolean_mask(x, mask)
          y_vals = array_ops.boolean_mask(y, mask)
          # Cap at the number of mismatches actually found.
          summarize = min(summarize, indices_np.shape[0])
          index_and_values_str = (
              'Indices of first %s different values:\n%s\n'
              'Corresponding x values:\n%s\n'
              'Corresponding y values:\n%s\n' %
              (summarize, indices_np[:summarize],
               x_vals.numpy().reshape((-1,))[:summarize],
               y_vals.numpy().reshape((-1,))[:summarize]))
        raise errors.InvalidArgumentError(
            node_def=None, op=None,
            message=('%s\nCondition x == y did not hold.\n%s%s' %
                     (message or '', index_and_values_str, summary_msg)))
      # Eager check passed; nothing to return (see @compatibility note).
      return
    # Graph path: build an Assert op, but fail eagerly at graph-construction
    # time when both operands are statically known constants.
    if data is None:
      data = [
          message,
          'Condition x == y did not hold element-wise:',
          'x (%s) = ' % x.name, x,
          'y (%s) = ' % y.name, y
      ]
    condition = math_ops.reduce_all(math_ops.equal(x, y))
    x_static = tensor_util.constant_value(x)
    y_static = tensor_util.constant_value(y)
    if x_static is not None and y_static is not None:
      condition_static = (x_static == y_static).all()
      _assert_static(condition_static, data)
    return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export(
    'debugging.assert_none_equal',
    v1=['debugging.assert_none_equal', 'assert_none_equal'])
@deprecation.deprecated_endpoints('assert_none_equal')
def assert_none_equal(
    x, y, data=None, summarize=None, message=None, name=None):
  """Assert the condition `x != y` holds for all elements.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_none_equal(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] != y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).
      Defaults to "assert_none_equal".

  Returns:
    Op that raises `InvalidArgumentError` if `x != y` is ever False.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_none_equal', [x, y, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')
    # Eager tensors have no stable `.name`; describe shape/dtype instead.
    eager = context.executing_eagerly()
    x_name = _shape_and_dtype_str(x) if eager else x.name
    y_name = _shape_and_dtype_str(y) if eager else y.name
    if data is None:
      data = [
          message,
          'Condition x != y did not hold for every single element:',
          'x (%s) = ' % x_name, x,
          'y (%s) = ' % y_name, y
      ]
    all_distinct = math_ops.reduce_all(math_ops.not_equal(x, y))
    return control_flow_ops.Assert(all_distinct, data, summarize=summarize)
@tf_export('debugging.assert_near', v1=['debugging.assert_near', 'assert_near'])
@deprecation.deprecated_endpoints('assert_near')
def assert_near(
    x, y, rtol=None, atol=None, data=None, summarize=None, message=None,
    name=None):
  """Assert the condition `x` and `y` are close element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_near(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have

  ```tf.abs(x[i] - y[i]) <= atol + rtol * tf.abs(y[i])```.

  If both `x` and `y` are empty, this is trivially satisfied.

  The default `atol` and `rtol` is `10 * eps`, where `eps` is the smallest
  representable positive number such that `1 + eps != 1`. This is about
  `1.2e-6` in `32bit`, `2.22e-15` in `64bit`, and `0.00977` in `16bit`.
  See `numpy.finfo`.

  Args:
    x: Float or complex `Tensor`.
    y: Float or complex `Tensor`, same `dtype` as, and broadcastable to, `x`.
    rtol: `Tensor`. Same `dtype` as, and broadcastable to, `x`.
      The relative tolerance. Default is `10 * eps`.
    atol: `Tensor`. Same `dtype` as, and broadcastable to, `x`.
      The absolute tolerance. Default is `10 * eps`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_near".

  Returns:
    Op that raises `InvalidArgumentError` if `x` and `y` are not close enough.

  @compatibility(numpy)
  Similar to `numpy.assert_allclose`, except tolerance depends on data type.
  This is due to the fact that `TensorFlow` is often used with `32bit`, `64bit`,
  and even `16bit` data.
  @end_compatibility
  """
  message = message or ''
  with ops.name_scope(name, 'assert_near', [x, y, rtol, atol, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y', dtype=x.dtype)
    # Tolerance defaults scale with the dtype's machine epsilon.
    eps = np.finfo(x.dtype.as_numpy_dtype).eps
    rtol = 10 * eps if rtol is None else rtol
    atol = 10 * eps if atol is None else atol
    rtol = ops.convert_to_tensor(rtol, name='rtol', dtype=x.dtype)
    atol = ops.convert_to_tensor(atol, name='atol', dtype=x.dtype)
    if context.executing_eagerly():
      x_name = _shape_and_dtype_str(x)
      y_name = _shape_and_dtype_str(y)
    else:
      x_name = x.name
      y_name = y.name
    if data is None:
      data = [
          message,
          'x and y not equal to tolerance rtol = %s, atol = %s' % (rtol, atol),
          'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y
      ]
    tol = atol + rtol * math_ops.abs(y)
    diff = math_ops.abs(x - y)
    # NOTE(review): uses strict `<` although the docstring states `<=` —
    # values landing exactly on the tolerance boundary fail; confirm intended.
    condition = math_ops.reduce_all(math_ops.less(diff, tol))
    return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export('debugging.assert_less', 'assert_less')
def assert_less(x, y, data=None, summarize=None, message=None, name=None):
  """Assert the condition `x < y` holds element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_less(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] < y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_less".

  Returns:
    Op that raises `InvalidArgumentError` if `x < y` is False.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_less', [x, y, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')
    # Eager tensors have no stable `.name`; describe shape/dtype instead.
    eager = context.executing_eagerly()
    x_name = _shape_and_dtype_str(x) if eager else x.name
    y_name = _shape_and_dtype_str(y) if eager else y.name
    if data is None:
      data = [
          message,
          'Condition x < y did not hold element-wise:',
          'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y
      ]
    all_less = math_ops.reduce_all(math_ops.less(x, y))
    return control_flow_ops.Assert(all_less, data, summarize=summarize)
@tf_export(
    'debugging.assert_less_equal',
    v1=['debugging.assert_less_equal', 'assert_less_equal'])
@deprecation.deprecated_endpoints('assert_less_equal')
def assert_less_equal(x, y, data=None, summarize=None, message=None, name=None):
  """Assert the condition `x <= y` holds element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_less_equal(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] <= y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_less_equal"

  Returns:
    Op that raises `InvalidArgumentError` if `x <= y` is False.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_less_equal', [x, y, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')
    if context.executing_eagerly():
      x_name = _shape_and_dtype_str(x)
      y_name = _shape_and_dtype_str(y)
    else:
      x_name = x.name
      y_name = y.name
    if data is None:
      # BUG FIX: a comma was missing after the condition string below, so
      # Python's implicit string-literal concatenation merged it with
      # 'x (%s) = ' and applied the %-format to the combined literal,
      # yielding one malformed data entry (compare assert_less, which
      # builds them as separate entries).
      data = [
          message,
          'Condition x <= y did not hold element-wise:',
          'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y
      ]
    condition = math_ops.reduce_all(math_ops.less_equal(x, y))
    return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export('debugging.assert_greater', 'assert_greater')
def assert_greater(x, y, data=None, summarize=None, message=None, name=None):
  """Assert the condition `x > y` holds element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_greater(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] > y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_greater".

  Returns:
    Op that raises `InvalidArgumentError` if `x > y` is False.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_greater', [x, y, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')
    if context.executing_eagerly():
      x_name = _shape_and_dtype_str(x)
      y_name = _shape_and_dtype_str(y)
    else:
      x_name = x.name
      y_name = y.name
    if data is None:
      # BUG FIX: a comma was missing after the condition string below, so
      # Python's implicit string-literal concatenation merged it with
      # 'x (%s) = ' and applied the %-format to the combined literal,
      # yielding one malformed data entry (compare assert_less, which
      # builds them as separate entries).
      data = [
          message,
          'Condition x > y did not hold element-wise:',
          'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y
      ]
    condition = math_ops.reduce_all(math_ops.greater(x, y))
    return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export(
    'debugging.assert_greater_equal',
    v1=['debugging.assert_greater_equal', 'assert_greater_equal'])
@deprecation.deprecated_endpoints('assert_greater_equal')
def assert_greater_equal(x, y, data=None, summarize=None, message=None,
                         name=None):
  """Assert the condition `x >= y` holds element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_greater_equal(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have `x[i] >= y[i]`.
  If both `x` and `y` are empty, this is trivially satisfied.

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to
      "assert_greater_equal"

  Returns:
    Op that raises `InvalidArgumentError` if `x >= y` is False.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_greater_equal', [x, y, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')
    if context.executing_eagerly():
      x_name = _shape_and_dtype_str(x)
      y_name = _shape_and_dtype_str(y)
    else:
      x_name = x.name
      y_name = y.name
    if data is None:
      # BUG FIX: a comma was missing after the condition string below, so
      # Python's implicit string-literal concatenation merged it with
      # 'x (%s) = ' and applied the %-format to the combined literal,
      # yielding one malformed data entry (compare assert_less, which
      # builds them as separate entries).
      data = [
          message,
          'Condition x >= y did not hold element-wise:',
          'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y
      ]
    condition = math_ops.reduce_all(math_ops.greater_equal(x, y))
    return control_flow_ops.Assert(condition, data, summarize=summarize)
def _assert_rank_condition(
    x, rank, static_condition, dynamic_condition, data, summarize):
  """Assert `x` has a rank that satisfies a given condition.

  Args:
    x: Numeric `Tensor`.
    rank: Scalar `Tensor`.
    static_condition: A python function that takes `[actual_rank, given_rank]`
      and returns `True` if the condition is satisfied, `False` otherwise.
    dynamic_condition: An `op` that takes [actual_rank, given_rank]
      and return `True` if the condition is satisfied, `False` otherwise.
    data: The tensors to print out if the condition is false. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.

  Returns:
    Op raising `InvalidArgumentError` if `x` fails dynamic_condition.

  Raises:
    ValueError: If static checks determine `x` fails static_condition.
  """
  assert_type(rank, dtypes.int32)
  # Attempt to determine the rank statically.
  rank_static = tensor_util.constant_value(rank)
  if rank_static is not None:
    if rank_static.ndim != 0:
      raise ValueError('Rank must be a scalar.')
    x_rank_static = x.get_shape().ndims
    if x_rank_static is not None:
      if not static_condition(x_rank_static, rank_static):
        # Callers (e.g. assert_rank) parse this ValueError positionally:
        # args[1] is the actual rank, args[2] the required rank.
        raise ValueError(
            'Static rank condition failed', x_rank_static, rank_static)
      # Fully verified at graph-construction time; nothing to check at runtime.
      return control_flow_ops.no_op(name='static_checks_determined_all_ok')
  condition = dynamic_condition(array_ops.rank(x), rank)
  # Add the condition that `rank` must have rank zero. Prevents the bug where
  # someone does assert_rank(x, [n]), rather than assert_rank(x, n).
  if rank_static is None:
    this_data = ['Rank must be a scalar. Received rank: ', rank]
    rank_check = assert_rank(rank, 0, data=this_data)
    condition = control_flow_ops.with_dependencies([rank_check], condition)
  return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export('debugging.assert_rank', 'assert_rank')
def assert_rank(x, rank, data=None, summarize=None, message=None, name=None):
  """Assert `x` has rank equal to `rank`.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_rank(x, 2)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    rank: Scalar integer `Tensor`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_rank".

  Returns:
    Op raising `InvalidArgumentError` unless `x` has specified rank.
    If static checks determine `x` has correct rank, a `no_op` is returned.

  Raises:
    ValueError: If static checks determine `x` has wrong rank.
  """
  with ops.name_scope(name, 'assert_rank', (x, rank) + tuple(data or [])):
    x = ops.convert_to_tensor(x, name='x')
    rank = ops.convert_to_tensor(rank, name='rank')
    message = message or ''
    # Exact-rank condition; shared helper handles static/dynamic dispatch.
    static_condition = lambda actual_rank, given_rank: actual_rank == given_rank
    dynamic_condition = math_ops.equal
    if context.executing_eagerly():
      name = ''
    else:
      name = x.name
    if data is None:
      data = [
          message,
          'Tensor %s must have rank' % name, rank, 'Received shape: ',
          array_ops.shape(x)
      ]
    try:
      assert_op = _assert_rank_condition(x, rank, static_condition,
                                         dynamic_condition, data, summarize)
    except ValueError as e:
      # _assert_rank_condition raises ('Static rank condition failed',
      # actual_rank, required_rank); re-raise with a readable message.
      if e.args[0] == 'Static rank condition failed':
        raise ValueError(
            '%s. Tensor %s must have rank %d. Received rank %d, shape %s' %
            (message, name, e.args[2], e.args[1], x.get_shape()))
      else:
        raise
  return assert_op
@tf_export(
    'debugging.assert_rank_at_least',
    v1=['debugging.assert_rank_at_least', 'assert_rank_at_least'])
@deprecation.deprecated_endpoints('assert_rank_at_least')
def assert_rank_at_least(
    x, rank, data=None, summarize=None, message=None, name=None):
  """Assert `x` has rank equal to `rank` or higher.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_rank_at_least(x, 2)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    rank: Scalar `Tensor`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).
      Defaults to "assert_rank_at_least".

  Returns:
    Op raising `InvalidArgumentError` unless `x` has specified rank or higher.
    If static checks determine `x` has correct rank, a `no_op` is returned.

  Raises:
    ValueError: If static checks determine `x` has wrong rank.
  """
  with ops.name_scope(
      name, 'assert_rank_at_least', (x, rank) + tuple(data or [])):
    x = ops.convert_to_tensor(x, name='x')
    rank = ops.convert_to_tensor(rank, name='rank')
    message = message or ''
    # Minimum-rank condition; shared helper handles static/dynamic dispatch.
    static_condition = lambda actual_rank, given_rank: actual_rank >= given_rank
    dynamic_condition = math_ops.greater_equal
    if context.executing_eagerly():
      name = ''
    else:
      name = x.name
    if data is None:
      data = [
          message,
          'Tensor %s must have rank at least' % name, rank,
          'Received shape: ', array_ops.shape(x)
      ]
    try:
      assert_op = _assert_rank_condition(x, rank, static_condition,
                                         dynamic_condition, data, summarize)
    except ValueError as e:
      # _assert_rank_condition raises ('Static rank condition failed',
      # actual_rank, required_rank); re-raise with a readable message.
      if e.args[0] == 'Static rank condition failed':
        raise ValueError(
            '%s. Tensor %s must have rank at least %d. Received rank %d, '
            'shape %s' % (message, name, e.args[2], e.args[1], x.get_shape()))
      else:
        raise
  return assert_op
def _static_rank_in(actual_rank, given_ranks):
return actual_rank in given_ranks
def _dynamic_rank_in(actual_rank, given_ranks):
  """Builds an op that is True iff `actual_rank` equals any of `given_ranks`."""
  if len(given_ranks) < 1:
    # No candidate ranks: the membership test is vacuously false.
    return ops.convert_to_tensor(False)
  # OR together one equality test per candidate rank, in order.
  matches = [math_ops.equal(r, actual_rank) for r in given_ranks]
  result = matches[0]
  for match in matches[1:]:
    result = math_ops.logical_or(result, match)
  return result
def _assert_ranks_condition(
    x, ranks, static_condition, dynamic_condition, data, summarize):
  """Assert `x` has a rank that satisfies a given condition.

  Args:
    x: Numeric `Tensor`.
    ranks: Scalar `Tensor`.
    static_condition: A python function that takes
      `[actual_rank, given_ranks]` and returns `True` if the condition is
      satisfied, `False` otherwise.
    dynamic_condition: An `op` that takes [actual_rank, given_ranks]
      and return `True` if the condition is satisfied, `False` otherwise.
    data: The tensors to print out if the condition is false. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.

  Returns:
    Op raising `InvalidArgumentError` if `x` fails dynamic_condition.

  Raises:
    ValueError: If static checks determine `x` fails static_condition.
  """
  for rank in ranks:
    assert_type(rank, dtypes.int32)
  # Attempt to determine each rank statically.
  ranks_static = tuple([tensor_util.constant_value(rank) for rank in ranks])
  if not any(r is None for r in ranks_static):
    # All ranks statically known: the check can be resolved now.
    for rank_static in ranks_static:
      if rank_static.ndim != 0:
        raise ValueError('Rank must be a scalar.')
    x_rank_static = x.get_shape().ndims
    if x_rank_static is not None:
      if not static_condition(x_rank_static, ranks_static):
        # Callers (e.g. assert_rank_in) parse this ValueError positionally:
        # args[1] is the actual rank, args[2] the allowed ranks.
        raise ValueError(
            'Static rank condition failed', x_rank_static, ranks_static)
      return control_flow_ops.no_op(name='static_checks_determined_all_ok')
  condition = dynamic_condition(array_ops.rank(x), ranks)
  # Add the condition that `rank` must have rank zero. Prevents the bug where
  # someone does assert_rank(x, [n]), rather than assert_rank(x, n).
  for rank, rank_static in zip(ranks, ranks_static):
    if rank_static is None:
      this_data = ['Rank must be a scalar. Received rank: ', rank]
      rank_check = assert_rank(rank, 0, data=this_data)
      condition = control_flow_ops.with_dependencies([rank_check], condition)
  return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export(
    'debugging.assert_rank_in',
    v1=['debugging.assert_rank_in', 'assert_rank_in'])
@deprecation.deprecated_endpoints('assert_rank_in')
def assert_rank_in(
    x, ranks, data=None, summarize=None, message=None, name=None):
  """Assert `x` has rank in `ranks`.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_rank_in(x, (2, 4))]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    ranks: Iterable of scalar `Tensor` objects.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).
      Defaults to "assert_rank_in".

  Returns:
    Op raising `InvalidArgumentError` unless rank of `x` is in `ranks`.
    If static checks determine `x` has matching rank, a `no_op` is returned.

  Raises:
    ValueError: If static checks determine `x` has mismatched rank.
  """
  with ops.name_scope(
      name, 'assert_rank_in', (x,) + tuple(ranks) + tuple(data or [])):
    x = ops.convert_to_tensor(x, name='x')
    ranks = tuple([ops.convert_to_tensor(rank, name='rank') for rank in ranks])
    message = message or ''
    if context.executing_eagerly():
      name = ''
    else:
      name = x.name
    if data is None:
      data = [
          message, 'Tensor %s must have rank in' % name
      ] + list(ranks) + [
          'Received shape: ', array_ops.shape(x)
      ]
    try:
      assert_op = _assert_ranks_condition(x, ranks, _static_rank_in,
                                          _dynamic_rank_in, data, summarize)
    except ValueError as e:
      # _assert_ranks_condition raises ('Static rank condition failed',
      # actual_rank, allowed_ranks); re-raise with a readable message.
      if e.args[0] == 'Static rank condition failed':
        raise ValueError(
            '%s. Tensor %s must have rank in %s. Received rank %d, '
            'shape %s' % (message, name, e.args[2], e.args[1], x.get_shape()))
      else:
        raise
  return assert_op
@tf_export(
    'debugging.assert_integer',
    v1=['debugging.assert_integer', 'assert_integer'])
@deprecation.deprecated_endpoints('assert_integer')
def assert_integer(x, message=None, name=None):
  """Assert that `x` is of integer dtype.

  The check is fully static (dtype only), so on success this returns a
  `no_op` rather than a runtime assertion.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_integer(x)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: `Tensor` whose basetype is integer and is not quantized.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_integer".

  Raises:
    TypeError: If `x.dtype` is anything other than non-quantized integer.

  Returns:
    A `no_op` that does nothing. Type can be determined statically.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_integer', [x]):
    x = ops.convert_to_tensor(x, name='x')
    if not x.dtype.is_integer:
      # Eager tensors have no stable `.name` to report.
      name = 'tensor' if context.executing_eagerly() else x.name
      raise TypeError(
          '%s Expected "x" to be integer type. Found: %s of dtype %s'
          % (message, name, x.dtype))
    return control_flow_ops.no_op('statically_determined_was_integer')
@tf_export('debugging.assert_type', v1=['debugging.assert_type', 'assert_type'])
@deprecation.deprecated_endpoints('assert_type')
def assert_type(tensor, tf_type, message=None, name=None):
  """Statically asserts that the given `Tensor` is of the specified type.

  Args:
    tensor: A tensorflow `Tensor`.
    tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`,
      etc).
    message: A string to prefix to the default message.
    name: A name to give this `Op`. Defaults to "assert_type"

  Raises:
    TypeError: If the tensors data type doesn't match `tf_type`.

  Returns:
    A `no_op` that does nothing. Type can be determined statically.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_type', [tensor]):
    tensor = ops.convert_to_tensor(tensor, name='tensor')
    if tensor.dtype != tf_type:
      # Eager tensors have no stable `.name` to include in the message.
      if context.executing_eagerly():
        raise TypeError('%s tensor must be of type %s' % (message, tf_type))
      raise TypeError('%s %s must be of type %s' % (message, tensor.name,
                                                    tf_type))
    return control_flow_ops.no_op('statically_determined_correct_type')
# pylint: disable=line-too-long
def _get_diff_for_monotonic_comparison(x):
  """Gets the difference x[1:] - x[:-1]."""
  flat = array_ops.reshape(x, [-1])
  if not is_numeric_tensor(flat):
    raise TypeError('Expected x to be numeric, instead found: %s' % flat)

  # With fewer than two elements there are no adjacent pairs, so the
  # difference is the empty tensor.
  too_short = math_ops.less(array_ops.size(flat), 2)
  empty_diff = lambda: ops.convert_to_tensor([], dtype=flat.dtype)

  # With two or more elements, compute x[1:] - x[:-1].
  last = array_ops.shape(flat) - 1
  pairwise_diff = lambda: (array_ops.strided_slice(flat, [1], [1] + last)
                           - array_ops.strided_slice(flat, [0], last))
  return control_flow_ops.cond(too_short, empty_diff, pairwise_diff)
@tf_export(
    'debugging.is_numeric_tensor',
    v1=['debugging.is_numeric_tensor', 'is_numeric_tensor'])
@deprecation.deprecated_endpoints('is_numeric_tensor')
def is_numeric_tensor(tensor):
  """Returns `True` iff `tensor` is a `Tensor` with a numeric dtype."""
  if not isinstance(tensor, ops.Tensor):
    return False
  return tensor.dtype in NUMERIC_TYPES
@tf_export(
    'debugging.is_non_decreasing',
    v1=['debugging.is_non_decreasing', 'is_non_decreasing'])
@deprecation.deprecated_endpoints('is_non_decreasing')
def is_non_decreasing(x, name=None):
  """Returns `True` if `x` is non-decreasing.

  Elements of `x` are compared in row-major order. The tensor `[x[0],...]`
  is non-decreasing if for every adjacent pair we have `x[i] <= x[i+1]`.
  If `x` has less than two elements, it is trivially non-decreasing.

  See also: `is_strictly_increasing`

  Args:
    x: Numeric `Tensor`.
    name: A name for this operation (optional). Defaults to "is_non_decreasing"

  Returns:
    Boolean `Tensor`, equal to `True` iff `x` is non-decreasing.

  Raises:
    TypeError: if `x` is not a numeric tensor.
  """
  with ops.name_scope(name, 'is_non_decreasing', [x]):
    deltas = _get_diff_for_monotonic_comparison(x)
    # For len(x) < 2, deltas is empty and reduce_all([]) == True, which is
    # exactly the trivial case described above.
    zero = ops.convert_to_tensor(0, dtype=deltas.dtype)
    return math_ops.reduce_all(math_ops.less_equal(zero, deltas))
@tf_export(
    'debugging.is_strictly_increasing',
    v1=['debugging.is_strictly_increasing', 'is_strictly_increasing'])
@deprecation.deprecated_endpoints('is_strictly_increasing')
def is_strictly_increasing(x, name=None):
  """Returns `True` if `x` is strictly increasing.

  Elements of `x` are compared in row-major order. The tensor `[x[0],...]`
  is strictly increasing if for every adjacent pair we have `x[i] < x[i+1]`.
  If `x` has less than two elements, it is trivially strictly increasing.

  See also: `is_non_decreasing`

  Args:
    x: Numeric `Tensor`.
    name: A name for this operation (optional).
      Defaults to "is_strictly_increasing"

  Returns:
    Boolean `Tensor`, equal to `True` iff `x` is strictly increasing.

  Raises:
    TypeError: if `x` is not a numeric tensor.
  """
  with ops.name_scope(name, 'is_strictly_increasing', [x]):
    deltas = _get_diff_for_monotonic_comparison(x)
    # For len(x) < 2, deltas is empty and reduce_all([]) == True, which is
    # exactly the trivial case described above.
    zero = ops.convert_to_tensor(0, dtype=deltas.dtype)
    return math_ops.reduce_all(math_ops.less(zero, deltas))
def _assert_same_base_type(items, expected_type=None):
r"""Asserts all items are of the same base type.
Args:
items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,
`Operation`, or `IndexedSlices`). Can include `None` elements, which
will be ignored.
expected_type: Expected type. If not specified, assert all items are
of the same base type.
Returns:
Validated type, or none if neither expected_type nor items provided.
Raises:
ValueError: If any types do not match.
"""
original_expected_type = expected_type
mismatch = False
for item in items:
if item is not None:
item_type = item.dtype.base_dtype
if not expected_type:
expected_type = item_type
elif expected_type != item_type:
mismatch = True
break
if mismatch:
# Loop back through and build up an informative error message (this is very
# slow, so we don't do it unless we found an error above).
expected_type = original_expected_type
original_item_str = None
for item in items:
if item is not None:
item_type = item.dtype.base_dtype
if not expected_type:
expected_type = item_type
original_item_str = item.name if hasattr(item, 'name') else str(item)
elif expected_type != item_type:
raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (
item.name if hasattr(item, 'name') else str(item),
item_type, expected_type,
(' as %s' % original_item_str) if original_item_str else ''))
return expected_type # Should be unreachable
else:
return expected_type
@tf_export(
    'debugging.assert_same_float_dtype',
    v1=['debugging.assert_same_float_dtype', 'assert_same_float_dtype'])
@deprecation.deprecated_endpoints('assert_same_float_dtype')
def assert_same_float_dtype(tensors=None, dtype=None):
  """Validate and return float type based on `tensors` and `dtype`.

  For ops such as matrix multiplication, inputs and weights must be of the
  same float type. This function validates that all `tensors` are the same
  type, validates that type is `dtype` (if supplied), and returns the type.
  Type must be a floating point type. If neither `tensors` nor `dtype` is
  supplied, the function will return `dtypes.float32`.

  Args:
    tensors: Tensors of input values. Can include `None` elements, which will
        be ignored.
    dtype: Expected type.

  Returns:
    Validated type.

  Raises:
    ValueError: if neither `tensors` nor `dtype` is supplied, or result is not
        float, or the common type of the inputs is not a floating point type.
  """
  if tensors:
    dtype = _assert_same_base_type(tensors, dtype)
  # Default to float32 when nothing fixed the type; float32 is floating, so
  # the check below only ever fires for a caller- or tensor-supplied dtype.
  dtype = dtype or dtypes.float32
  if not dtype.is_floating:
    raise ValueError('Expected floating point type, got %s.' % dtype)
  return dtype
@tf_export(
    'debugging.assert_scalar', v1=['debugging.assert_scalar', 'assert_scalar'])
@deprecation.deprecated_endpoints('assert_scalar')
def assert_scalar(tensor, name=None):
  """Statically asserts that `tensor` has rank 0 and returns it unchanged."""
  with ops.name_scope(name, 'assert_scalar', [tensor]) as name_scope:
    tensor = ops.convert_to_tensor(tensor, name=name_scope)
    static_shape = tensor.get_shape()
    if static_shape.ndims != 0:
      # Eager tensors have no meaningful graph name to report.
      if context.executing_eagerly():
        raise ValueError('Expected scalar shape, saw shape: %s.'
                         % (static_shape,))
      raise ValueError('Expected scalar shape for %s, saw shape: %s.'
                       % (tensor.name, static_shape))
    return tensor
@tf_export('ensure_shape')
def ensure_shape(x, shape, name=None):
  """Updates the shape of a tensor and checks at runtime that the shape holds.

  For example:

  ```python
  x = tf.placeholder(tf.int32)
  print(x.shape)
  ==> TensorShape(None)
  y = x * 2
  print(y.shape)
  ==> TensorShape(None)

  y = tf.ensure_shape(y, (None, 3, 3))
  print(y.shape)
  ==> TensorShape([Dimension(None), Dimension(3), Dimension(3)])

  with tf.Session() as sess:
    # Raises tf.errors.InvalidArgumentError, because the shape (3,) is not
    # compatible with the shape (None, 3, 3)
    sess.run(y, feed_dict={x: [1, 2, 3]})
  ```

  NOTE: This differs from `Tensor.set_shape` in that it sets the static shape
  of the resulting tensor and enforces it at runtime, raising an error if the
  tensor's runtime shape is incompatible with the specified shape.
  `Tensor.set_shape` sets the static shape of the tensor without enforcing it
  at runtime, which may result in inconsistencies between the statically-known
  shape of tensors and the runtime value of tensors.

  Args:
    x: A `Tensor`.
    shape: A `TensorShape` representing the shape of this tensor, a
      `TensorShapeProto`, a list, a tuple, or None.
    name: A name for this operation (optional). Defaults to "EnsureShape".

  Returns:
    A `Tensor`. Has the same type and contents as `x`. At runtime, raises a
    `tf.errors.InvalidArgumentError` if `shape` is incompatible with the shape
    of `x`.
  """
  # Normalize any accepted shape representation to a TensorShape.
  static_shape = (shape if isinstance(shape, tensor_shape.TensorShape)
                  else tensor_shape.TensorShape(shape))
  return array_ops.ensure_shape(x, static_shape, name=name)
@ops.RegisterGradient('EnsureShape')
def _ensure_shape_grad(op, grad):
  """EnsureShape is an identity with a runtime check; gradient passes through."""
  del op  # Unused.
  return grad
|
{
"content_hash": "649b07dea67bf62c79a13193dfbde517",
"timestamp": "",
"source": "github",
"line_count": 1335,
"max_line_length": 101,
"avg_line_length": 35.14681647940075,
"alnum_prop": 0.6472581573282752,
"repo_name": "girving/tensorflow",
"id": "40b111ea0c2bca773e3fe6744fcb1e7d95791371",
"size": "47658",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/check_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3325"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "343258"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50036869"
},
{
"name": "CMake",
"bytes": "196127"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1254086"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "867313"
},
{
"name": "Jupyter Notebook",
"bytes": "2604735"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "58787"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "42041620"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "477299"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
import unittest
from future.moves.urllib.parse import urlparse, urljoin, parse_qs
import pytest
from addons.twofactor.tests.utils import _valid_code
from nose.tools import (assert_equal, assert_false, assert_is_none,
assert_is_not_none, assert_true)
from osf_tests.factories import UserFactory
# Every test in this module hits the database (UserFactory / addon models).
pytestmark = pytest.mark.django_db
class TestCallbacks(unittest.TestCase):
    """Lifecycle tests for adding/removing the twofactor addon on a user."""

    def setUp(self):
        super(TestCallbacks, self).setUp()
        self.user = UserFactory()
        self.user.add_addon('twofactor')
        self.user_settings = self.user.get_addon('twofactor')

    def test_add_to_user(self):
        settings = self.user_settings
        assert_equal(settings.totp_drift, 0)
        assert_is_not_none(settings.totp_secret)
        assert_false(settings.is_confirmed)

    def test_remove_from_unconfirmed_user(self):
        # Drift defaults to 0; change it so we can verify it is reset.
        self.user_settings.totp_drift = 1
        self.user_settings.save()

        self.user.delete_addon('twofactor')
        self.user_settings.reload()

        settings = self.user_settings
        assert_equal(settings.totp_drift, 0)
        assert_is_none(settings.totp_secret)
        assert_false(settings.is_confirmed)

    def test_remove_from_confirmed_user(self):
        # Drift defaults to 0; change it so we can verify it is reset.
        self.user_settings.totp_drift = 1
        self.user_settings.is_confirmed = True
        self.user_settings.save()

        self.user.delete_addon('twofactor')
        self.user_settings.reload()

        settings = self.user_settings
        assert_equal(settings.totp_drift, 0)
        assert_is_none(settings.totp_secret)
        assert_false(settings.is_confirmed)
class TestUserSettingsModel(unittest.TestCase):
    """Tests for the twofactor user-settings model (secret, URL, TOTP)."""

    TOTP_SECRET = 'b8f85986068f8079aa9d'
    TOTP_SECRET_B32 = 'XD4FTBQGR6AHTKU5'

    def setUp(self):
        super(TestUserSettingsModel, self).setUp()
        self.user = UserFactory()
        self.user.add_addon('twofactor')
        self.user_settings = self.user.get_addon('twofactor')
        self.user_settings.totp_secret = self.TOTP_SECRET
        self.user_settings.save()

    def tearDown(self):
        super(TestUserSettingsModel, self).tearDown()
        self.user.__class__.delete(self.user)

    def test_b32(self):
        assert_equal(self.user_settings.totp_secret_b32, self.TOTP_SECRET_B32)

    def test_otpauth_url(self):
        parsed = urlparse(self.user_settings.otpauth_url)
        assert_equal(parsed.scheme, 'otpauth')
        assert_equal(parsed.netloc, 'totp')
        assert_equal(parsed.path, '/OSF:{}'.format(self.user.username))
        assert_equal(
            parse_qs(parsed.query),
            {'secret': [self.TOTP_SECRET_B32]},
        )

    def test_json(self):
        expected = {
            'is_enabled': True,
            'addon_full_name': 'Two-factor Authentication',
            'addon_short_name': 'twofactor',
            'drift': 0,
            'is_confirmed': False,
            'nodes': [],
            'secret': self.TOTP_SECRET_B32,
            'has_auth': False,
        }
        assert_equal(self.user_settings.to_json(user=None), expected)

    def test_verify_valid_code(self):
        assert_true(
            self.user_settings.verify_code(_valid_code(self.TOTP_SECRET)))

    def test_verify_valid_core_drift(self):
        # A code from one period (30s) in the future is accepted...
        assert_true(self.user_settings.verify_code(
            _valid_code(self.TOTP_SECRET, drift=1)))
        # ...and the stored drift follows it.
        assert_equal(self.user_settings.totp_drift, 1)

        # Likewise for a code from two periods (60s) in the future.
        assert_true(self.user_settings.verify_code(
            _valid_code(self.TOTP_SECRET, drift=2)))
        assert_equal(self.user_settings.totp_drift, 2)

        # The current code is now two periods behind the drift: rejected.
        assert_false(
            self.user_settings.verify_code(_valid_code(self.TOTP_SECRET)))
|
{
"content_hash": "791112f6094ad7f9885fd2042e255f50",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 78,
"avg_line_length": 32.66412213740458,
"alnum_prop": 0.6106566954896003,
"repo_name": "baylee-d/osf.io",
"id": "244f3e2e3fb995ca9d16d40813d6280f49f9d380",
"size": "4279",
"binary": false,
"copies": "10",
"ref": "refs/heads/develop",
"path": "addons/twofactor/tests/test_models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "92773"
},
{
"name": "Dockerfile",
"bytes": "5721"
},
{
"name": "HTML",
"bytes": "318459"
},
{
"name": "JavaScript",
"bytes": "1792442"
},
{
"name": "Jupyter Notebook",
"bytes": "41326"
},
{
"name": "Mako",
"bytes": "654930"
},
{
"name": "Python",
"bytes": "10662092"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division
from Hypers import tuners
from Spider import *
from Edge import *
import time
import copy
import math
# Needs words in lowercase, and if multiple words, join them using '_'
class Wordclient:
    '''
    Crawls the semantic web around ``word`` and scores how strongly a second
    word (the "client") is connected to it. Feature vectors are
    [path count, highest score, mean score, total score].
    '''

    def __init__(self, word):
        '''
        Constructor to crawl web for a word
        '''
        self.word = word
        sp = Spider(word, spread=2, limit=0.01)
        self.web = sp.crawl('Graph.shelve')  # Crawled web
        self.graph = Shelveopen('Graph.shelve')
        self.paths = []  # To store all paths
        self.scores = []  # To store corresponding pathscores
        self.clientfeatures = []  # Feature vector for client
        self.standardfeatures = []  # To compare against

    # Reusable function for another client
    def init_client(self, client=None):
        '''
        To initialize diff. parameters related to client
        '''
        if client is None:
            client = self.client  # Takes previous client
        else:
            self.client = client
        self.paths, self.scores = self.calcmetric(client)
        # Initializing client features
        i = self.getpathnum()
        j = self.gethighestscore()
        k = self.getmeanscore()
        l = self.gettotalscore()
        self.clientfeatures = [i, j, k, l]

    def init_standard(self):
        '''
        To initialize diff. parameters to oneself
        '''
        paths, scores = self.calcmetric(self.word)
        # Initializing standard features (word compared against itself)
        i = self.getpathnum(paths)
        j = self.gethighestscore(scores)
        k = self.getmeanscore(scores)
        l = self.gettotalscore(scores)
        self.standardfeatures = [i, j, k, l]

    # Generic function for reuse
    def calcmetric(self, client):
        '''
        Collect all paths from the crawled web to `client` and score each
        path as the product of its edge weights. Returns [paths, scores].
        '''
        clientpaths = []
        clientscores = []
        total = []
        clientedges = self.graph[client]
        clientdests = []
        for edge in clientedges:
            clientdests.append(edge.dest)
        # Nodes of the crawled web that connect directly to the client.
        common_points = []
        for node in self.web:
            if node in clientdests:
                common_points.append(node)
        # For each common point, keep only its heaviest edge to the client.
        extrapath = {}
        for node in common_points:
            edges = self.graph[node]
            maxweight = -1
            for edge in edges:
                if edge.dest == client:
                    if maxweight < edge.weight:
                        maxweight = edge.weight
                        extrapath[node] = edge
        # Extend every crawled path through the chosen final edge.
        for node in common_points:
            paths = self.web[node]
            for path in paths:
                extraedge = extrapath[node]
                if extraedge:  # If path exists
                    ls = copy.deepcopy(path)
                    ls.append(extraedge)
                    clientpaths.append(ls)
        # Score calculation: product of edge weights along the path
        for path in clientpaths:
            score = 1
            for edge in path:
                score *= edge.weight
            clientscores.append(score)
        total.append(clientpaths)
        total.append(clientscores)
        return total

    # Functions strictly for access only, no reuse
    def getscores(self):
        '''
        To access client scores
        '''
        return self.scores

    def getpaths(self):
        '''
        To access client paths
        '''
        return self.paths

    def getfeatures(self):
        '''
        To access client features
        '''
        return self.clientfeatures

    def getstandard(self):
        '''
        To access standard features to oneself
        '''
        if self.standardfeatures:
            return self.standardfeatures
        else:
            self.init_standard()  # Initialize Standard
            return self.standardfeatures

    def getmetric(self):
        '''
        To get semantic score between client and word
        '''
        if self.standardfeatures == []:
            self.init_standard()
        # Scaling dimensions to get nearest results
        standfeat = []
        clientfeat = []
        for i in range(len(self.clientfeatures)):
            standfeat.append(self.standardfeatures[i] * tuners[i])
            clientfeat.append(self.clientfeatures[i] * tuners[i])
        # score = Cosine_similarity(standfeat, clientfeat)
        # Ratio of the highest client path score to the highest standard one.
        # BUG FIX: guard against ZeroDivisionError when the word has no
        # scored paths to itself (highest standard score of 0).
        if self.standardfeatures[1]:
            score = self.clientfeatures[1] / self.standardfeatures[1]
        else:
            score = 0
        # File Logging
        log = '\n*******FROM : '+self.word+' TO : '+self.client+' *******'
        Filedump('WordComparison.log',log)
        log = 'Client Feature : '+str(self.clientfeatures)
        Filedump('WordComparison.log',log)
        log = 'Standard Feature : '+str(self.standardfeatures)
        Filedump('WordComparison.log',log)
        log = '#######Semantic Word Score : '+str(score)+' #######'
        Filedump('WordComparison.log',log)
        return score

    def printweb(self):
        '''
        To Print entire web
        '''
        print ('FROM : ',self.word)
        for dest, paths in self.web.items():
            print ('TO : ',dest)
            for i, path in enumerate(paths):
                print ('PATH',i+1,' :',end='')
                for edge in path:
                    print (' |',edge, end='')
                print ()

    def printpaths(self, paths=None, scores=None):
        '''
        To print paths to a client by default, else can print any paths and scores to them
        '''
        if paths is None:
            paths = self.paths
        if scores is None:
            scores = self.scores
        if paths:
            for i, path in enumerate(paths):
                print ('PATH', i+1,' :',end='')
                for edge in path:
                    print (' |',edge, end='')
                print ()
                print ('PathScore : ',scores[i])
        else:
            # BUG FIX: the original referenced an undefined name `dest` here,
            # raising NameError whenever no paths exist. Report the client
            # (falling back to the word itself if no client was set yet).
            print ('Word', getattr(self, 'client', self.word),
                   'is not reachable from Source')

    # Functions reused to create features for standard and client, also can be accessed directly for client
    def gettotalscore(self, scores=None):
        '''
        To compute total score
        '''
        if scores is None:
            scores = self.scores
        return sum(scores)

    def getmeanscore(self, scores=None):
        '''
        Get Mean of all scores
        '''
        if scores is None:
            scores = self.scores
        if len(scores) == 0:
            return 0  # To prevent division by zero
        else:
            return round(sum(scores)/len(scores),3)

    def gethighestscore(self, scores=None):
        '''
        To return highest score
        '''
        if scores is None:
            scores = self.scores
        if len(scores) == 0:
            return 0  # To prevent no arg. error
        else:
            return max(scores)

    def getpathnum(self, paths=None):
        '''
        To return no of paths obtained
        '''
        if paths is None:
            paths = self.paths
        return len(paths)
if __name__ == '__main__':
    # Demo run: score how strongly 'noon' relates to 'midday'.
    start_time = time.time()
    word, client = 'midday', 'noon'
    try:
        wc = Wordclient(word)
        wc.init_client(client)
        score = wc.getmetric()
        print ('Execution Time : ', time.time() - start_time)
    except Exception as e:
        print ('Error Wordclient- ', e)
|
{
"content_hash": "2d22c40a62ef08f01c218e7783a4db7e",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 104,
"avg_line_length": 23.991869918699187,
"alnum_prop": 0.6645205015249068,
"repo_name": "anirudhagar13/SS_Graph",
"id": "4aac5c0dc90497517fe9c3fb2994dd2ed76701d6",
"size": "5902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Wordclient.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "59870"
},
{
"name": "HTML",
"bytes": "17185"
},
{
"name": "JavaScript",
"bytes": "38013"
},
{
"name": "Python",
"bytes": "61676"
},
{
"name": "Shell",
"bytes": "381"
}
],
"symlink_target": ""
}
|
"""
Main Lomb-Scargle Implementation
The ``lombscargle`` function here is essentially a sophisticated switch
statement for the various implementations available in this submodule
"""
import warnings
import numpy as np
from astropy import units
from astropy.utils.compat.numpy import broadcast_arrays
from .slow_impl import lombscargle_slow
from .fast_impl import lombscargle_fast
from .scipy_impl import lombscargle_scipy
from .chi2_impl import lombscargle_chi2
from .fastchi2_impl import lombscargle_fastchi2
# Dispatch table: method name -> periodogram implementation. `lombscargle`
# below selects from this table after `_validate_method` resolves 'auto'.
METHODS = {'slow': lombscargle_slow,
           'fast': lombscargle_fast,
           'chi2': lombscargle_chi2,
           'scipy': lombscargle_scipy,
           'fastchi2': lombscargle_fastchi2}
def _validate_inputs(t, y, dy=None, frequency=None, strip_units=True):
    """Validation of input shapes & units

    This utility function serves a few purposes:

    - it validates that the shapes of t, y, and dy match, and broadcasts
      them to a common 1D shape
    - if any of t, y, dy, or frequency are astropy Quantities (i.e. have
      units attached), it validates that the units are compatible, and does
      any necessary unit conversions
    - if ``strip_units == True``, it strips units from all the arrays
      before returning them.
    - all relevant units are returned in ``unit_dict``

    Parameters
    ----------
    t, y : array_like or Quantity
    dy, frequency : array_like or Quantity (optional)
    strip_units : bool (optional, default=True)
        if True, the returned quantities will have units stripped.

    Returns
    -------
    t, y, dy, frequency : ndarray, Quantity, or None
        reshaped and/or unit-stripped arrays
    unit_dict : dict
        dictionary of relevant units
    """
    # Broadcast to a common shape; subok=True preserves Quantity subclasses.
    if dy is None:
        t, y = broadcast_arrays(t, y, subok=True)
    else:
        t, y, dy = broadcast_arrays(t, y, dy, subok=True)
    if t.ndim != 1:
        raise ValueError("Input times & data must be one-dimensional")
    has_units = any(isinstance(arr, units.Quantity)
                    for arr in (t, y, dy, frequency))
    if has_units:
        power_unit = units.dimensionless_unscaled
        t = units.Quantity(t)
        y = units.Quantity(y)
        if frequency is not None:
            frequency = units.Quantity(frequency)
            if not t.unit.is_equivalent(1. / frequency.unit):
                raise ValueError("Units of frequency not equivalent to "
                                 "units of 1/t")
            # Convert t so that t * frequency is dimensionless.
            t = units.Quantity(t, unit=1. / frequency.unit)
        if dy is not None:
            dy = units.Quantity(dy)
            if not y.unit.is_equivalent(dy.unit):
                raise ValueError("Units of y not equivalent to units of dy")
            # Convert dy into y's unit so the two share a scale.
            dy = units.Quantity(dy, unit=y.unit)
    else:
        power_unit = 1
        t = np.asarray(t)
        y = np.asarray(y)
        if dy is not None:
            dy = np.asarray(dy)

    def get_unit(val):
        # Unit of a Quantity, or 1 for a plain array/None.
        if isinstance(val, units.Quantity):
            return val.unit
        else:
            return 1

    # Note: 'dy' deliberately uses get_unit(y) — dy was converted to y's
    # unit above, so the two units are identical by construction.
    unit_dict = {'t': get_unit(t),
                 'y': get_unit(y),
                 'dy': get_unit(y),
                 'frequency': 1. / get_unit(t),
                 'power': power_unit}

    def unit_strip(arr):
        # Drop units (if any); leave None untouched.
        if arr is None:
            return arr
        else:
            return np.asarray(arr)
    if strip_units:
        t, y, dy, frequency = map(unit_strip, (t, y, dy, frequency))
    return t, y, dy, frequency, unit_dict
def _get_frequency_grid(frequency, assume_regular_frequency=False):
"""Utility to get grid parameters from a frequency array
Parameters
----------
frequency : array_like or Quantity
input frequency grid
assume_regular_frequency : bool (default = False)
if True, then do not check whether frequency is a regular grid
Returns
-------
f0, df, N : scalars
Parameters such that all(frequency == f0 + df * np.arange(N))
"""
frequency = np.asarray(frequency)
if frequency.ndim != 1:
raise ValueError("frequency grid must be 1 dimensional")
elif len(frequency) == 1:
return frequency[0], frequency[0], 1
elif not assume_regular_frequency:
diff = frequency[1:] - frequency[:-1]
if not np.allclose(diff[0], diff):
raise ValueError("frequency must be a regular grid")
return frequency[0], frequency[1] - frequency[0], len(frequency)
def _is_regular(frequency, assume_regular_frequency=False):
if assume_regular_frequency:
return True
frequency = np.asarray(frequency)
if frequency.ndim != 1:
return False
elif len(frequency) == 1:
return True
else:
diff = frequency[1:] - frequency[:-1]
return np.allclose(diff[0], diff)
def _validate_method(method, dy, fit_bias, nterms,
                     frequency, assume_regular_frequency):
    """Resolve ``method='auto'`` and verify the method name is known."""
    # np.ufunc.at (numpy >= 1.8) is required by the fast implementations.
    fast_method_ok = hasattr(np.ufunc, 'at')
    if not fast_method_ok:
        warnings.warn("Fast Lomb-Scargle methods require numpy version 1.8 "
                      "or newer. Using slower methods instead.")

    # automatically choose the appropriate method
    if method == 'auto':
        # Fast methods pay off on large, regular grids.
        prefer_fast = (fast_method_ok and len(frequency) > 100
                       and _is_regular(frequency, assume_regular_frequency))
        if nterms != 1:
            method = 'fastchi2' if prefer_fast else 'chi2'
        elif prefer_fast:
            method = 'fast'
        elif dy is None and not fit_bias:
            method = 'scipy'
        else:
            method = 'slow'

    if method not in METHODS:
        raise ValueError("invalid method: {0}".format(method))

    return method
def lombscargle(t, y, dy=None,
                frequency=None,
                method='auto',
                assume_regular_frequency=False,
                normalization='normalized',
                fit_bias=True, center_data=True,
                method_kwds=None, nterms=1):
    """
    Compute the Lomb-scargle Periodogram with a given method.

    Parameters
    ----------
    t : array_like
        sequence of observation times
    y : array_like
        sequence of observations associated with times t
    dy : float or array_like (optional)
        error or sequence of observational errors associated with times t
    frequency : array_like
        frequencies (not angular frequencies) at which to evaluate the
        periodogram. If not specified, optimal frequencies will be chosen using
        a heuristic which will attempt to provide sufficient frequency range
        and sampling so that peaks will not be missed. Note that in order to
        use method='fast', frequencies must be regularly spaced.
    method : string (optional)
        specify the lomb scargle implementation to use. Options are:

        - 'auto': choose the best method based on the input
        - 'fast': use the O[N log N] fast method. Note that this requires
          evenly-spaced frequencies: by default this will be checked unless
          `assume_regular_frequency` is set to True.
        - `slow`: use the O[N^2] pure-python implementation
        - `chi2`: use the O[N^2] chi2/linear-fitting implementation
        - `fastchi2`: use the O[N log N] chi2 implementation. Note that this
          requires evenly-spaced frequencies: by default this will be checked
          unless `assume_regular_frequency` is set to True.
        - `scipy`: use ``scipy.signal.lombscargle``, which is an O[N^2]
          implementation written in C. Note that this does not support
          heteroskedastic errors.
    assume_regular_frequency : bool (optional)
        if True, assume that the input frequency is of the form
        freq = f0 + df * np.arange(N). Only referenced if method is 'auto'
        or 'fast'.
    normalization : string (optional, default='normalized')
        Normalization to use for the periodogram. Options are 'normalized' or
        'unnormalized'.
    fit_bias : bool (optional, default=True)
        if True, include a constant offset as part of the model at each
        frequency. This can lead to more accurate results, especially in the
        case of incomplete phase coverage.
    center_data : bool (optional, default=True)
        if True, pre-center the data by subtracting the weighted mean
        of the input data. This is especially important if `fit_bias = False`
    method_kwds : dict (optional)
        additional keywords to pass to the lomb-scargle method
    nterms : int (default=1)
        number of Fourier terms to use in the periodogram.
        Not supported with every method.

    Returns
    -------
    PLS : array_like
        Lomb-Scargle power associated with each frequency omega
    """
    if frequency is None:
        raise ValueError("Must supply a valid frequency. If you would like "
                         "an automatic frequency grid, use the "
                         "LombScargle.autopower() method.")

    # Broadcast/validate shapes, check & strip units; unit_dict remembers
    # the stripped units so the returned power carries the right unit.
    t, y, dy, frequency, unit_dict = _validate_inputs(t, y, dy, frequency)

    # Compute on the flattened frequency grid; reshape the power at the end.
    output_shape = frequency.shape
    frequency = frequency.ravel()

    # we'll need to adjust args and kwds for each method
    args = (t, y, dy)
    kwds = dict(frequency=frequency,
                center_data=center_data,
                fit_bias=fit_bias,
                normalization=normalization,
                nterms=nterms,
                **(method_kwds or {}))

    # Resolve method='auto' and reject unknown method names.
    method = _validate_method(method, dy=dy, fit_bias=fit_bias, nterms=nterms,
                              frequency=frequency,
                              assume_regular_frequency=assume_regular_frequency)

    # scipy doesn't support dy or fit_bias=True
    if method == 'scipy':
        if kwds.pop('fit_bias'):
            raise ValueError("scipy method does not support fit_bias=True")
        if dy is not None:
            # Uniform uncertainties carry no information for scipy's
            # implementation, so they are silently dropped here.
            dy = np.ravel(np.asarray(dy))
            if not np.allclose(dy[0], dy):
                raise ValueError("scipy method only supports "
                                 "uniform uncertainties dy")
        args = (t, y)

    # fast methods require frequency expressed as a grid
    if method.startswith('fast'):
        f0, df, Nf = _get_frequency_grid(kwds.pop('frequency'),
                                         assume_regular_frequency)
        kwds.update(f0=f0, df=df, Nf=Nf)

    # only chi2 methods support nterms
    if not method.endswith('chi2'):
        if kwds.pop('nterms') != 1:
            raise ValueError("nterms != 1 only supported with 'chi2' "
                             "or 'fastchi2' methods")

    PLS = METHODS[method](*args, **kwds)
    return PLS.reshape(output_shape) * unit_dict['power']
|
{
"content_hash": "dec03ce8b66084091b181561c1d28076",
"timestamp": "",
"source": "github",
"line_count": 301,
"max_line_length": 80,
"avg_line_length": 35.946843853820596,
"alnum_prop": 0.6081330868761553,
"repo_name": "jakevdp/lombscargle",
"id": "01e489d0bd6a623ef188a18b4aef6abf8f261567",
"size": "10820",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lombscargle/implementations/main.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "123855"
},
{
"name": "Shell",
"bytes": "493"
}
],
"symlink_target": ""
}
|
import re
from django.core import exceptions
from django.db import models
from django.forms import fields
from tower import ugettext as _
class DecimalCharField(models.DecimalField):
    """Like the standard django DecimalField but stored in a varchar

    In order to gracefully read crappy data, use nullify_invalid=True.
    This will set the field's value to None rather than raising an exception
    whenever a non-null, non-decimal string is read from a queryset.
    However, use this option with caution as it also prevents exceptions
    from being raised during model property assignment. This could allow you
    to "successfuly" save a ton of data when all that is really written
    is NULL. It might be best to combine this with the null=False option.
    """
    description = 'Decimal number stored as a varchar'
    # Python 2-style metaclass: SubfieldBase makes Django call to_python()
    # on every attribute assignment and queryset read.
    __metaclass__ = models.SubfieldBase

    def __init__(self, verbose_name=None, name=None, max_digits=None,
            decimal_places=None, nullify_invalid=False, **kwargs):
        self.nullify_invalid = nullify_invalid
        # +1 leaves room for the decimal separator.
        # NOTE(review): a leading minus sign is not accounted for, so a
        # negative value at full precision may exceed max_length — confirm
        # whether callers ever store negatives.
        kwargs['max_length'] = max_digits + 1
        super(DecimalCharField, self).__init__(verbose_name, name,
            max_digits=max_digits, decimal_places=decimal_places, **kwargs)

    def get_internal_type(self):
        # Store in a varchar column despite the DecimalField behavior.
        return "CharField"

    def to_python(self, value):
        # Delegate validation to DecimalField; optionally swallow invalid
        # values as None instead of raising (see class docstring).
        try:
            return super(DecimalCharField, self).to_python(value)
        except exceptions.ValidationError:
            if self.nullify_invalid:
                return None
            else:
                raise

    def get_db_prep_save(self, value, connection, prepared=False):
        if prepared:
            return value
        else:
            return self.get_prep_value(value)

    def get_prep_value(self, value):
        # format_number (from DecimalField) renders the Decimal as a string
        # with the configured precision for the varchar column.
        if value is None:
            return value
        return self.format_number(value)
|
{
"content_hash": "aaa557315d43bf23486c2add2c7ca212",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 76,
"avg_line_length": 34.7037037037037,
"alnum_prop": 0.6670224119530416,
"repo_name": "andymckay/zamboni",
"id": "eaec93bdd484f78e8ba26e2de615d53aa07bf7d9",
"size": "1874",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/amo/fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "357533"
},
{
"name": "JavaScript",
"bytes": "524153"
},
{
"name": "Python",
"bytes": "3863676"
},
{
"name": "Shell",
"bytes": "14980"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.