"""
HiveDB client access via SQLAlchemy
"""
import sqlalchemy as sq
metadata = sq.MetaData()
DB_TYPES = dict(
BIGINT=sq.Integer,
CHAR=sq.String,
DATE=sq.DateTime,
    DOUBLE=sq.Float,
FLOAT=sq.Float,
INTEGER=sq.Integer,
SMALLINT=sq.SmallInteger,
TIMESTAMP=sq.DateTime,
TINYINT=sq.SmallInteger,
VARCHAR=sq.String,
)
hive_primary = sq.Table(
'hive_primary_DIMENSION',
metadata,
# the 'id' column is added dynamically, with type based on
# partition_dimension_metadata.db_type
sq.Column('node', sq.SmallInteger,
nullable=False,
index=True,
),
sq.Column('secondary_index_count', sq.Integer, nullable=False),
# Hive_ERD.png says "date", but I think you want time too
sq.Column('last_updated', sq.DateTime,
nullable=False,
index=True,
),
sq.Column('read_only', sq.Boolean, nullable=False, default=False),
)
hive_secondary = sq.Table(
'hive_secondary_RESOURCE_COLUMN',
metadata,
# TODO this should be whatever datatype
# secondary_index_metadata.db_type says, no uniqueness guarantee
sq.Column('id', sq.Integer,
nullable=True,
index=True,
),
# TODO this should be whatever datatype resource_metadata.db_type
# says; this doesn't point to primary index but to the column
# named by secondary_index_metadata.column_name in the table named
# by resource_metadata.name
sq.Column('pkey', sq.Integer,
nullable=False,
index=True,
),
)
def dynamic_table(table, directory_metadata, name):
"""
Access C{table} under new C{directory_metadata} with new C{name}.
"""
new = directory_metadata.tables.get(name, None)
if new is not None:
return new
new = sq.Table(
name,
directory_metadata,
*[c.copy() for c in table.columns])
return new
def get_primary_table(
directory_metadata,
dimension_name,
db_type,
):
table_name = 'hive_primary_%s' % dimension_name
table = dynamic_table(
table=metadata.tables['hive_primary_DIMENSION'],
directory_metadata=directory_metadata,
name=table_name,
)
table.append_column(
sq.Column(
'id',
DB_TYPES[db_type],
nullable=False,
),
)
#table.constraints.add(sq.UniqueConstraint('id', 'node'))
return table
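# --- Hypothetical usage sketch, not part of the original module ---
# Assumes an older SQLAlchemy where Column.copy() exists, as the code
# above does; 'frob' and 'INTEGER' are made-up example values.
if __name__ == '__main__':
    directory_metadata = sq.MetaData()
    primary = get_primary_table(directory_metadata, 'frob', 'INTEGER')
    assert primary.name == 'hive_primary_frob'
    assert 'id' in primary.columns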
|
{
"content_hash": "0ad7c71fd4403999ea2bc03401f6e2aa",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 70,
"avg_line_length": 27.25,
"alnum_prop": 0.6039090546469884,
"repo_name": "tv42/snakepit",
"id": "fab81c80943dd4c9cd99158994fe18eccb20c1c6",
"size": "2507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snakepit/directory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "58316"
},
{
"name": "Shell",
"bytes": "313"
}
],
"symlink_target": ""
}
|
# [START container_v1beta1_generated_ClusterManager_ListLocations_async]
from google.cloud import container_v1beta1
async def sample_list_locations():
# Create a client
client = container_v1beta1.ClusterManagerAsyncClient()
# Initialize request argument(s)
request = container_v1beta1.ListLocationsRequest(
parent="parent_value",
)
# Make the request
response = await client.list_locations(request=request)
# Handle the response
print(response)
# [END container_v1beta1_generated_ClusterManager_ListLocations_async]
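# A minimal driver sketch (not part of the generated sample): the coroutine
# above must run inside an event loop, and the call requires valid Google
# Cloud credentials and a real parent resource name.
if __name__ == "__main__":
    import asyncio
    asyncio.run(sample_list_locations())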
|
{
"content_hash": "68b1cb6213a43a9105b20e04169d7ac6",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 70,
"avg_line_length": 25.94736842105263,
"alnum_prop": 0.7302231237322515,
"repo_name": "GoogleContainerTools/python-container",
"id": "ffe44d15dee0939fe467cd498ea0470475991cc7",
"size": "1889",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "samples/generated_samples/container_v1beta1_generated_cluster_manager_list_locations_async.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2442194"
},
{
"name": "Shell",
"bytes": "30669"
}
],
"symlink_target": ""
}
|
import argparse
from matplotlib import pyplot as plt
from matplotlib import animation
import pds.pds as pds
class PdsScope(object):
def __init__(self, tagname, npoints=80, ylim=(), offset=0, scale_factor=1,
interval=200, style=None, show_grid=True):
self.store_conf(npoints=npoints, ylim=ylim, offset=offset, scale_factor=scale_factor, interval=interval, style=style, show_grid=show_grid)
self.tagname = tagname
self.conn = None
self.tag = None
self.fig = None
self.line = None
self.data = {'x': [], 'y': []}
def store_conf(self, **kwargs):
self.conf = kwargs
def init_plot(self):
if self.conf['style']:
plt.style.use(self.conf['style'])
self.fig, self.ax = plt.subplots()
self.ax.set_xlim(0, self.conf['npoints'])
plt.title(self.tagname)
# If not explicitly set, we autoscale the y-axis
if self.conf['ylim']:
self.ax.set_ylim(self.conf['ylim'])
        # positional arg works both before and after Matplotlib's b -> visible rename
        self.ax.grid(self.conf['show_grid'])
self.line, = self.ax.plot(self.data['x'], self.data['y'])
return self.line,
def update(self, data):
self.line.set_xdata(self.data['x'])
self.line.set_ydata(self.data['y'])
# canvas.draw is expensive, so we only call it if we have to autoscale
if not self.conf['ylim']:
self.ax.relim()
self.ax.autoscale_view(scalex=False)
self.fig.canvas.draw()
self.fig.canvas.flush_events()
return self.line,
def process_tag(self):
# Used to ensure the latest data scrolls in the plotting area
fixed_x = [i for i in range(self.conf['npoints'])]
i = 0
self.conn = pds.PDSconnect(pds.PDS_IPCKEY)
if self.conn.conn_status != pds.PDS_CONN_OK:
            raise ValueError('Error connecting to the PDS: {}'.format(self.conn.conn_status))
self.tag = pds.PDSget_tag_object(self.conn, self.tagname)
while True:
try:
self.data['x'].append(i)
i += 1
y = int(self.tag.value) * self.conf['scale_factor'] + self.conf['offset']
self.data['y'].append(y)
# Once we've drawn to the RHS, we start scrolling
if len(self.data['x']) > self.conf['npoints']:
self.data['x'] = fixed_x[:]
self.data['y'] = self.data['y'][1:]
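                    # e.g. with npoints=4: x grows to [0, 1, 2, 3, 4], is
                    # reset to [0, 1, 2, 3], and y drops its oldest sample,
                    # so the trace scrolls left by one point per update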
# We must yield an iterable, hence this tuple
yield self.data,
except KeyboardInterrupt:
print('Keyboard Interrupt detected: exiting...')
break
pds.PDSdisconnect(self.conn)
def run(self):
self.anim = animation.FuncAnimation(self.fig, self.update, frames=self.process_tag, blit=True, interval=self.conf['interval'])
plt.show()
def parse_cmdln():
epilog = """Examples
Say we have a sine wave (in a tag called 'sine'), offset by 1, to make all
samples non-negative, and scaled by 1000 to make the samples integer.
Passing the offset (-o) and scale-factor (-f) options normalises the sine
wave to [-1,1]. We set fixed y-axis limits (-y), as autoscaling the y-axis
incurs overhead so it's best to avoid it if we know the function domain in
advance, as we do here:
python3 pds-scope.py -o -1 -f 0.001 -y -1.1 1.1 sine
The same, but with a higher sampling rate by setting a smaller interval
between samples (-i):
python3 pds-scope.py -o -1 -f 0.001 -y -1.1 1.1 -i 40 sine
Turn the grid off (-G) and set the style (-s) to a light signal on a dark
background:
python3 pds-scope.py -o -1 -f 0.001 -y -1.1 1.1 -i 40 sine -G -s dark_background
"""
parser = argparse.ArgumentParser(description='oscilloscope to display the given tag', epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('tagname', help='name of the tag to display')
parser.add_argument('-p', '--npoints', help='number of points N to display', metavar='N', type=int, default=80)
parser.add_argument('-y', '--ylim', help='limits of the y-axis MIN MAX', metavar='N', type=float, nargs=2)
parser.add_argument('-o', '--offset', help='offset N to add to a displaced signal', metavar='N', type=float, default=0)
parser.add_argument('-f', '--scale-factor', help='factor N to multiply a scaled signal by', metavar='N', type=float, default=1)
parser.add_argument('-i', '--interval', help='interval N in ms between samples being taken of the signal', metavar='N', type=int, default=200)
parser.add_argument('-s', '--style', help='stylesheet STYLE to apply to the plotting area, see the matplotlib stylesheets documentation for options', metavar='STYLE', type=str)
group = parser.add_mutually_exclusive_group()
group.add_argument('-g', '--show-grid', help='show a grid in the plotting area', action='store_true', default=True)
group.add_argument('-G', '--no-show-grid', help='don\'t show a grid in the plotting area', action='store_false', default=False, dest='show_grid')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_cmdln()
scope = PdsScope(args.tagname, npoints=args.npoints, ylim=args.ylim, offset=args.offset, scale_factor=args.scale_factor, interval=args.interval, show_grid=args.show_grid, style=args.style)
scope.init_plot()
scope.run()
exit(0)
|
{
"content_hash": "11f5b2876b137d6f46e423b8be2bdc06",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 192,
"avg_line_length": 39.13669064748201,
"alnum_prop": 0.6253676470588235,
"repo_name": "paul-breen/plc-data-server",
"id": "f59c48640567a9358e7e07f9cf4c82fb6ba0bd2e",
"size": "5765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python_utils/pds-scope.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "669460"
},
{
"name": "Lex",
"bytes": "16091"
},
{
"name": "Makefile",
"bytes": "52911"
},
{
"name": "Python",
"bytes": "11129"
},
{
"name": "Shell",
"bytes": "15574"
},
{
"name": "Tcl",
"bytes": "56360"
}
],
"symlink_target": ""
}
|
"""
1. P is the set of decision problems that can be solved by a deterministic Turing machine in polynomial time.
2. NP is the set of decision problems that can be solved by a non-deterministic Turing machine in polynomial
   time; equivalently, problems whose solutions can be verified in polynomial time.
   P is a subset of NP.
3. NP-complete problems are the hardest problems in NP. A decision problem L is NP-complete if:
   a. L is in NP (a candidate solution can be verified in polynomial time, even though finding one may not be efficient)
   b. Every problem in NP is reducible to L in polynomial time
   For example, Boolean satisfiability (SAT) is NP-complete (Cook-Levin theorem).
4. A problem is NP-hard if it satisfies property (b) but not necessarily property (a); for example, the
   halting problem is NP-hard but not in NP.
   So the NP-complete set is also a subset of the NP-hard set.
"""
|
{
"content_hash": "98e4550489ab840271d2ff7883fb085c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 110,
"avg_line_length": 41.92857142857143,
"alnum_prop": 0.7444633730834753,
"repo_name": "armsky/Algorithms",
"id": "2bb6901177bcf29ea6dd28a4300b83bdec2fba78",
"size": "587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Google/np-complete.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "19955"
},
{
"name": "HTML",
"bytes": "155023"
},
{
"name": "Java",
"bytes": "1844"
},
{
"name": "Python",
"bytes": "180747"
}
],
"symlink_target": ""
}
|
from flask import Flask,render_template,request,redirect
import loganalysis_v2
import user
import gconf
app=Flask(__name__)
@app.route('/')
def index():
return render_template('login.html')
@app.route('/log/',methods=['POST','GET'])
def log():
params = request.args if request.method == 'GET' else request.form
topn=params.get('topn',10)
topn=int(topn) if str(topn).isdigit() else 10
SrcFileName='www_access_20140823.log'
rt_list=loganalysis_v2.FilterNginx(SrcFileName,Num=topn)
return render_template('log.html',rt_list=rt_list,title="top"+str(topn))
@app.route('/login/',methods=['POST','GET'])
def login():
params = request.args if request.method == 'GET' else request.form
username=params.get('username', '')
password=params.get('password','')
if user.validate_user(username, password):
return redirect('/users/')
else:
        return render_template('login.html',username=username,error=u'Incorrect username or password')
@app.route('/users/')
def users():
UserList=user.GetUser(gconf.UserFile)
return render_template('users.html',userlist=UserList)
@app.route('/user/create/')
def usercreate():
return render_template('createuser.html')
@app.route('/user/add/',methods=['POST','GET'])
def useradd():
params = request.args if request.method == 'GET' else request.form
username,password,age=params.get('username',''),params.get('password',''),params.get('age','')
Flag=user.JudgUser(username)
    print(Flag)
if Flag:
        return render_template('createuser.html',userexist=u'Sorry, user %s already exists' %(username))
else:
Flag=user.AddUser(username, password, age)
if Flag:
UserList=user.GetUser(gconf.UserFile)
            return render_template('users.html',userlist=UserList,color='green',Flag=u'Congratulations, user added successfully')
else:
            return render_template('createuser.html',userexist=u'Sorry, failed to add user %s' %(username))
@app.route('/user/modify/',methods=['GET'])
def usermodify():
username,password,age=request.args.get('username',''),request.args.get('password',''),request.args.get('age','')
return render_template('modifyuser.html',username=username,password=password,age=age)
@app.route('/user/change/',methods=['POST','GET'])
def userchange():
params = request.args if request.method == 'GET' else request.form
username,password,age=params.get('username',''),params.get('password',''),params.get('age','')
Flag=user.ChangeUser(username, password, age)
if Flag=='samepassword':
        return render_template('modifyuser.html',username=username,password=password,age=age,samepassword=u'Sorry, the new password for user %s cannot be the same as the old password' %(username))
elif Flag:
UserList=user.GetUser(gconf.UserFile)
        return render_template('users.html',userlist=UserList,color='green',Flag=u'Congratulations, user modified successfully')
else:
        return render_template('modifyuser.html',error=u'Sorry, failed to modify user %s' %(username))
@app.route('/user/del/',methods=['GET'])
def userdel():
username=request.args.get('username','')
Flag=user.DelUser(username)
if Flag:
UserList=user.GetUser(gconf.UserFile)
        return render_template('users.html',userlist=UserList,color='green',Flag=u'Congratulations, user %s deleted successfully' %(username))
else:
UserList=user.GetUser(gconf.UserFile)
        return render_template('users.html',userlist=UserList,color='red',Flag=u'Sorry, failed to delete user %s' %(username))
if __name__ == '__main__':
app.run(host='0.0.0.0', port=8080, debug=True)
'''
Nice work, keep it up. It's fine to reference other people's code, but it is
best to type the code out yourself, so you learn where the problems are.
Improvements
1. On the edit-user page there are currently only three attributes, and passing
them all to the backend via GET and re-rendering them into the template works.
The more common approach, however, is to pass only the user's unique identifier,
look up the stored information by that identifier on the backend, and echo it
back into the edit page.
'''
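# A minimal sketch of the improvement suggested above. The helper
# user.GetUserByName is hypothetical; only the username travels in the URL:
#
# @app.route('/user/modify/', methods=['GET'])
# def usermodify():
#     username = request.args.get('username', '')
#     info = user.GetUserByName(username)  # look up by unique key
#     return render_template('modifyuser.html', **info)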
|
{
"content_hash": "142fe864d2f6e39804ee4b7a7597477f",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 143,
"avg_line_length": 38.56989247311828,
"alnum_prop": 0.6849735154725397,
"repo_name": "51reboot/actual_09_homework",
"id": "888ead60f54ede27fcda32b834246af963748cca",
"size": "4071",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "05/qicheng/app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4623850"
},
{
"name": "HTML",
"bytes": "90670692"
},
{
"name": "JavaScript",
"bytes": "31827839"
},
{
"name": "Nginx",
"bytes": "1073"
},
{
"name": "PHP",
"bytes": "349512"
},
{
"name": "Python",
"bytes": "1705997"
},
{
"name": "Shell",
"bytes": "10001"
},
{
"name": "Smarty",
"bytes": "342164"
}
],
"symlink_target": ""
}
|
import os
import re
import hmac
import jinja2
import hashlib
import random
from string import letters
from google.appengine.ext import db
# Jinja configuration
template_dir = os.path.join(os.path.dirname(__file__), 'templates')
jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir),
autoescape=True)
# Global functions
def render_str(template, **params):
t = jinja_env.get_template(template)
return t.render(params)
# Model keys
def users_key(group='default'):
return db.Key.from_path('users', group)
def blog_key(name='default'):
return db.Key.from_path('blogs', name)
# Validation
USER_RE = re.compile(r"^[a-zA-Z0-9_-]{3,20}$")
PASS_RE = re.compile(r"^.{3,20}$")
EMAIL_RE = re.compile(r'^[\S]+@[\S]+\.[\S]+$')
def valid_username(username):
return username and USER_RE.match(username)
def valid_password(password):
return password and PASS_RE.match(password)
def valid_email(email):
return not email or EMAIL_RE.match(email)
# Authentication
secret = 'fart'
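# NOTE: in a real deployment this signing secret should be loaded from
# configuration or the environment rather than hard-coded in source.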
def make_pw_hash(name, password, salt=None):
if not salt:
salt = make_salt()
h = hashlib.sha256(name + password + salt).hexdigest()
return '%s,%s' % (salt, h)
def make_salt(length=5):
return ''.join(random.choice(letters) for x in xrange(length))
def valid_pw(name, password, h):
salt = h.split(',')[0]
return h == make_pw_hash(name, password, salt)
def make_secure_val(val):
return '%s|%s' % (val, hmac.new(secret, val).hexdigest())
def check_secure_val(secure_val):
val = secure_val.split('|')[0]
if secure_val == make_secure_val(val):
return val
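# Hypothetical round-trip sketch (Python 2, matching this module's use of
# string.letters and xrange; not part of the original file):
if __name__ == '__main__':
    h = make_pw_hash('alice', 'hunter2')
    assert valid_pw('alice', 'hunter2', h)
    assert check_secure_val(make_secure_val('42')) == '42'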
|
{
"content_hash": "bbaebabea3deb16c9c391c01e87fdc2c",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 76,
"avg_line_length": 22.5,
"alnum_prop": 0.66006006006006,
"repo_name": "mr-karan/Udacity-FullStack-ND004",
"id": "1076f7a66bd73450dc99727b5643ee8c1196eadc",
"size": "1665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Project3/udacityblog-159515/helpers.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14903"
},
{
"name": "HTML",
"bytes": "25466"
},
{
"name": "JavaScript",
"bytes": "5166"
},
{
"name": "Python",
"bytes": "44170"
},
{
"name": "Shell",
"bytes": "117"
}
],
"symlink_target": ""
}
|
from .manager import JSONRPCResponseManager
from .dispatcher import Dispatcher
__version = (1, 13, 0)
__version__ = version = '.'.join(map(str, __version))
__project__ = PROJECT = __name__
dispatcher = Dispatcher()
# lint_ignore=W0611,W0401
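# Illustrative usage sketch, mirroring the project's README-style API:
#
#     from jsonrpc import JSONRPCResponseManager, dispatcher
#
#     @dispatcher.add_method
#     def echo(message):
#         return message
#
#     request = '{"jsonrpc": "2.0", "method": "echo", "params": ["hi"], "id": 1}'
#     response = JSONRPCResponseManager.handle(request, dispatcher)
#     print(response.json)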
|
{
"content_hash": "2f0904f3cc25e93cf2a9bac0d0cac3a3",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 53,
"avg_line_length": 22.272727272727273,
"alnum_prop": 0.689795918367347,
"repo_name": "pavlov99/json-rpc",
"id": "753838fc33cbc55d08e3f73ae80a802e2c1e1e73",
"size": "245",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jsonrpc/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "469"
},
{
"name": "Python",
"bytes": "138537"
}
],
"symlink_target": ""
}
|
"""Provides strategy object."""
from __future__ import absolute_import
import re
from ..entity import Entity
from ..utils import PATHS, suppress
PIXEL_PATTERN = re.compile(r'\[(\d+)\]')
OPERATOR_PATTERN = re.compile(r'(AND|OR)')
class Strategy(Entity):
"""docstring for Strategy."""
collection = 'strategies'
resource = 'strategy'
_relations = {
'campaign', 'currency', 'time_zone',
}
_aud_seg_exc = Entity._enum({'AND', 'OR'}, 'OR')
_aud_seg_inc = Entity._enum({'AND', 'OR'}, 'OR')
_freq_int = Entity._enum({'hour', 'day', 'week', 'month', 'campaign',
'not-applicable'}, 'not-applicable')
_freq_type = Entity._enum({'even', 'asap', 'no-limit'}, 'no-limit')
_goal_type = Entity._enum({'spend', 'reach', 'cpc', 'cpe', 'cpa', 'roi'},
'cpc')
_media_type = Entity._enum({'DISPLAY', 'VIDEO'}, 'DISPLAY')
_pac_int = Entity._enum({'hour', 'day'}, 'day')
_pac_type = Entity._enum({'even', 'asap'}, 'even')
_site_selec = Entity._enum({'MATHSELECT_250', 'EXCLUDE_UGC', 'ALL',
'REDUCED'}, 'REDUCED')
_supply_type = Entity._enum({'RTB', 'RMX_API', 'T1_RMX'}, 'RTB')
_type = Entity._enum({'REM', 'GBO', 'AUD'}, 'GBO')
_pull = {
'audience_segment_exclude_op': None,
'audience_segment_include_op': None,
'bid_aggresiveness': float,
'bid_price_is_media_only': Entity._int_to_bool,
'budget': float,
'campaign_id': int,
'created_on': Entity._strpt,
'description': None,
'effective_goal_value': float,
'end_date': Entity._strpt,
'feature_compatibility': None,
'frequency_amount': int,
'frequency_interval': None,
'frequency_type': None,
'goal_type': None,
'goal_value': float,
'id': int,
'impression_cap': int,
'max_bid': float,
'media_type': None,
'name': None,
'pacing_amount': float,
'pacing_interval': None,
'pacing_type': None,
'pixel_target_expr': None,
'run_on_all_exchanges': Entity._int_to_bool,
'run_on_all_pmp': Entity._int_to_bool,
'run_on_display': Entity._int_to_bool,
'run_on_mobile': Entity._int_to_bool,
'run_on_streaming': Entity._int_to_bool,
'site_restriction_transparent_urls': Entity._int_to_bool,
'site_selectiveness': None,
'start_date': Entity._strpt,
'status': Entity._int_to_bool,
'supply_type': None,
'type': None,
'updated_on': Entity._strpt,
'use_campaign_end': Entity._int_to_bool,
'use_campaign_start': Entity._int_to_bool,
'use_mm_freq': Entity._int_to_bool,
'use_optimization': Entity._int_to_bool,
'version': int,
}
_push = _pull.copy()
_push.update({
'audience_segment_exclude_op': _aud_seg_exc,
'audience_segment_include_op': _aud_seg_inc,
'bid_price_is_media_only': int,
'end_date': Entity._strft,
'frequency_interval': _freq_int,
'frequency_type': _freq_type,
'goal_type': _goal_type,
'media_type': _media_type,
'pacing_interval': _pac_int,
'pacing_type': _pac_type,
'run_on_all_exchanges': int,
'run_on_all_pmp': int,
'run_on_display': int,
'run_on_mobile': int,
'run_on_streaming': int,
'site_restriction_transparent_urls': int,
'site_selectiveness': _site_selec,
'start_date': Entity._strft,
'status': int,
'supply_type': _supply_type,
'type': _type,
'use_campaign_end': int,
'use_campaign_start': int,
'use_mm_freq': int,
'use_optimization': int,
})
_readonly = Entity._readonly | {'effective_goal_value', }
def __init__(self, session, properties=None, **kwargs):
super(Strategy, self).__init__(session, properties, **kwargs)
try:
self.pixel_target_expr
except AttributeError:
self.pixel_target_expr = ''
self._deserialize_target_expr()
def _deserialize_target_expr(self):
"""Deserialize pixel_target_expr string into dict"""
if 'AND NOT' in self.pixel_target_expr:
include_string, exclude_string = self.pixel_target_expr.split('AND NOT')
elif 'NOT' in self.pixel_target_expr:
include_string, exclude_string = self.pixel_target_expr.split('NOT')
elif self.pixel_target_expr:
include_string = self.pixel_target_expr
exclude_string = ''
else:
include_string = ''
exclude_string = ''
include_operator = OPERATOR_PATTERN.search(include_string)
exclude_operator = OPERATOR_PATTERN.search(exclude_string)
if include_operator:
include_operator = include_operator.group(0)
if exclude_operator:
exclude_operator = exclude_operator.group(0)
self.pixel_target_expr = {
'include': {
'pixels': [int(pix) for pix in PIXEL_PATTERN.findall(include_string)],
'operator': include_operator,
},
'exclude': {
'pixels': [int(pix) for pix in PIXEL_PATTERN.findall(exclude_string)],
'operator': exclude_operator,
},
}
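        # Example (illustrative): the expression
        #     '( [123] OR [456] ) AND NOT ( [789] )'
        # deserializes to
        #     {'include': {'pixels': [123, 456], 'operator': 'OR'},
        #      'exclude': {'pixels': [789], 'operator': None}}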
def save_supplies(self, data):
url = self._construct_url(addl=['supplies', ])
entity, _ = super(Strategy, self)._post(PATHS['mgmt'], url, data)
self._update_self(next(entity))
self._deserialize_target_expr()
if 'relations' in self.properties:
del self.properties['relations']
def save_domains(self, data):
url = self._construct_url(addl=['domain_restrictions', ])
# this endpoint doesn't return an entity like the supplies endpoint
# so we ignore the error
with suppress(AttributeError):
entity, _ = super(Strategy, self)._post(PATHS['mgmt'], url, data)
# you can't get these values so we don't need to reset anything
def _serialize_target_expr(self):
"""Serialize pixel_target_expr dict into string"""
include_bool = '] {} ['.format(self.pixel_target_expr['include']['operator'] or 'OR')
include_pixels = self.pixel_target_expr['include']['pixels']
exclude_bool = '] {} ['.format(self.pixel_target_expr['exclude']['operator'] or 'OR')
exclude_pixels = self.pixel_target_expr['exclude']['pixels']
include_string = '( [{}] )'.format(include_bool.join(
str(pix) for pix in include_pixels)) if include_pixels else ''
exclude_string = 'NOT ( [{}] )'.format(exclude_bool.join(
str(pix) for pix in exclude_pixels)) if exclude_pixels else ''
if include_string and exclude_string:
return '{} AND {}'.format(include_string, exclude_string)
else:
return include_string + exclude_string
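        # Example (illustrative): the include/exclude dict shown for
        # _deserialize_target_expr above round-trips back to
        #     '( [123] OR [456] ) AND NOT ( [789] )'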
def save(self, data=None, url=None):
self.pixel_target_expr = self._serialize_target_expr()
if data is None:
data = self.properties.copy()
if getattr(self, 'use_campaign_start', False):
data.pop('start_date', None)
if getattr(self, 'use_campaign_end', False):
data.pop('end_date', None)
super(Strategy, self).save(data=data, url=url)
self._deserialize_target_expr()
@property
def pixel_target_expr_string(self):
"""Return string version of pixel_target_expr"""
return self._serialize_target_expr()
|
{
"content_hash": "592fffb59882bd77aed77a21864fb1d6",
"timestamp": "",
"source": "github",
"line_count": 196,
"max_line_length": 93,
"avg_line_length": 39.01020408163265,
"alnum_prop": 0.566309181271253,
"repo_name": "leiforion/t1-python",
"id": "eb3e65ab5544baa492731d116bd5f6991e778363",
"size": "7670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "terminalone/models/strategy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "141331"
}
],
"symlink_target": ""
}
|
'''
A home for the vdb specific memory renderers.
'''
import envi
import vtrace
import envi.bits as e_bits
import envi.memory as e_mem
import envi.memcanvas as e_canvas
import vivisect.impapi as viv_impapi
import envi.memcanvas.renderers as e_canvas_rend
class OpcodeRenderer(e_canvas.MemoryRenderer):
def __init__(self, trace, arch=envi.ARCH_DEFAULT):
self.arch = arch
self.emu_cache = {} # arch_num: emu instance
self.pwidth = trace.getPointerSize()
self.pformat = '0x%%.%dx' % ( self.pwidth * 2 )
def _getOpcodePrefix(self, trace, va, op):
regs = trace.getRegisters()
regs = dict([ (rval,rname) for (rname,rval) in regs.items() if rval != 0 ])
bp = trace.getBreakpointByAddr(va)
if bp != None:
return ('bp[%d]' % bp.id).ljust(8)
rname = regs.get( va )
if rname != None:
return rname[:7].ljust(8)
return ' '
def _getOpcodeSuffix(self, trace, va, op):
pc = trace.getProgramCounter()
if va != pc:
return ''
ovals = []
for o in op.opers:
if o.isDeref():
ova = o.getOperAddr(op, trace)
else:
ova = o.getOperValue(op, trace)
sym = None
if trace.isValidPointer(ova):
rova = trace.readMemoryFormat(ova, '<P')[0]
sym = trace.getSymByAddr(rova)
if sym == None:
sym = trace.getSymByAddr(ova)
if sym:
ovals.append(repr(sym))
elif o.isDeref():
ovals.append('[0x%.8x]' % ova)
else:
ovals.append('0x%.8x' % ova)
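        # For conditional branches, single-step a scratch emulator seeded
        # with the live registers to show whether the branch will be taken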
if [branch for branch, flag in op.getBranches() if flag & envi.BR_COND]:
            emu = self.emu_cache.get(self.arch)
            if emu is None:
                # build (and cache) an emulator for this arch on first use
                emu = self.emu_cache[self.arch] = vtrace.getEmu(trace)
emu.setRegisters(trace.getRegisters())
emu.setProgramCounter(va)
emu.executeOpcode(op)
nextpc = emu.getProgramCounter()
if va + len(op) != nextpc:
ovals.append('Branch taken: 0x%08x' % nextpc)
else:
ovals.append('Branch not taken: 0x%08x' % nextpc)
return ','.join(ovals)
def render(self, mcanv, va):
vastr = self.pformat % va
# NOTE: we assume the memobj is a trace
trace = mcanv.mem
sym = trace.getSymByAddr(va)
if sym != None:
mcanv.addText('\n')
mcanv.addVaText(str(sym), va=va)
mcanv.addText(':\n')
op = trace.parseOpcode(va, arch=self.arch)
obytes = trace.readMemory(va, op.size)[:8]
prefix = self._getOpcodePrefix(trace, va, op)
mcanv.addText(prefix)
mcanv.addVaText(vastr, va=va)
mcanv.addText(": %s " % obytes.encode('hex').ljust(17))
op.render(mcanv)
try:
suffix = self._getOpcodeSuffix(trace, va, op)
if suffix:
mcanv.addText(' ;'+suffix)
except Exception, e:
mcanv.addText('; suffix error: %s' % e)
mcanv.addText("\n")
return len(op)
class SymbolRenderer(e_canvas.MemoryRenderer):
def __init__(self, trace):
a = trace.getMeta("Architecture")
self.arch = envi.getArchModule(a)
self.pwidth = self.arch.getPointerSize()
def render(self, mcanv, va):
        # This is only used with tracer based stuff...
        # NOTE: we assume the memobj is a trace
        trace = mcanv.mem
        vastr = self.arch.pointerString(va)
p = trace.readMemoryFormat(va, 'P')[0]
isptr = trace.isValidPointer(p)
pstr = self.arch.pointerString(p)
mcanv.addVaText(vastr, va=va)
mcanv.addText(": ")
if isptr:
mcanv.addVaText(pstr, p)
else:
mcanv.addText(pstr)
if isptr:
sym = trace.getSymByAddr(p, exact=False)
if sym != None:
mcanv.addText(' %s + %d' % (repr(sym), p-long(sym)))
mcanv.addText('\n')
return self.pwidth
class DerefRenderer(e_canvas.MemoryRenderer):
def __init__(self, trace):
a = trace.getMeta("Architecture")
self.arch = envi.getArchModule(a)
self.pwidth = self.arch.getPointerSize()
def renderData(self, mcanv, va):
vastr = self.arch.pointerString(va)
# NOTE: we assume the memobj is a trace
trace = mcanv.mem
p = trace.readMemoryFormat(va, 'P')[0]
isptr = trace.isValidPointer(p)
pstr = self.arch.pointerString(p)
vareg = ""
preg = ""
regs = trace.getRegisters()
for name,val in regs.items():
if val == 0:
continue
if val == va:
vareg = "(%s)" % name
if val == p:
preg = "(%s)" % name
bt = trace.getStackTrace()
if len(bt) > 1:
for i in range(1, len(bt)):
spc, sfc = bt[i]
if sfc == 0:
break
if spc == 0:
break
if va == spc:
vareg = "(savepc)"
if va == sfc:
vareg = "(frame%d)" % i
if p == spc:
preg = "(savepc)"
if p == sfc:
preg = "(frame%d)" % i
vareg = vareg.ljust(8)
preg = preg.ljust(8)
mcanv.addText(" %s: " % str(va - mcanv._canv_beginva).ljust(5))
mcanv.addVaText(vastr, va=va)
mcanv.addText(" %s: " % vareg)
if isptr:
mcanv.addVaText(pstr, p)
else:
mcanv.addText(pstr)
mcanv.addText(preg)
def renderMetadata(self, mcanv, va):
trace = mcanv.mem
p = trace.readMemoryFormat(va, 'P')[0]
e_canvas_rend.AutoBytesRenderer().render(mcanv, p)
def render(self, mcanv, va):
self.renderData(mcanv, va)
self.renderMetadata(mcanv, va)
mcanv.addText('\n')
return self.arch.getPointerSize()
class StackRenderer(DerefRenderer):
def __init__(self, trace):
DerefRenderer.__init__(self, trace)
def render(self, mcanv, va):
trace = mcanv.mem
if va != trace.getStackCounter():
return DerefRenderer.render(self, mcanv, va)
pc = trace.getProgramCounter()
sym, is_thunk = trace.getSymByAddrThunkAware(pc)
if sym == None:
return DerefRenderer.render(self, mcanv, va)
# TODO: this code also exists in win32stealth and in hookbreakpoint
# we should put this somewhere common
platform = trace.getMeta('Platform')
arch = trace.getMeta('Architecture')
impapi = viv_impapi.getImportApi(platform, arch)
cc_name = impapi.getImpApiCallConv(sym)
emu = vtrace.getEmu(trace)
cc = emu.getCallingConvention(cc_name)
args_def = impapi.getImpApiArgs(sym)
if args_def == None:
# sym did not exist in impapi :(
print('sym but no impapi match: {}'.format(sym))
return DerefRenderer.render(self, mcanv, va)
argc = len(args_def)
curop = trace.parseOpcode(trace.getProgramCounter())
# use the calling convention to retrieve the args
args = None
if curop.isCall() or is_thunk:
args = cc.getPreCallArgs(trace, argc)
else:
args = cc.getCallArgs(trace, argc)
# since we are 'normalizing' the calls by visualizing all calling
# conventions in a stdcall fashion, some args (like the ones in
# registers don't have a stack va.
mcanv.addText('%s :\n' % sym)
fmt = ' arg%%d (%%s) 0x%%0%dx %%s\n' % (trace.getPointerSize()*2,)
for index, arg in enumerate(args):
argtype = args_def[index][0]
argva = arg
if trace.isValidPointer(arg):
argva = trace.readMemoryFormat(arg, 'P')[0]
smc = e_canvas.StringMemoryCanvas(trace)
e_canvas_rend.AutoBytesRenderer(maxrend=64).render(smc, argva)
desc = str(smc)
mcanv.addText(fmt % (index, argtype, arg, desc))
mcanv.addText('-' * 5)
mcanv.addText('\n')
return DerefRenderer.render(self, mcanv, va)
|
{
"content_hash": "d0109d7d52c822d5bf11309b94b29f20",
"timestamp": "",
"source": "github",
"line_count": 263,
"max_line_length": 83,
"avg_line_length": 31.836501901140686,
"alnum_prop": 0.536725188104622,
"repo_name": "imjonsnooow/vivisect",
"id": "75ba4a95fe813bb9f90045f647f405fa0dbc97b0",
"size": "8373",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "vdb/renderers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "167795"
},
{
"name": "CSS",
"bytes": "15980"
},
{
"name": "Makefile",
"bytes": "355"
},
{
"name": "Python",
"bytes": "11397213"
},
{
"name": "Shell",
"bytes": "476"
}
],
"symlink_target": ""
}
|
import re
import copy
import numpy as np
from astropy import _erfa as erfa
from astropy.utils.compat.misc import override__dir__
from astropy import units as u
from astropy.constants import c as speed_of_light
from astropy.wcs.utils import skycoord_to_pixel, pixel_to_skycoord
from astropy.utils.data_info import MixinInfo
from astropy.utils import ShapedLikeNDArray
from astropy.time import Time
from .distances import Distance
from .angles import Angle
from .baseframe import (BaseCoordinateFrame, frame_transform_graph,
GenericFrame)
from .builtin_frames import ICRS, SkyOffsetFrame
from .representation import (SphericalRepresentation,
UnitSphericalRepresentation, SphericalDifferential)
from .sky_coordinate_parsers import (_get_frame_class, _get_frame_without_data,
_parse_coordinate_data)
__all__ = ['SkyCoord', 'SkyCoordInfo']
class SkyCoordInfo(MixinInfo):
"""
Container for meta information like name, description, format. This is
required when the object is used as a mixin column within a table, but can
be used as a general way to store meta information.
"""
attrs_from_parent = set(['unit']) # Unit is read-only
_supports_indexing = False
@staticmethod
def default_format(val):
repr_data = val.info._repr_data
formats = ['{0.' + compname + '.value:}' for compname
in repr_data.components]
return ','.join(formats).format(repr_data)
@property
def unit(self):
repr_data = self._repr_data
unit = ','.join(str(getattr(repr_data, comp).unit) or 'None'
for comp in repr_data.components)
return unit
@property
def _repr_data(self):
if self._parent is None:
return None
sc = self._parent
if (issubclass(sc.representation_type, SphericalRepresentation)
and isinstance(sc.data, UnitSphericalRepresentation)):
repr_data = sc.represent_as(sc.data.__class__, in_frame_units=True)
else:
repr_data = sc.represent_as(sc.representation_type,
in_frame_units=True)
return repr_data
def _represent_as_dict(self):
obj = self._parent
attrs = (list(obj.representation_component_names) +
list(frame_transform_graph.frame_attributes.keys()))
# Don't output distance if it is all unitless 1.0
if 'distance' in attrs and np.all(obj.distance == 1.0):
attrs.remove('distance')
out = super()._represent_as_dict(attrs)
out['representation_type'] = obj.representation_type.get_name()
out['frame'] = obj.frame.name
# Note that obj.info.unit is a fake composite unit (e.g. 'deg,deg,None'
# or None,None,m) and is not stored. The individual attributes have
# units.
return out
class SkyCoord(ShapedLikeNDArray):
"""High-level object providing a flexible interface for celestial coordinate
representation, manipulation, and transformation between systems.
The `SkyCoord` class accepts a wide variety of inputs for initialization. At
a minimum these must provide one or more celestial coordinate values with
unambiguous units. Inputs may be scalars or lists/tuples/arrays, yielding
scalar or array coordinates (can be checked via ``SkyCoord.isscalar``).
Typically one also specifies the coordinate frame, though this is not
required. The general pattern for spherical representations is::
SkyCoord(COORD, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [FRAME], keyword_args ...)
SkyCoord(LON, LAT, [DISTANCE], frame=FRAME, unit=UNIT, keyword_args ...)
SkyCoord([FRAME], <lon_attr>=LON, <lat_attr>=LAT, keyword_args ...)
It is also possible to input coordinate values in other representations
such as cartesian or cylindrical. In this case one includes the keyword
argument ``representation_type='cartesian'`` (for example) along with data
in ``x``, ``y``, and ``z``.
See also: http://docs.astropy.org/en/stable/coordinates/
Examples
--------
The examples below illustrate common ways of initializing a `SkyCoord`
object. For a complete description of the allowed syntax see the
full coordinates documentation. First some imports::
>>> from astropy.coordinates import SkyCoord # High-level coordinates
>>> from astropy.coordinates import ICRS, Galactic, FK4, FK5 # Low-level frames
>>> from astropy.coordinates import Angle, Latitude, Longitude # Angles
>>> import astropy.units as u
The coordinate values and frame specification can now be provided using
positional and keyword arguments::
>>> c = SkyCoord(10, 20, unit="deg") # defaults to ICRS frame
>>> c = SkyCoord([1, 2, 3], [-30, 45, 8], frame="icrs", unit="deg") # 3 coords
>>> coords = ["1:12:43.2 +1:12:43", "1 12 43.2 +1 12 43"]
>>> c = SkyCoord(coords, frame=FK4, unit=(u.deg, u.hourangle), obstime="J1992.21")
>>> c = SkyCoord("1h12m43.2s +1d12m43s", frame=Galactic) # Units from string
>>> c = SkyCoord(frame="galactic", l="1h12m43.2s", b="+1d12m43s")
>>> ra = Longitude([1, 2, 3], unit=u.deg) # Could also use Angle
>>> dec = np.array([4.5, 5.2, 6.3]) * u.deg # Astropy Quantity
>>> c = SkyCoord(ra, dec, frame='icrs')
>>> c = SkyCoord(frame=ICRS, ra=ra, dec=dec, obstime='2001-01-02T12:34:56')
>>> c = FK4(1 * u.deg, 2 * u.deg) # Uses defaults for obstime, equinox
>>> c = SkyCoord(c, obstime='J2010.11', equinox='B1965') # Override defaults
>>> c = SkyCoord(w=0, u=1, v=2, unit='kpc', frame='galactic',
... representation_type='cartesian')
>>> c = SkyCoord([ICRS(ra=1*u.deg, dec=2*u.deg), ICRS(ra=3*u.deg, dec=4*u.deg)])
Velocity components (proper motions or radial velocities) can also be
provided in a similar manner::
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, radial_velocity=10*u.km/u.s)
>>> c = SkyCoord(ra=1*u.deg, dec=2*u.deg, pm_ra_cosdec=2*u.mas/u.yr, pm_dec=1*u.mas/u.yr)
As shown, the frame can be a `~astropy.coordinates.BaseCoordinateFrame`
class or the corresponding string alias. The frame classes that are built in
to astropy are `ICRS`, `FK5`, `FK4`, `FK4NoETerms`, and `Galactic`.
The string aliases are simply lower-case versions of the class name, and
allow for creating a `SkyCoord` object and transforming frames without
explicitly importing the frame classes.
Parameters
----------
frame : `~astropy.coordinates.BaseCoordinateFrame` class or string, optional
Type of coordinate frame this `SkyCoord` should represent. Defaults to
        ICRS if not given or given as None.
unit : `~astropy.units.Unit`, string, or tuple of :class:`~astropy.units.Unit` or str, optional
Units for supplied ``LON`` and ``LAT`` values, respectively. If
only one unit is supplied then it applies to both ``LON`` and
``LAT``.
obstime : valid `~astropy.time.Time` initializer, optional
Time(s) of observation.
equinox : valid `~astropy.time.Time` initializer, optional
Coordinate frame equinox.
representation_type : str or Representation class
Specifies the representation, e.g. 'spherical', 'cartesian', or
'cylindrical'. This affects the positional args and other keyword args
which must correspond to the given representation.
copy : bool, optional
If `True` (default), a copy of any coordinate data is made. This
argument can only be passed in as a keyword argument.
**keyword_args
Other keyword arguments as applicable for user-defined coordinate frames.
Common options include:
ra, dec : valid `~astropy.coordinates.Angle` initializer, optional
RA and Dec for frames where ``ra`` and ``dec`` are keys in the
frame's ``representation_component_names``, including `ICRS`,
`FK5`, `FK4`, and `FK4NoETerms`.
pm_ra_cosdec, pm_dec : `~astropy.units.Quantity`, optional
Proper motion components, in angle per time units.
l, b : valid `~astropy.coordinates.Angle` initializer, optional
            Galactic ``l`` and ``b`` for frames where ``l`` and ``b`` are
keys in the frame's ``representation_component_names``, including
the `Galactic` frame.
pm_l_cosb, pm_b : `~astropy.units.Quantity`, optional
Proper motion components in the `Galactic` frame, in angle per time
units.
x, y, z : float or `~astropy.units.Quantity`, optional
Cartesian coordinates values
u, v, w : float or `~astropy.units.Quantity`, optional
Cartesian coordinates values for the Galactic frame.
radial_velocity : `~astropy.units.Quantity`, optional
The component of the velocity along the line-of-sight (i.e., the
radial direction), in velocity units.
"""
# Declare that SkyCoord can be used as a Table column by defining the
# info property.
info = SkyCoordInfo()
def __init__(self, *args, copy=True, **kwargs):
# these are frame attributes set on this SkyCoord but *not* a part of
# the frame object this SkyCoord contains
self._extra_frameattr_names = set()
# If all that is passed in is a frame instance that already has data,
# we should bypass all of the parsing and logic below. This is here
# to make this the fastest way to create a SkyCoord instance. Many of
# the classmethods implemented for performance enhancements will use
# this as the initialization path
if (len(args) == 1 and len(kwargs) == 0
and isinstance(args[0], (BaseCoordinateFrame, SkyCoord))):
coords = args[0]
if isinstance(coords, SkyCoord):
self._extra_frameattr_names = coords._extra_frameattr_names
self.info = coords.info
# Copy over any extra frame attributes
for attr_name in self._extra_frameattr_names:
# Setting it will also validate it.
setattr(self, attr_name, getattr(coords, attr_name))
coords = coords.frame
if not coords.has_data:
raise ValueError('Cannot initialize from a coordinate frame '
'instance without coordinate data')
if copy:
self._sky_coord_frame = coords.copy()
else:
self._sky_coord_frame = coords
else:
# Get the frame instance without coordinate data but with all frame
# attributes set - these could either have been passed in with the
# frame as an instance, or passed in as kwargs here
frame_cls, frame_kwargs = _get_frame_without_data(args, kwargs)
# Parse the args and kwargs to assemble a sanitized and validated
# kwargs dict for initializing attributes for this object and for
# creating the internal self._sky_coord_frame object
args = list(args) # Make it mutable
skycoord_kwargs, components, info = _parse_coordinate_data(
frame_cls(**frame_kwargs), args, kwargs)
# In the above two parsing functions, these kwargs were identified
# as valid frame attributes for *some* frame, but not the frame that
# this SkyCoord will have. We keep these attributes as special
# skycoord frame attributes:
for attr in skycoord_kwargs:
# Setting it will also validate it.
setattr(self, attr, skycoord_kwargs[attr])
if info is not None:
self.info = info
# Finally make the internal coordinate object.
frame_kwargs.update(components)
self._sky_coord_frame = frame_cls(copy=copy, **frame_kwargs)
if not self._sky_coord_frame.has_data:
raise ValueError('Cannot create a SkyCoord without data')
@property
def frame(self):
return self._sky_coord_frame
@property
def representation_type(self):
return self.frame.representation_type
@representation_type.setter
def representation_type(self, value):
self.frame.representation_type = value
# TODO: remove these in future
@property
def representation(self):
return self.frame.representation
@representation.setter
def representation(self, value):
self.frame.representation = value
@property
def shape(self):
return self.frame.shape
def _apply(self, method, *args, **kwargs):
"""Create a new instance, applying a method to the underlying data.
In typical usage, the method is any of the shape-changing methods for
`~numpy.ndarray` (``reshape``, ``swapaxes``, etc.), as well as those
picking particular elements (``__getitem__``, ``take``, etc.), which
are all defined in `~astropy.utils.misc.ShapedLikeNDArray`. It will be
applied to the underlying arrays in the representation (e.g., ``x``,
``y``, and ``z`` for `~astropy.coordinates.CartesianRepresentation`),
as well as to any frame attributes that have a shape, with the results
used to create a new instance.
Internally, it is also used to apply functions to the above parts
(in particular, `~numpy.broadcast_to`).
Parameters
----------
method : str or callable
If str, it is the name of a method that is applied to the internal
``components``. If callable, the function is applied.
args : tuple
Any positional arguments for ``method``.
kwargs : dict
Any keyword arguments for ``method``.
"""
def apply_method(value):
if isinstance(value, ShapedLikeNDArray):
return value._apply(method, *args, **kwargs)
else:
if callable(method):
return method(value, *args, **kwargs)
else:
return getattr(value, method)(*args, **kwargs)
# create a new but empty instance, and copy over stuff
new = super().__new__(self.__class__)
new._sky_coord_frame = self._sky_coord_frame._apply(method,
*args, **kwargs)
new._extra_frameattr_names = self._extra_frameattr_names.copy()
for attr in self._extra_frameattr_names:
value = getattr(self, attr)
if getattr(value, 'size', 1) > 1:
value = apply_method(value)
elif method == 'copy' or method == 'flatten':
# flatten should copy also for a single element array, but
# we cannot use it directly for array scalars, since it
# always returns a one-dimensional array. So, just copy.
value = copy.copy(value)
setattr(new, '_' + attr, value)
# Copy other 'info' attr only if it has actually been defined.
# See PR #3898 for further explanation and justification, along
# with Quantity.__array_finalize__
if 'info' in self.__dict__:
new.info = self.info
return new
def transform_to(self, frame, merge_attributes=True):
"""Transform this coordinate to a new frame.
The precise frame transformed to depends on ``merge_attributes``.
If `False`, the destination frame is used exactly as passed in.
But this is often not quite what one wants. E.g., suppose one wants to
transform an ICRS coordinate that has an obstime attribute to FK4; in
this case, one likely would want to use this information. Thus, the
        default for ``merge_attributes`` is `True`, in which case the precedence is
as follows: (1) explicitly set (i.e., non-default) values in the
destination frame; (2) explicitly set values in the source; (3) default
value in the destination frame.
Note that in either case, any explicitly set attributes on the source
`SkyCoord` that are not part of the destination frame's definition are
kept (stored on the resulting `SkyCoord`), and thus one can round-trip
        (e.g., from FK4 to ICRS to FK4 without losing obstime).
Parameters
----------
frame : str, `BaseCoordinateFrame` class or instance, or `SkyCoord` instance
The frame to transform this coordinate into. If a `SkyCoord`, the
underlying frame is extracted, and all other information ignored.
merge_attributes : bool, optional
Whether the default attributes in the destination frame are allowed
to be overridden by explicitly set attributes in the source
(see note above; default: `True`).
Returns
-------
coord : `SkyCoord`
A new object with this coordinate represented in the `frame` frame.
Raises
------
ValueError
If there is no possible transformation route.
"""
from astropy.coordinates.errors import ConvertError
frame_kwargs = {}
# Frame name (string) or frame class? Coerce into an instance.
try:
frame = _get_frame_class(frame)()
except Exception:
pass
if isinstance(frame, SkyCoord):
frame = frame.frame # Change to underlying coord frame instance
if isinstance(frame, BaseCoordinateFrame):
new_frame_cls = frame.__class__
# Get frame attributes, allowing defaults to be overridden by
# explicitly set attributes of the source if ``merge_attributes``.
for attr in frame_transform_graph.frame_attributes:
self_val = getattr(self, attr, None)
frame_val = getattr(frame, attr, None)
if (frame_val is not None
and not (merge_attributes
and frame.is_frame_attr_default(attr))):
frame_kwargs[attr] = frame_val
elif (self_val is not None
and not self.is_frame_attr_default(attr)):
frame_kwargs[attr] = self_val
elif frame_val is not None:
frame_kwargs[attr] = frame_val
else:
raise ValueError('Transform `frame` must be a frame name, class, or instance')
# Get the composite transform to the new frame
trans = frame_transform_graph.get_transform(self.frame.__class__, new_frame_cls)
if trans is None:
raise ConvertError('Cannot transform from {} to {}'
.format(self.frame.__class__, new_frame_cls))
# Make a generic frame which will accept all the frame kwargs that
# are provided and allow for transforming through intermediate frames
# which may require one or more of those kwargs.
generic_frame = GenericFrame(frame_kwargs)
# Do the transformation, returning a coordinate frame of the desired
# final type (not generic).
new_coord = trans(self.frame, generic_frame)
# Finally make the new SkyCoord object from the `new_coord` and
# remaining frame_kwargs that are not frame_attributes in `new_coord`.
for attr in (set(new_coord.get_frame_attr_names()) &
set(frame_kwargs.keys())):
frame_kwargs.pop(attr)
return self.__class__(new_coord, **frame_kwargs)
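    # For example (illustrative): with merge_attributes=True, transforming an
    # ICRS coordinate that carries an explicit obstime to a default FK4 frame
    # seeds the FK4 obstime from the source rather than the FK4 default.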
def apply_space_motion(self, new_obstime=None, dt=None):
"""
Compute the position of the source represented by this coordinate object
to a new time using the velocities stored in this object and assuming
linear space motion (including relativistic corrections). This is
sometimes referred to as an "epoch transformation."
The initial time before the evolution is taken from the ``obstime``
attribute of this coordinate. Note that this method currently does not
support evolving coordinates where the *frame* has an ``obstime`` frame
attribute, so the ``obstime`` is only used for storing the before and
after times, not actually as an attribute of the frame. Alternatively,
if ``dt`` is given, an ``obstime`` need not be provided at all.
Parameters
----------
new_obstime : `~astropy.time.Time`, optional
The time at which to evolve the position to. Requires that the
``obstime`` attribute be present on this frame.
dt : `~astropy.units.Quantity`, `~astropy.time.TimeDelta`, optional
An amount of time to evolve the position of the source. Cannot be
given at the same time as ``new_obstime``.
Returns
-------
new_coord : `SkyCoord`
A new coordinate object with the evolved location of this coordinate
at the new time. ``obstime`` will be set on this object to the new
time only if ``self`` also has ``obstime``.
"""
if (new_obstime is None and dt is None or
new_obstime is not None and dt is not None):
raise ValueError("You must specify one of `new_obstime` or `dt`, "
"but not both.")
# Validate that we have velocity info
if 's' not in self.frame.data.differentials:
raise ValueError('SkyCoord requires velocity data to evolve the '
'position.')
if 'obstime' in self.frame.frame_attributes:
raise NotImplementedError("Updating the coordinates in a frame "
"with explicit time dependence is "
"currently not supported. If you would "
"like this functionality, please open an "
"issue on github:\n"
"https://github.com/astropy/astropy")
if new_obstime is not None and self.obstime is None:
# If no obstime is already on this object, raise an error if a new
# obstime is passed: we need to know the time / epoch at which the
            # position / velocity were measured initially
raise ValueError('This object has no associated `obstime`. '
'apply_space_motion() must receive a time '
'difference, `dt`, and not a new obstime.')
# Compute t1 and t2, the times used in the starpm call, which *only*
# uses them to compute a delta-time
t1 = self.obstime
if dt is None:
# self.obstime is not None and new_obstime is not None b/c of above
# checks
t2 = new_obstime
else:
# new_obstime is definitely None b/c of the above checks
if t1 is None:
# MAGIC NUMBER: if the current SkyCoord object has no obstime,
# assume J2000 to do the dt offset. This is not actually used
# for anything except a delta-t in starpm, so it's OK that it's
# not necessarily the "real" obstime
t1 = Time('J2000')
                new_obstime = None  # we don't actually know the initial obstime
t2 = t1 + dt
else:
t2 = t1 + dt
new_obstime = t2
# starpm wants tdb time
t1 = t1.tdb
t2 = t2.tdb
# proper motion in RA should not include the cos(dec) term, see the
# erfa function eraStarpv, comment (4). So we convert to the regular
# spherical differentials.
icrsrep = self.icrs.represent_as(SphericalRepresentation, SphericalDifferential)
icrsvel = icrsrep.differentials['s']
try:
plx = icrsrep.distance.to_value(u.arcsecond, u.parallax())
except u.UnitConversionError: # No distance: set to 0 by convention
plx = 0.
try:
rv = icrsvel.d_distance.to_value(u.km/u.s)
except u.UnitConversionError: # No RV
rv = 0.
starpm = erfa.starpm(icrsrep.lon.radian, icrsrep.lat.radian,
icrsvel.d_lon.to_value(u.radian/u.yr),
icrsvel.d_lat.to_value(u.radian/u.yr),
plx, rv, t1.jd1, t1.jd2, t2.jd1, t2.jd2)
icrs2 = ICRS(ra=u.Quantity(starpm[0], u.radian, copy=False),
dec=u.Quantity(starpm[1], u.radian, copy=False),
pm_ra=u.Quantity(starpm[2], u.radian/u.yr, copy=False),
pm_dec=u.Quantity(starpm[3], u.radian/u.yr, copy=False),
distance=Distance(parallax=starpm[4] * u.arcsec, copy=False),
radial_velocity=u.Quantity(starpm[5], u.km/u.s, copy=False),
differential_type=SphericalDifferential)
# Update the obstime of the returned SkyCoord, and need to carry along
# the frame attributes
frattrs = {attrnm: getattr(self, attrnm)
for attrnm in self._extra_frameattr_names}
frattrs['obstime'] = new_obstime
return self.__class__(icrs2, **frattrs).transform_to(self.frame)
def _is_name(self, string):
"""
Returns whether a string is one of the aliases for the frame.
"""
return (self.frame.name == string or
(isinstance(self.frame.name, list) and string in self.frame.name))
def __getattr__(self, attr):
"""
Overrides getattr to return coordinates that this can be transformed
to, based on the alias attr in the master transform graph.
"""
if '_sky_coord_frame' in self.__dict__:
if self._is_name(attr):
return self # Should this be a deepcopy of self?
# Anything in the set of all possible frame_attr_names is handled
# here. If the attr is relevant for the current frame then delegate
# to self.frame otherwise get it from self._<attr>.
if attr in frame_transform_graph.frame_attributes:
if attr in self.frame.get_frame_attr_names():
return getattr(self.frame, attr)
else:
return getattr(self, '_' + attr, None)
# Some attributes might not fall in the above category but still
# are available through self._sky_coord_frame.
if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr):
return getattr(self._sky_coord_frame, attr)
# Try to interpret as a new frame for transforming.
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
return self.transform_to(attr)
# Fail
raise AttributeError("'{}' object has no attribute '{}'"
.format(self.__class__.__name__, attr))
def __setattr__(self, attr, val):
# This is to make anything available through __getattr__ immutable
if '_sky_coord_frame' in self.__dict__:
if self._is_name(attr):
raise AttributeError(f"'{attr}' is immutable")
if not attr.startswith('_') and hasattr(self._sky_coord_frame, attr):
setattr(self._sky_coord_frame, attr, val)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError(f"'{attr}' is immutable")
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be set, but only via a private
# variable. See __getattr__ above.
super().__setattr__('_' + attr, val)
# Validate it
frame_transform_graph.frame_attributes[attr].__get__(self)
# And add to set of extra attributes
self._extra_frameattr_names |= {attr}
else:
# Otherwise, do the standard Python attribute setting
super().__setattr__(attr, val)
def __delattr__(self, attr):
# mirror __setattr__ above
if '_sky_coord_frame' in self.__dict__:
if self._is_name(attr):
raise AttributeError(f"'{attr}' is immutable")
if not attr.startswith('_') and hasattr(self._sky_coord_frame,
attr):
delattr(self._sky_coord_frame, attr)
return
frame_cls = frame_transform_graph.lookup_name(attr)
if frame_cls is not None and self.frame.is_transformable_to(frame_cls):
raise AttributeError(f"'{attr}' is immutable")
if attr in frame_transform_graph.frame_attributes:
# All possible frame attributes can be deleted, but need to remove
# the corresponding private variable. See __getattr__ above.
super().__delattr__('_' + attr)
# Also remove it from the set of extra attributes
self._extra_frameattr_names -= {attr}
else:
# Otherwise, do the standard Python attribute setting
super().__delattr__(attr)
@override__dir__
def __dir__(self):
"""
Override the builtin `dir` behavior to include:
- Transforms available by aliases
- Attribute / methods of the underlying self.frame object
"""
# determine the aliases that this can be transformed to.
dir_values = set()
for name in frame_transform_graph.get_names():
frame_cls = frame_transform_graph.lookup_name(name)
if self.frame.is_transformable_to(frame_cls):
dir_values.add(name)
# Add public attributes of self.frame
dir_values.update(set(attr for attr in dir(self.frame) if not attr.startswith('_')))
# Add all possible frame attributes
dir_values.update(frame_transform_graph.frame_attributes.keys())
return dir_values
def __repr__(self):
clsnm = self.__class__.__name__
coonm = self.frame.__class__.__name__
frameattrs = self.frame._frame_attrs_repr()
if frameattrs:
frameattrs = ': ' + frameattrs
data = self.frame._data_repr()
if data:
data = ': ' + data
return '<{clsnm} ({coonm}{frameattrs}){data}>'.format(**locals())
def to_string(self, style='decimal', **kwargs):
"""
A string representation of the coordinates.
The default styles definitions are::
'decimal': 'lat': {'decimal': True, 'unit': "deg"}
'lon': {'decimal': True, 'unit': "deg"}
'dms': 'lat': {'unit': "deg"}
'lon': {'unit': "deg"}
'hmsdms': 'lat': {'alwayssign': True, 'pad': True, 'unit': "deg"}
'lon': {'pad': True, 'unit': "hour"}
See :meth:`~astropy.coordinates.Angle.to_string` for details and
        keyword arguments (the two angles forming the coordinates are
both :class:`~astropy.coordinates.Angle` instances). Keyword
arguments have precedence over the style defaults and are passed
to :meth:`~astropy.coordinates.Angle.to_string`.
Parameters
----------
style : {'hmsdms', 'dms', 'decimal'}
The formatting specification to use. These encode the three most
common ways to represent coordinates. The default is `decimal`.
kwargs
Keyword args passed to :meth:`~astropy.coordinates.Angle.to_string`.
"""
sph_coord = self.frame.represent_as(SphericalRepresentation)
styles = {'hmsdms': {'lonargs': {'unit': u.hour, 'pad': True},
'latargs': {'unit': u.degree, 'pad': True, 'alwayssign': True}},
'dms': {'lonargs': {'unit': u.degree},
'latargs': {'unit': u.degree}},
'decimal': {'lonargs': {'unit': u.degree, 'decimal': True},
'latargs': {'unit': u.degree, 'decimal': True}}
}
lonargs = {}
latargs = {}
if style in styles:
lonargs.update(styles[style]['lonargs'])
latargs.update(styles[style]['latargs'])
else:
raise ValueError('Invalid style. Valid options are: {}'.format(",".join(styles)))
lonargs.update(kwargs)
latargs.update(kwargs)
if np.isscalar(sph_coord.lon.value):
coord_string = (sph_coord.lon.to_string(**lonargs) +
" " + sph_coord.lat.to_string(**latargs))
else:
coord_string = []
for lonangle, latangle in zip(sph_coord.lon.ravel(), sph_coord.lat.ravel()):
coord_string += [(lonangle.to_string(**lonargs) +
" " + latangle.to_string(**latargs))]
if len(sph_coord.shape) > 1:
coord_string = np.array(coord_string).reshape(sph_coord.shape)
return coord_string
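    # Illustrative to_string() outputs (assumed values; the exact digits and
    # spacing depend on the Angle.to_string defaults in the installed astropy):
    #   SkyCoord(10.5*u.deg, 41.2*u.deg).to_string('decimal') -> '10.5 41.2'
    #   SkyCoord(10.5*u.deg, 41.2*u.deg).to_string('hmsdms')
    #       -> '00h42m00s +41d12m00s'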
def is_equivalent_frame(self, other):
"""
        Checks if this object's frame is the same as that of the ``other``
object.
To be the same frame, two objects must be the same frame class and have
the same frame attributes. For two `SkyCoord` objects, *all* of the
frame attributes have to match, not just those relevant for the object's
frame.
Parameters
----------
other : SkyCoord or BaseCoordinateFrame
The other object to check.
Returns
-------
isequiv : bool
True if the frames are the same, False if not.
Raises
------
TypeError
If ``other`` isn't a `SkyCoord` or a `BaseCoordinateFrame` or subclass.
"""
if isinstance(other, BaseCoordinateFrame):
return self.frame.is_equivalent_frame(other)
elif isinstance(other, SkyCoord):
if other.frame.name != self.frame.name:
return False
for fattrnm in frame_transform_graph.frame_attributes:
if np.any(getattr(self, fattrnm) != getattr(other, fattrnm)):
return False
return True
else:
# not a BaseCoordinateFrame nor a SkyCoord object
raise TypeError("Tried to do is_equivalent_frame on something that "
"isn't frame-like")
# High-level convenience methods
def separation(self, other):
"""
Computes on-sky separation between this coordinate and another.
.. note::
If the ``other`` coordinate object is in a different frame, it is
first transformed to the frame of this object. This can lead to
unintuitive behavior if not accounted for. Particularly of note is
that ``self.separation(other)`` and ``other.separation(self)`` may
not give the same answer in this case.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Angle`
The on-sky separation between this and the ``other`` coordinate.
Notes
-----
The separation is calculated using the Vincenty formula, which
is stable at all locations, including poles and antipodes [1]_.
.. [1] https://en.wikipedia.org/wiki/Great-circle_distance
"""
from . import Angle
from .angle_utilities import angular_separation
if not self.is_equivalent_frame(other):
try:
other = other.transform_to(self, merge_attributes=False)
except TypeError:
raise TypeError('Can only get separation to another SkyCoord '
'or a coordinate frame with data')
lon1 = self.spherical.lon
lat1 = self.spherical.lat
lon2 = other.spherical.lon
lat2 = other.spherical.lat
# Get the separation as a Quantity, convert to Angle in degrees
sep = angular_separation(lon1, lat1, lon2, lat2)
return Angle(sep, unit=u.degree)
def separation_3d(self, other):
"""
Computes three dimensional separation between this coordinate
and another.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
other : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to get the separation to.
Returns
-------
sep : `~astropy.coordinates.Distance`
The real-space distance between these two coordinates.
Raises
------
ValueError
If this or the other coordinate do not have distances.
"""
if not self.is_equivalent_frame(other):
try:
other = other.transform_to(self, merge_attributes=False)
except TypeError:
raise TypeError('Can only get separation to another SkyCoord '
'or a coordinate frame with data')
if issubclass(self.data.__class__, UnitSphericalRepresentation):
raise ValueError('This object does not have a distance; cannot '
'compute 3d separation.')
if issubclass(other.data.__class__, UnitSphericalRepresentation):
raise ValueError('The other object does not have a distance; '
'cannot compute 3d separation.')
c1 = self.cartesian.without_differentials()
c2 = other.cartesian.without_differentials()
return Distance((c1 - c2).norm())
def spherical_offsets_to(self, tocoord):
r"""
Computes angular offsets to go *from* this coordinate *to* another.
Parameters
----------
tocoord : `~astropy.coordinates.BaseCoordinateFrame`
The coordinate to find the offset to.
Returns
-------
lon_offset : `~astropy.coordinates.Angle`
The angular offset in the longitude direction (i.e., RA for
equatorial coordinates).
lat_offset : `~astropy.coordinates.Angle`
The angular offset in the latitude direction (i.e., Dec for
equatorial coordinates).
Raises
------
ValueError
If the ``tocoord`` is not in the same frame as this one. This is
different from the behavior of the `separation`/`separation_3d`
methods because the offset components depend critically on the
specific choice of frame.
Notes
-----
This uses the sky offset frame machinery, and hence will produce a new
sky offset frame if one does not already exist for this object's frame
class.
See Also
--------
separation : for the *total* angular offset (not broken out into components).
position_angle : for the direction of the offset.
"""
if not self.is_equivalent_frame(tocoord):
raise ValueError('Tried to use spherical_offsets_to with two non-matching frames!')
aframe = self.skyoffset_frame()
acoord = tocoord.transform_to(aframe)
dlon = acoord.spherical.lon.view(Angle)
dlat = acoord.spherical.lat.view(Angle)
return dlon, dlat
def directional_offset_by(self, position_angle, separation):
"""
Computes coordinates at the given offset from this coordinate.
Parameters
----------
position_angle : `~astropy.coordinates.Angle`
position_angle of offset
separation : `~astropy.coordinates.Angle`
offset angular separation
Returns
-------
newpoints : `~astropy.coordinates.SkyCoord`
The coordinates for the location that corresponds to offsetting by
the given `position_angle` and `separation`.
Notes
-----
Returned SkyCoord frame retains only the frame attributes that are for
the resulting frame type. (e.g. if the input frame is
`~astropy.coordinates.ICRS`, an ``equinox`` value will be retained, but
an ``obstime`` will not.)
For a more complete set of transform offsets, use `~astropy.wcs.WCS`.
`~astropy.coordinates.SkyCoord.skyoffset_frame()` can also be used to
create a spherical frame with (lat=0, lon=0) at a reference point,
approximating an xy cartesian system for small offsets. This method
is distinct in that it is accurate on the sphere.
See Also
--------
position_angle : inverse operation for the ``position_angle`` component
separation : inverse operation for the ``separation`` component
"""
from . import angle_utilities
slat = self.represent_as(UnitSphericalRepresentation).lat
slon = self.represent_as(UnitSphericalRepresentation).lon
newlon, newlat = angle_utilities.offset_by(
lon=slon, lat=slat,
posang=position_angle, distance=separation)
return SkyCoord(newlon, newlat, frame=self.frame)
def match_to_catalog_sky(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest on-sky matches of this coordinate in a set of
catalog coordinates.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is ``2``,
for matching a coordinate catalog against *itself* (``1``
is inappropriate because each point will find itself as the
closest match).
Returns
-------
idx : integer array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity`
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object. Unless both this and ``catalogcoord`` have associated
distances, this quantity assumes that all sources are at a
distance of 1 (dimensionless).
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_sky
SkyCoord.match_to_catalog_3d
"""
from .matching import match_coordinates_sky
if (isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data):
self_in_catalog_frame = self.transform_to(catalogcoord)
else:
raise TypeError('Can only get separation to another SkyCoord or a '
'coordinate frame with data')
res = match_coordinates_sky(self_in_catalog_frame, catalogcoord,
nthneighbor=nthneighbor,
storekdtree='_kdtree_sky')
return res
def match_to_catalog_3d(self, catalogcoord, nthneighbor=1):
"""
Finds the nearest 3-dimensional matches of this coordinate to a set
of catalog coordinates.
This finds the 3-dimensional closest neighbor, which is only different
from the on-sky distance if ``distance`` is set in this object or the
``catalogcoord`` object.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
catalogcoord : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The base catalog in which to search for matches. Typically this
will be a coordinate object that is an array (i.e.,
``catalogcoord.isscalar == False``)
nthneighbor : int, optional
Which closest neighbor to search for. Typically ``1`` is
desired here, as that is correct for matching one set of
coordinates to another. The next likely use case is
``2``, for matching a coordinate catalog against *itself*
(``1`` is inappropriate because each point will find
itself as the closest match).
Returns
-------
idx : integer array
Indices into ``catalogcoord`` to get the matched points for
each of this object's coordinates. Shape matches this
object.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the closest match for each
element in this object in ``catalogcoord``. Shape matches
this object.
dist3d : `~astropy.units.Quantity`
The 3D distance between the closest match for each element
in this object in ``catalogcoord``. Shape matches this
object.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ to be
installed or it will fail.
See Also
--------
astropy.coordinates.match_coordinates_3d
SkyCoord.match_to_catalog_sky
"""
from .matching import match_coordinates_3d
if (isinstance(catalogcoord, (SkyCoord, BaseCoordinateFrame))
and catalogcoord.has_data):
self_in_catalog_frame = self.transform_to(catalogcoord)
else:
raise TypeError('Can only get separation to another SkyCoord or a '
'coordinate frame with data')
res = match_coordinates_3d(self_in_catalog_frame, catalogcoord,
nthneighbor=nthneighbor,
storekdtree='_kdtree_3d')
return res
def search_around_sky(self, searcharoundcoords, seplimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given on-sky separation.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation`.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinates to search around to try to find matching points in
this `SkyCoord`. This should be an object with array coordinates,
not a scalar coordinate object.
seplimit : `~astropy.units.Quantity` with angle units
The on-sky separation to search within.
Returns
-------
idxsearcharound : integer array
Indices into ``searcharoundcoords`` that match the
corresponding elements of ``idxself``. Shape matches
``idxself``.
idxself : integer array
Indices into ``self`` that match the
corresponding elements of ``idxsearcharound``. Shape matches
``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity`
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ (>=0.12.0) to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_sky
SkyCoord.search_around_3d
"""
from .matching import search_around_sky
return search_around_sky(searcharoundcoords, self, seplimit,
storekdtree='_kdtree_sky')
def search_around_3d(self, searcharoundcoords, distlimit):
"""
Searches for all coordinates in this object around a supplied set of
points within a given 3D radius.
This is intended for use on `~astropy.coordinates.SkyCoord` objects
with coordinate arrays, rather than a scalar coordinate. For a scalar
coordinate, it is better to use
`~astropy.coordinates.SkyCoord.separation_3d`.
For more on how to use this (and related) functionality, see the
examples in :doc:`/coordinates/matchsep`.
Parameters
----------
searcharoundcoords : `~astropy.coordinates.SkyCoord` or `~astropy.coordinates.BaseCoordinateFrame`
The coordinates to search around to try to find matching points in
this `SkyCoord`. This should be an object with array coordinates,
not a scalar coordinate object.
distlimit : `~astropy.units.Quantity` with distance units
The physical radius to search within.
Returns
-------
idxsearcharound : integer array
Indices into ``searcharoundcoords`` that match the
corresponding elements of ``idxself``. Shape matches
``idxself``.
idxself : integer array
Indices into ``self`` that match the
corresponding elements of ``idxsearcharound``. Shape matches
``idxsearcharound``.
sep2d : `~astropy.coordinates.Angle`
The on-sky separation between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
dist3d : `~astropy.units.Quantity`
The 3D distance between the coordinates. Shape matches
``idxsearcharound`` and ``idxself``.
Notes
-----
This method requires `SciPy <https://www.scipy.org/>`_ (>=0.12.0) to be
installed or it will fail.
In the current implementation, the return values are always sorted in
the same order as the ``searcharoundcoords`` (so ``idxsearcharound`` is
in ascending order). This is considered an implementation detail,
though, so it could change in a future release.
See Also
--------
astropy.coordinates.search_around_3d
SkyCoord.search_around_sky
"""
from .matching import search_around_3d
return search_around_3d(searcharoundcoords, self, distlimit,
storekdtree='_kdtree_3d')
def position_angle(self, other):
"""
Computes the on-sky position angle (East of North) between this
`SkyCoord` and another.
Parameters
----------
other : `SkyCoord`
The other coordinate to compute the position angle to. It is
treated as the "head" of the vector of the position angle.
Returns
-------
pa : `~astropy.coordinates.Angle`
The (positive) position angle of the vector pointing from ``self``
to ``other``. If either ``self`` or ``other`` contain arrays, this
will be an array following the appropriate `numpy` broadcasting
rules.
Examples
--------
>>> c1 = SkyCoord(0*u.deg, 0*u.deg)
>>> c2 = SkyCoord(1*u.deg, 0*u.deg)
>>> c1.position_angle(c2).degree
90.0
>>> c3 = SkyCoord(1*u.deg, 1*u.deg)
>>> c1.position_angle(c3).degree # doctest: +FLOAT_CMP
44.995636455344844
"""
from . import angle_utilities
if not self.is_equivalent_frame(other):
try:
other = other.transform_to(self, merge_attributes=False)
except TypeError:
raise TypeError('Can only get position_angle to another '
'SkyCoord or a coordinate frame with data')
slat = self.represent_as(UnitSphericalRepresentation).lat
slon = self.represent_as(UnitSphericalRepresentation).lon
olat = other.represent_as(UnitSphericalRepresentation).lat
olon = other.represent_as(UnitSphericalRepresentation).lon
return angle_utilities.position_angle(slon, slat, olon, olat)
def skyoffset_frame(self, rotation=None):
"""
Returns the sky offset frame with this `SkyCoord` at the origin.
        Parameters
        ----------
        rotation : `~astropy.coordinates.Angle` or `~astropy.units.Quantity` with angle units
            The final rotation of the frame about the ``origin``. The sign of
            the rotation is the left-hand rule. That is, an object at a
            particular position angle in the un-rotated system will be sent to
            the positive latitude (z) direction in the final frame.
        Returns
        -------
        astrframe : `~astropy.coordinates.SkyOffsetFrame`
            A sky offset frame of the same type as this `SkyCoord` (e.g., if
            this object has an ICRS coordinate, the resulting frame is
            SkyOffsetICRS, with the origin set to this object)
"""
return SkyOffsetFrame(origin=self, rotation=rotation)
def get_constellation(self, short_name=False, constellation_list='iau'):
"""
Determines the constellation(s) of the coordinates this `SkyCoord`
contains.
Parameters
----------
short_name : bool
If True, the returned names are the IAU-sanctioned abbreviated
names. Otherwise, full names for the constellations are used.
constellation_list : str
The set of constellations to use. Currently only ``'iau'`` is
supported, meaning the 88 "modern" constellations endorsed by the IAU.
Returns
-------
constellation : str or string array
If this is a scalar coordinate, returns the name of the
constellation. If it is an array `SkyCoord`, it returns an array of
names.
Notes
-----
To determine which constellation a point on the sky is in, this first
precesses to B1875, and then uses the Delporte boundaries of the 88
modern constellations, as tabulated by
`Roman 1987 <http://cdsarc.u-strasbg.fr/viz-bin/Cat?VI/42>`_.
See Also
--------
astropy.coordinates.get_constellation
"""
from .funcs import get_constellation
# because of issue #7028, the conversion to a PrecessedGeocentric
# system fails in some cases. Work around is to drop the velocities.
        # they are not needed here since only position information is used
extra_frameattrs = {nm: getattr(self, nm)
for nm in self._extra_frameattr_names}
novel = SkyCoord(self.realize_frame(self.data.without_differentials()),
**extra_frameattrs)
return get_constellation(novel, short_name, constellation_list)
# the simpler version below can be used when gh-issue #7028 is resolved
# return get_constellation(self, short_name, constellation_list)
# WCS pixel to/from sky conversions
def to_pixel(self, wcs, origin=0, mode='all'):
"""
Convert this coordinate to pixel coordinates using a `~astropy.wcs.WCS`
object.
Parameters
----------
wcs : `~astropy.wcs.WCS`
            The WCS to use for the conversion.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
            Whether to do the transformation including distortions (``'all'``)
            or including only the core WCS transformation (``'wcs'``).
Returns
-------
xp, yp : `numpy.ndarray`
The pixel coordinates
See Also
--------
astropy.wcs.utils.skycoord_to_pixel : the implementation of this method
"""
return skycoord_to_pixel(self, wcs=wcs, origin=origin, mode=mode)
@classmethod
def from_pixel(cls, xp, yp, wcs, origin=0, mode='all'):
"""
Create a new `SkyCoord` from pixel coordinates using an
`~astropy.wcs.WCS` object.
Parameters
----------
xp, yp : float or `numpy.ndarray`
The coordinates to convert.
wcs : `~astropy.wcs.WCS`
            The WCS to use for the conversion.
origin : int
Whether to return 0 or 1-based pixel coordinates.
mode : 'all' or 'wcs'
            Whether to do the transformation including distortions (``'all'``)
            or including only the core WCS transformation (``'wcs'``).
Returns
-------
coord : an instance of this class
A new object with sky coordinates corresponding to the input ``xp``
and ``yp``.
See Also
--------
to_pixel : to do the inverse operation
astropy.wcs.utils.pixel_to_skycoord : the implementation of this method
"""
return pixel_to_skycoord(xp, yp, wcs=wcs, origin=origin, mode=mode, cls=cls)
def contained_by(self, wcs, image=None, **kwargs):
"""
Determines if the SkyCoord is contained in the given wcs footprint.
Parameters
----------
wcs : `~astropy.wcs.WCS`
The coordinate to check if it is within the wcs coordinate.
image : array
            Optional. The image associated with the wcs object that the
            coordinate is being checked against. If not given, the naxis
            keywords will be used to determine if the coordinate falls within
            the wcs footprint.
**kwargs :
Additional arguments to pass to `~astropy.coordinates.SkyCoord.to_pixel`
Returns
-------
response : bool
True means the WCS footprint contains the coordinate, False means it does not.
"""
if image is not None:
ymax, xmax = image.shape
else:
xmax, ymax = wcs._naxis
import warnings
with warnings.catch_warnings():
# Suppress warnings since they just mean we didn't find the coordinate
warnings.simplefilter("ignore")
try:
x, y = self.to_pixel(wcs, **kwargs)
except Exception:
return False
return (x < xmax) & (x > 0) & (y < ymax) & (y > 0)
def radial_velocity_correction(self, kind='barycentric', obstime=None,
location=None):
"""
Compute the correction required to convert a radial velocity at a given
        time and place on the Earth's surface to a barycentric or heliocentric
velocity.
Parameters
----------
kind : str
The kind of velocity correction. Must be 'barycentric' or
'heliocentric'.
obstime : `~astropy.time.Time` or None, optional
The time at which to compute the correction. If `None`, the
``obstime`` frame attribute on the `SkyCoord` will be used.
location : `~astropy.coordinates.EarthLocation` or None, optional
The observer location at which to compute the correction. If
`None`, the ``location`` frame attribute on the passed-in
``obstime`` will be used, and if that is None, the ``location``
frame attribute on the `SkyCoord` will be used.
Raises
------
ValueError
If either ``obstime`` or ``location`` are passed in (not ``None``)
when the frame attribute is already set on this `SkyCoord`.
TypeError
If ``obstime`` or ``location`` aren't provided, either as arguments
or as frame attributes.
Returns
-------
vcorr : `~astropy.units.Quantity` with velocity units
The correction with a positive sign. I.e., *add* this
to an observed radial velocity to get the barycentric (or
heliocentric) velocity. If m/s precision or better is needed,
see the notes below.
Notes
-----
The barycentric correction is calculated to higher precision than the
        heliocentric correction and includes additional physics (e.g. time dilation).
Use barycentric corrections if m/s precision is required.
The algorithm here is sufficient to perform corrections at the mm/s level, but
care is needed in application. Strictly speaking, the barycentric correction is
multiplicative and should be applied as::
            sc = SkyCoord(1*u.deg, 2*u.deg)
            vcorr = sc.radial_velocity_correction(kind='barycentric', obstime=t, location=loc)
            rv = rv + vcorr + rv * vcorr / consts.c
If your target is nearby and/or has finite proper motion you may need to account
        for terms arising from this. See Wright & Eastman (2014) for details.
The default is for this method to use the builtin ephemeris for
computing the sun and earth location. Other ephemerides can be chosen
by setting the `~astropy.coordinates.solar_system_ephemeris` variable,
        either directly or via a ``with`` statement. For example, to use the JPL
ephemeris, do::
            sc = SkyCoord(1*u.deg, 2*u.deg)
            with coord.solar_system_ephemeris.set('jpl'):
                rv += sc.radial_velocity_correction(obstime=t, location=loc)
"""
# has to be here to prevent circular imports
from .solar_system import get_body_barycentric_posvel
# location validation
timeloc = getattr(obstime, 'location', None)
if location is None:
if self.location is not None:
location = self.location
if timeloc is not None:
raise ValueError('`location` cannot be in both the '
'passed-in `obstime` and this `SkyCoord` '
'because it is ambiguous which is meant '
'for the radial_velocity_correction.')
elif timeloc is not None:
location = timeloc
else:
raise TypeError('Must provide a `location` to '
'radial_velocity_correction, either as a '
'SkyCoord frame attribute, as an attribute on '
'the passed in `obstime`, or in the method '
'call.')
elif self.location is not None or timeloc is not None:
raise ValueError('Cannot compute radial velocity correction if '
'`location` argument is passed in and there is '
'also a `location` attribute on this SkyCoord or '
'the passed-in `obstime`.')
# obstime validation
if obstime is None:
obstime = self.obstime
if obstime is None:
raise TypeError('Must provide an `obstime` to '
'radial_velocity_correction, either as a '
'SkyCoord frame attribute or in the method '
'call.')
elif self.obstime is not None:
raise ValueError('Cannot compute radial velocity correction if '
'`obstime` argument is passed in and it is '
'inconsistent with the `obstime` frame '
'attribute on the SkyCoord')
pos_earth, v_earth = get_body_barycentric_posvel('earth', obstime)
if kind == 'barycentric':
v_origin_to_earth = v_earth
elif kind == 'heliocentric':
v_sun = get_body_barycentric_posvel('sun', obstime)[1]
v_origin_to_earth = v_earth - v_sun
else:
raise ValueError("`kind` argument to radial_velocity_correction must "
"be 'barycentric' or 'heliocentric', but got "
"'{}'".format(kind))
gcrs_p, gcrs_v = location.get_gcrs_posvel(obstime)
        # transforming to GCRS is not the correct thing to do here, since we
        # don't want to include aberration (or light deflection). Instead, only
        # apply parallax if necessary.
if self.data.__class__ is UnitSphericalRepresentation:
targcart = self.icrs.cartesian
else:
# skycoord has distances so apply parallax
obs_icrs_cart = pos_earth + gcrs_p
icrs_cart = self.icrs.cartesian
targcart = icrs_cart - obs_icrs_cart
targcart /= targcart.norm()
if kind == 'barycentric':
beta_obs = (v_origin_to_earth + gcrs_v) / speed_of_light
gamma_obs = 1 / np.sqrt(1 - beta_obs.norm()**2)
gr = location.gravitational_redshift(obstime)
            # barycentric redshift according to eq 28 in Wright & Eastman (2014),
# neglecting Shapiro delay and effects of the star's own motion
zb = gamma_obs * (1 + targcart.dot(beta_obs)) / (1 + gr/speed_of_light) - 1
return zb * speed_of_light
else:
# do a simpler correction ignoring time dilation and gravitational redshift
# this is adequate since Heliocentric corrections shouldn't be used if
# cm/s precision is required.
return targcart.dot(v_origin_to_earth + gcrs_v)
# Table interactions
@classmethod
def guess_from_table(cls, table, **coord_kwargs):
r"""
A convenience method to create and return a new `SkyCoord` from the data
in an astropy Table.
This method matches table columns that start with the case-insensitive
        names of the components of the requested frames, if they are also
followed by a non-alphanumeric character. It will also match columns
that *end* with the component name if a non-alphanumeric character is
*before* it.
For example, the first rule means columns with names like
``'RA[J2000]'`` or ``'ra'`` will be interpreted as ``ra`` attributes for
`~astropy.coordinates.ICRS` frames, but ``'RAJ2000'`` or ``'radius'``
are *not*. Similarly, the second rule applied to the
`~astropy.coordinates.Galactic` frame means that a column named
        ``'gal_l'`` will be used as the ``l`` component, but ``'gall'`` or
``'fill'`` will not.
The definition of alphanumeric here is based on Unicode's definition
of alphanumeric, except without ``_`` (which is normally considered
alphanumeric). So for ASCII, this means the non-alphanumeric characters
        are ``<space>_!"#$%&'()*+,-./\:;<=>?@[]^`{|}~``.
Parameters
----------
table : astropy.Table
The table to load data from.
coord_kwargs
Any additional keyword arguments are passed directly to this class's
constructor.
Returns
-------
newsc : same as this class
The new `SkyCoord` (or subclass) object.
"""
_frame_cls, _frame_kwargs = _get_frame_without_data([], coord_kwargs)
frame = _frame_cls(**_frame_kwargs)
coord_kwargs['frame'] = coord_kwargs.get('frame', frame)
comp_kwargs = {}
for comp_name in frame.representation_component_names:
            # this matches things like 'ra[...]' but *not* 'rad'.
# note that the "_" must be in there explicitly, because
# "alphanumeric" usually includes underscores.
starts_with_comp = comp_name + r'(\W|\b|_)'
# this part matches stuff like 'center_ra', but *not*
# 'aura'
ends_with_comp = r'.*(\W|\b|_)' + comp_name + r'\b'
# the final regex ORs together the two patterns
rex = re.compile('(' + starts_with_comp + ')|(' + ends_with_comp + ')',
re.IGNORECASE | re.UNICODE)
for col_name in table.colnames:
if rex.match(col_name):
if comp_name in comp_kwargs:
oldname = comp_kwargs[comp_name].name
msg = ('Found at least two matches for component "{0}"'
': "{1}" and "{2}". Cannot continue with this '
'ambiguity.')
raise ValueError(msg.format(comp_name, oldname, col_name))
comp_kwargs[comp_name] = table[col_name]
for k, v in comp_kwargs.items():
if k in coord_kwargs:
raise ValueError('Found column "{}" in table, but it was '
'already provided as "{}" keyword to '
'guess_from_table function.'.format(v.name, k))
else:
coord_kwargs[k] = v
return cls(**coord_kwargs)
# Name resolve
@classmethod
def from_name(cls, name, frame='icrs', parse=False):
"""
Given a name, query the CDS name resolver to attempt to retrieve
coordinate information for that object. The search database, sesame
url, and query timeout can be set through configuration items in
``astropy.coordinates.name_resolve`` -- see docstring for
`~astropy.coordinates.get_icrs_coordinates` for more
information.
Parameters
----------
name : str
The name of the object to get coordinates for, e.g. ``'M42'``.
frame : str or `BaseCoordinateFrame` class or instance
The frame to transform the object to.
        parse : bool
            Whether to attempt extracting the coordinates from the name by
            parsing with a regex. For object catalog names that have
            J-coordinates embedded in them, e.g.
            'CRTS SSS100805 J194428-420209', this may be much faster than a
            Sesame query for the same object name. The coordinates extracted
            in this way may differ from the database coordinates by a few
            deci-arcseconds, so only use this option if you do not need
            sub-arcsecond accuracy for coordinates.
Returns
-------
coord : SkyCoord
Instance of the SkyCoord class.
"""
from .name_resolve import get_icrs_coordinates
icrs_coord = get_icrs_coordinates(name, parse)
icrs_sky_coord = cls(icrs_coord)
if frame in ('icrs', icrs_coord.__class__):
return icrs_sky_coord
else:
return icrs_sky_coord.transform_to(frame)
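# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the class above): demonstrates the
# separation / position_angle / directional_offset_by round trip described in
# the docstrings. It assumes astropy is installed and imports the public API
# rather than relying on this module's namespace.
if __name__ == '__main__':
    from astropy import units as u
    from astropy.coordinates import SkyCoord
    c1 = SkyCoord(10 * u.deg, 20 * u.deg, frame='icrs')
    c2 = SkyCoord(11 * u.deg, 21 * u.deg, frame='icrs')
    sep = c1.separation(c2)      # on-sky angle (Vincenty formula)
    pa = c1.position_angle(c2)   # East-of-North angle from c1 to c2
    # offsetting c1 by (pa, sep) should land back on c2, to within rounding
    c3 = c1.directional_offset_by(pa, sep)
    print(sep.deg, pa.deg, c3.separation(c2).arcsec)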
|
{
"content_hash": "35a77e1bc0c2492a84e9e1db82deb9a2",
"timestamp": "",
"source": "github",
"line_count": 1701,
"max_line_length": 106,
"avg_line_length": 42.81540270429159,
"alnum_prop": 0.5931016490683656,
"repo_name": "stargaser/astropy",
"id": "9c113ef1f0ad54c0e8f1670e079846430fe20ae9",
"size": "72830",
"binary": false,
"copies": "2",
"ref": "refs/heads/placeholder",
"path": "astropy/coordinates/sky_coordinate.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "444651"
},
{
"name": "C++",
"bytes": "1057"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Objective-C",
"bytes": "615"
},
{
"name": "Python",
"bytes": "9898387"
},
{
"name": "TeX",
"bytes": "853"
}
],
"symlink_target": ""
}
|
"""
Configuration consolidation for the Nexus Driver
This module will export the configuration parameters
from the nexus.ini file
"""
from quantum.common.utils import find_config_file
from quantum.plugins.cisco.common import cisco_configparser as confp
CP = confp.CiscoConfigParser(find_config_file({'plugin': 'cisco'},
"nexus.ini"))
SECTION = CP['SWITCH']
NEXUS_IP_ADDRESS = SECTION['nexus_ip_address']
NEXUS_PORTS = SECTION['ports']
NEXUS_SSH_PORT = SECTION['nexus_ssh_port']
SECTION = CP['DRIVER']
NEXUS_DRIVER = SECTION['name']
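# For reference, a hypothetical nexus.ini satisfying the lookups above.
# The section and key names come from this module; every value below is
# made up:
#
#   [SWITCH]
#   nexus_ip_address = 192.168.1.1
#   ports = 1/10,1/11,1/12
#   nexus_ssh_port = 22
#
#   [DRIVER]
#   name = <python path of the Nexus driver class>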
|
{
"content_hash": "937e8529b4f57616127d545ab9ee408a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 68,
"avg_line_length": 28.3,
"alnum_prop": 0.7173144876325088,
"repo_name": "ruijie/quantum",
"id": "4d648d93ddd63ce5e7b144964c6081231c1e4903",
"size": "1342",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "quantum/plugins/cisco/nexus/cisco_nexus_configuration.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "71602"
},
{
"name": "Perl",
"bytes": "36750"
},
{
"name": "Python",
"bytes": "2684560"
},
{
"name": "Racket",
"bytes": "143"
},
{
"name": "Shell",
"bytes": "8432"
}
],
"symlink_target": ""
}
|
import json
from sygnal.gcmpushkin import GcmPushkin
from tests import testutils
from tests.testutils import DummyResponse
DEVICE_EXAMPLE = {"app_id": "com.example.gcm", "pushkey": "spqr", "pushkey_ts": 42}
DEVICE_EXAMPLE2 = {"app_id": "com.example.gcm", "pushkey": "spqr2", "pushkey_ts": 42}
DEVICE_EXAMPLE_WITH_DEFAULT_PAYLOAD = {
"app_id": "com.example.gcm",
"pushkey": "spqr",
"pushkey_ts": 42,
"data": {
"default_payload": {
"aps": {
"mutable-content": 1,
"alert": {"loc-key": "SINGLE_UNREAD", "loc-args": []},
}
}
},
}
DEVICE_EXAMPLE_WITH_BAD_DEFAULT_PAYLOAD = {
"app_id": "com.example.gcm",
"pushkey": "badpayload",
"pushkey_ts": 42,
"data": {
"default_payload": None,
},
}
DEVICE_EXAMPLE_IOS = {
"app_id": "com.example.gcm.ios",
"pushkey": "spqr",
"pushkey_ts": 42,
}
class TestGcmPushkin(GcmPushkin):
"""
    A GCM pushkin with the ability to make HTTP requests removed; instead it
    can be preloaded with canned (virtual) responses.
"""
def __init__(self, name, sygnal, config):
super().__init__(name, sygnal, config)
self.preloaded_response = None
self.preloaded_response_payload = None
self.last_request_body = None
self.last_request_headers = None
self.num_requests = 0
def preload_with_response(self, code, response_payload):
"""
Preloads a fake GCM response.
"""
self.preloaded_response = DummyResponse(code)
self.preloaded_response_payload = response_payload
async def _perform_http_request(self, body, headers):
self.last_request_body = body
self.last_request_headers = headers
self.num_requests += 1
return self.preloaded_response, json.dumps(self.preloaded_response_payload)
class GcmTestCase(testutils.TestCase):
def config_setup(self, config):
config["apps"]["com.example.gcm"] = {
"type": "tests.test_gcm.TestGcmPushkin",
"api_key": "kii",
}
config["apps"]["com.example.gcm.ios"] = {
"type": "tests.test_gcm.TestGcmPushkin",
"api_key": "kii",
"fcm_options": {"content_available": True, "mutable_content": True},
}
def get_test_pushkin(self, name: str) -> TestGcmPushkin:
pushkin = self.sygnal.pushkins[name]
assert isinstance(pushkin, TestGcmPushkin)
return pushkin
def test_expected(self):
"""
Tests the expected case: a good response from GCM leads to a good
response from Sygnal.
"""
gcm = self.get_test_pushkin("com.example.gcm")
gcm.preload_with_response(
200, {"results": [{"message_id": "msg42", "registration_id": "spqr"}]}
)
resp = self._request(self._make_dummy_notification([DEVICE_EXAMPLE]))
self.assertEqual(resp, {"rejected": []})
self.assertEqual(gcm.num_requests, 1)
def test_expected_with_default_payload(self):
"""
Tests the expected case: a good response from GCM leads to a good
response from Sygnal.
"""
gcm = self.get_test_pushkin("com.example.gcm")
gcm.preload_with_response(
200, {"results": [{"message_id": "msg42", "registration_id": "spqr"}]}
)
resp = self._request(
self._make_dummy_notification([DEVICE_EXAMPLE_WITH_DEFAULT_PAYLOAD])
)
self.assertEqual(resp, {"rejected": []})
self.assertEqual(gcm.num_requests, 1)
def test_misformed_default_payload_rejected(self):
"""
Tests that a non-dict default_payload is rejected.
"""
gcm = self.get_test_pushkin("com.example.gcm")
gcm.preload_with_response(
200, {"results": [{"message_id": "msg42", "registration_id": "badpayload"}]}
)
resp = self._request(
self._make_dummy_notification([DEVICE_EXAMPLE_WITH_BAD_DEFAULT_PAYLOAD])
)
self.assertEqual(resp, {"rejected": ["badpayload"]})
self.assertEqual(gcm.num_requests, 0)
def test_rejected(self):
"""
Tests the rejected case: a pushkey rejected to GCM leads to Sygnal
informing the homeserver of the rejection.
"""
gcm = self.get_test_pushkin("com.example.gcm")
gcm.preload_with_response(
200, {"results": [{"registration_id": "spqr", "error": "NotRegistered"}]}
)
resp = self._request(self._make_dummy_notification([DEVICE_EXAMPLE]))
self.assertEqual(resp, {"rejected": ["spqr"]})
self.assertEqual(gcm.num_requests, 1)
def test_batching(self):
"""
Tests that multiple GCM devices have their notification delivered to GCM
together, instead of being delivered separately.
"""
gcm = self.get_test_pushkin("com.example.gcm")
gcm.preload_with_response(
200,
{
"results": [
{"registration_id": "spqr", "message_id": "msg42"},
{"registration_id": "spqr2", "message_id": "msg42"},
]
},
)
resp = self._request(
self._make_dummy_notification([DEVICE_EXAMPLE, DEVICE_EXAMPLE2])
)
self.assertEqual(resp, {"rejected": []})
assert gcm.last_request_body is not None
self.assertEqual(gcm.last_request_body["registration_ids"], ["spqr", "spqr2"])
self.assertEqual(gcm.num_requests, 1)
def test_batching_individual_failure(self):
"""
Tests that multiple GCM devices have their notification delivered to GCM
together, instead of being delivered separately,
and that if only one device ID is rejected, then only that device is
reported to the homeserver as rejected.
"""
gcm = self.get_test_pushkin("com.example.gcm")
gcm.preload_with_response(
200,
{
"results": [
{"registration_id": "spqr", "message_id": "msg42"},
{"registration_id": "spqr2", "error": "NotRegistered"},
]
},
)
resp = self._request(
self._make_dummy_notification([DEVICE_EXAMPLE, DEVICE_EXAMPLE2])
)
self.assertEqual(resp, {"rejected": ["spqr2"]})
assert gcm.last_request_body is not None
self.assertEqual(gcm.last_request_body["registration_ids"], ["spqr", "spqr2"])
self.assertEqual(gcm.num_requests, 1)
def test_fcm_options(self):
"""
Tests that the config option `fcm_options` allows setting a base layer
of options to pass to FCM, for example ones that would be needed for iOS.
"""
gcm = self.get_test_pushkin("com.example.gcm.ios")
gcm.preload_with_response(
200, {"results": [{"registration_id": "spqr_new", "message_id": "msg42"}]}
)
resp = self._request(self._make_dummy_notification([DEVICE_EXAMPLE_IOS]))
self.assertEqual(resp, {"rejected": []})
assert gcm.last_request_body is not None
self.assertEqual(gcm.last_request_body["mutable_content"], True)
self.assertEqual(gcm.last_request_body["content_available"], True)
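# For reference: `DummyResponse` (imported from tests.testutils above) only
# needs to look response-like to the code under test. A minimal stand-in,
# assuming the tests read nothing beyond the status code, might be:
#
#     class DummyResponse:
#         def __init__(self, code):
#             self.code = code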
|
{
"content_hash": "638a3530fa1fd968db86cd8119811716",
"timestamp": "",
"source": "github",
"line_count": 215,
"max_line_length": 88,
"avg_line_length": 34.17209302325581,
"alnum_prop": 0.581734041105213,
"repo_name": "matrix-org/sygnal",
"id": "dcb685bf1267cc5d1f493b8d04a919cf50618ec5",
"size": "7967",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_gcm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "708"
},
{
"name": "Python",
"bytes": "234239"
},
{
"name": "Shell",
"bytes": "3795"
}
],
"symlink_target": ""
}
|
"""This module implements the WordSub class, modelled after a recipe
in "Python Cookbook" (Recipe 3.14, "Replacing Multiple Patterns in a
Single Pass" by Xavier Defrang).
Usage:
Use this class like a dictionary to add before/after pairs:
> subber = WordSub()
> subber["before"] = "after"
> subber["begin"] = "end"
Use the sub() method to perform the substitution:
> print subber.sub("before we begin")
after we end
All matching is intelligently case-insensitive:
> print subber.sub("Before we BEGIN")
After we END
The 'before' words must be complete words -- no prefixes.
The following example illustrates this point:
> subber["he"] = "she"
> print subber.sub("he says he'd like to help her")
she says she'd like to help her
Note that "he" and "he'd" were replaced, but "help" and "her" were
not.
"""
# 'dict' objects weren't available to subclass from until version 2.2.
# Get around this by importing UserDict.UserDict if the built-in dict
# object isn't available.
try:
    dict
except NameError:
    from UserDict import UserDict as dict
import re
import string
class WordSub(dict):
"""All-in-one multiple-string-substitution class."""
    def _wordToRegex(self, word):
        """Convert a word to a regex pattern (string) which matches the word
        at word boundaries."""
        return r"\b%s\b" % re.escape(word)
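    # e.g. _wordToRegex("he") yields r"\bhe\b", which matches "he" and the
    # "he" in "he'd" (the apostrophe is a word boundary) but not "help"/"her"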
def _update_regex(self):
"""Build re object based on the keys of the current
dictionary.
"""
self._regex = re.compile("|".join(map(self._wordToRegex, self.keys())))
self._regexIsDirty = False
def __init__(self, defaults={}):
"""Initialize the object, and populate it with the entries in
the defaults dictionary.
"""
self._regex = None
self._regexIsDirty = True
for k, v in defaults.items():
self[k] = v
def __call__(self, match):
"""Handler invoked for each regex match."""
return self[match.group(0)]
def __setitem__(self, i, y):
self._regexIsDirty = True
        # for each entry the user adds, we actually add three entries:
super(type(self), self).__setitem__(string.lower(i), string.lower(y)) # key = value
super(type(self), self).__setitem__(string.capwords(i), string.capwords(y)) # Key = Value
super(type(self), self).__setitem__(string.upper(i), string.upper(y)) # KEY = VALUE
def sub(self, text):
"""Translate text, returns the modified text."""
if self._regexIsDirty:
self._update_regex()
return self._regex.sub(self, text)
# self-test
if __name__ == "__main__":
subber = WordSub()
subber["apple"] = "banana"
subber["orange"] = "pear"
subber["banana"] = "apple"
subber["he"] = "she"
subber["I'd"] = "I would"
# test case insensitivity
inStr = "I'd like one apple, one Orange and one BANANA."
outStr = "I Would like one banana, one Pear and one APPLE."
if subber.sub(inStr) == outStr:
print "Test #1 PASSED"
else:
print "Test #1 FAILED: '%s'" % subber.sub(inStr)
inStr = "He said he'd like to go with me"
outStr = "She said she'd like to go with me"
if subber.sub(inStr) == outStr:
print "Test #2 PASSED"
else:
print "Test #2 FAILED: '%s'" % subber.sub(inStr)
|
{
"content_hash": "0067f110976e43a8a968bac8a58985c2",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 97,
"avg_line_length": 34.04807692307692,
"alnum_prop": 0.5972889014402711,
"repo_name": "mpetyx/pychatbot",
"id": "ef30ef688256488b85fa4f3814ec8faf654aa140",
"size": "3541",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AIML/PyAIML_CUSTOM_OVERLAY-SEMIOTICS/aiml/WordSub.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "99757"
},
{
"name": "C++",
"bytes": "1736027"
},
{
"name": "CSS",
"bytes": "287248"
},
{
"name": "D",
"bytes": "5487330"
},
{
"name": "Java",
"bytes": "4140"
},
{
"name": "JavaScript",
"bytes": "8460"
},
{
"name": "Objective-C",
"bytes": "39"
},
{
"name": "PHP",
"bytes": "4179"
},
{
"name": "Perl",
"bytes": "40530"
},
{
"name": "Python",
"bytes": "943590"
},
{
"name": "Shell",
"bytes": "175258"
},
{
"name": "TeX",
"bytes": "234627"
},
{
"name": "XSLT",
"bytes": "4027675"
}
],
"symlink_target": ""
}
|
import configparser
import os
import unittest
from parameterized import parameterized
from tests.test_utils import AIRFLOW_MAIN_FOLDER
CONFIG_TEMPLATES_FOLDER = os.path.join(AIRFLOW_MAIN_FOLDER, "airflow", "config_templates")
DEFAULT_AIRFLOW_SECTIONS = [
    'core',
    'logging',
    'metrics',
    'secrets',
    'cli',
    'debug',
    'api',
    'lineage',
    'atlas',
    'operators',
    'hive',
    'webserver',
    'email',
    'smtp',
    'sentry',
    'celery_kubernetes_executor',
    'celery',
    'celery_broker_transport_options',
    'dask',
    'scheduler',
    'kerberos',
    'github_enterprise',
    'elasticsearch',
    'elasticsearch_configs',
    'kubernetes',
    'smart_sensor',
]
DEFAULT_TEST_SECTIONS = [
    'core',
    'logging',
    'cli',
    'api',
    'operators',
    'hive',
    'webserver',
    'email',
    'smtp',
    'celery',
    'scheduler',
    'elasticsearch',
    'elasticsearch_configs',
    'kubernetes',
]
class TestAirflowCfg(unittest.TestCase):
@parameterized.expand(
[
("default_airflow.cfg",),
("default_test.cfg",),
]
)
def test_should_be_ascii_file(self, filename: str):
with open(os.path.join(CONFIG_TEMPLATES_FOLDER, filename), "rb") as f:
content = f.read().decode("ascii")
assert content
@parameterized.expand(
[
(
"default_airflow.cfg",
DEFAULT_AIRFLOW_SECTIONS,
),
(
"default_test.cfg",
DEFAULT_TEST_SECTIONS,
),
]
)
def test_should_be_ini_file(self, filename: str, expected_sections):
filepath = os.path.join(CONFIG_TEMPLATES_FOLDER, filename)
config = configparser.ConfigParser()
config.read(filepath)
assert expected_sections == config.sections()
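# Ad-hoc sketch of the same check outside the unittest runner. It assumes it
# is executed from within the Airflow source tree, like the tests above:
if __name__ == "__main__":
    parser = configparser.ConfigParser()
    parser.read(os.path.join(CONFIG_TEMPLATES_FOLDER, "default_airflow.cfg"))
    print(parser.sections())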
|
{
"content_hash": "9412db8ced62fa6030d311b49ff45c47",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 90,
"avg_line_length": 21.50574712643678,
"alnum_prop": 0.5638695884553715,
"repo_name": "sekikn/incubator-airflow",
"id": "aa98fa93bf4effb9e45466ce42f5220939039997",
"size": "2656",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tests/core/test_config_templates.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13715"
},
{
"name": "Dockerfile",
"bytes": "15900"
},
{
"name": "HTML",
"bytes": "151266"
},
{
"name": "JavaScript",
"bytes": "25486"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "10792443"
},
{
"name": "Shell",
"bytes": "243458"
},
{
"name": "TSQL",
"bytes": "879"
}
],
"symlink_target": ""
}
|
from lib.utils import connect, tor_init
class Crawler:
    def __init__(self, q, tor):
        # q is expected to be a (task_queue, result_queue) pair
        self.q = q
        if tor:
            tor_init()
    def run(self):
        # poll the task queue forever: fetch each queued package and push
        # the result of connect() onto the result queue
        while True:
            if not self.q[0].empty():
                package = self.q[0].get()
                result = connect(package)
                self.q[1].put(result)
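# Minimal driver sketch (not part of the module): wires a task queue and a
# result queue into a Crawler. `lib.utils.connect` and the shape of the
# "package" objects are assumptions here.
if __name__ == '__main__':
    import threading
    try:
        from queue import Queue   # Python 3
    except ImportError:
        from Queue import Queue   # Python 2
    tasks, results = Queue(), Queue()
    crawler = Crawler((tasks, results), tor=False)
    worker = threading.Thread(target=crawler.run)
    worker.daemon = True          # run() loops forever, so don't block exit
    worker.start()
    tasks.put({'url': 'http://example.com'})   # hypothetical package shape
    print(results.get())          # blocks until the crawler pushes a result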
|
{
"content_hash": "2813b41ceab7ab6f9240f930ecf44d33",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 41,
"avg_line_length": 23.133333333333333,
"alnum_prop": 0.4697406340057637,
"repo_name": "OAlienO/DreamStorm",
"id": "13646ccc1eb19f76bf567f57bc80a8d1e84371a1",
"size": "372",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DreamStorm/Crawler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9697"
}
],
"symlink_target": ""
}
|
'''
*****************************************
Author: zhlinh
Email: zhlinhng@gmail.com
Version: 0.0.1
Created Time: 2016-05-18
Last_modify: 2016-05-18
******************************************
'''
'''
You are given coins of different denominations and a total amount of money amount.
Write a function to compute the fewest number of coins that you need to make up
that amount. If that amount of money cannot be made up by any combination of
the coins, return -1.
Example 1:
coins = [1, 2, 5], amount = 11
return 3 (11 = 5 + 5 + 1)
Example 2:
coins = [2], amount = 3
return -1.
Note:
You may assume that you have an infinite number of each kind of coin.
Credits:
Special thanks to @jianchao.li.fighter for adding this problem and
creating all test cases.
'''
class Solution(object):
def coinChange(self, coins, amount):
"""
:type coins: List[int]
:type amount: int
:rtype: int
"""
if amount < 1:
return 0
coins.sort(reverse=True)
self.times = 2 ** 31 - 1
self.helper(coins, 0, 0, amount)
return -1 if self.times == 2 ** 31 - 1 else self.times
def helper(self, coins, pos, count, amount):
if pos >= len(coins) or count + 1 >= self.times:
return
maxN = amount // coins[pos]
while maxN >= 0:
newCount = count + maxN
rem = amount - maxN * coins[pos]
if rem > 0 and newCount + 1 < self.times:
self.helper(coins, pos + 1, newCount, rem)
else:
if rem == 0 and newCount < self.times:
self.times = newCount
break
maxN -= 1
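# Quick self-check against the examples in the problem statement above
# (not part of the submitted solution):
if __name__ == '__main__':
    s = Solution()
    assert s.coinChange([1, 2, 5], 11) == 3   # 11 = 5 + 5 + 1
    assert s.coinChange([2], 3) == -1         # 3 cannot be made from 2s
    assert s.coinChange([1], 0) == 0          # zero amount needs zero coins
    print("all coinChange checks passed")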
|
{
"content_hash": "ed2eb1c21824d9cf08c36f763c16f19a",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 82,
"avg_line_length": 28.566666666666666,
"alnum_prop": 0.543757292882147,
"repo_name": "zhlinh/leetcode",
"id": "efd61e889891cddd3839514dd0e8b6970daf899f",
"size": "1760",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "0322.Coin Change/solution.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "478111"
}
],
"symlink_target": ""
}
|
import json
from simpoll_backend.models import Poll
def get_poll(poll_id):
# for some reason mongoengine uses id instead of _id..
poll = Poll.objects.get_or_404(id=poll_id)
return to_json(poll)
def get_polls_chron():
# arbitrarily limit to 100 objects
polls = Poll.objects.all().order_by('-id').limit(100)
return to_json_arr(polls)
def get_polls_top():
    polls = Poll.objects.order_by('-topscore').limit(100)
    return to_json_arr(polls)
def put_poll(poll_id, request):
    new_poll_dict = request.json
    # look the poll up by the id from the route, then apply the update
    poll = Poll.objects.get_or_404(id=poll_id)
    poll['option1votes'] = int(new_poll_dict['option1votes'])
    poll['option2votes'] = int(new_poll_dict['option2votes'])
    poll['topscore'] = poll['option1votes'] + poll['option2votes']
    poll.save()
    return to_json(poll)
def post_poll(request):
# new_poll_dict = json.loads(request.json)
new_poll_dict = request.json
new_poll = Poll(question=new_poll_dict['question'],
option1=new_poll_dict['option1'],
option2=new_poll_dict['option2'])
new_poll.save()
return to_json(new_poll)
# helper func to make 1 json obj
def to_json(doc):
json_dict = {
"id": str(doc.id),
"created_at": doc.created_at.isoformat(),
"question": doc.question,
"option1": doc.option1,
"option2": doc.option2,
"option1votes": doc.option1votes,
"option2votes": doc.option2votes,
"topscore": doc.topscore
}
return json.dumps(json_dict)
# helper function to get an array of json docs
def to_json_arr(docs):
docs_jsons = [to_json(doc) for doc in docs]
full_json = "[%s]" % ",\n".join(docs_jsons)
return full_json
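# Hypothetical Flask wiring for the handlers above -- the route layout and
# the app factory are assumptions, not part of this module (get_or_404 also
# presumes flask-mongoengine is configured):
def create_app():
    from flask import Flask, request
    app = Flask(__name__)
    app.add_url_rule('/polls', 'polls_chron', get_polls_chron,
                     methods=['GET'])
    app.add_url_rule('/polls/<poll_id>', 'poll', get_poll, methods=['GET'])
    app.add_url_rule('/polls/<poll_id>', 'poll_update',
                     lambda poll_id: put_poll(poll_id, request),
                     methods=['PUT'])
    app.add_url_rule('/polls/new', 'poll_create',
                     lambda: post_poll(request), methods=['POST'])
    return app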
|
{
"content_hash": "2c3e254ce6d3c81c93d7100c1d1c059d",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 66,
"avg_line_length": 31.854545454545455,
"alnum_prop": 0.6255707762557078,
"repo_name": "dpuleri/simpoll_backend",
"id": "84a6abb404bd70708a6c1fb2800bacda0eb08936",
"size": "1752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "restful.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3814"
},
{
"name": "JavaScript",
"bytes": "2022"
},
{
"name": "Python",
"bytes": "4631"
},
{
"name": "Shell",
"bytes": "244"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='OnlineNIC',
version='0.1.2',
url='http://github.com/kolanos/onlinenic',
license='MIT',
author='Michael Lavers',
author_email='kolanos@gmail.com',
description='A simple wrapper for the OnlineNIC API.',
long_description=read('README.rst'),
py_modules=['onlinenic'],
platforms='any',
install_requires=['BeautifulSoup'],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
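# Typical workflow with this setup script (run from the project root):
#
#   python setup.py sdist     # build a source distribution
#   pip install .             # or install straight into the environment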
|
{
"content_hash": "14e103dae3c6378a607e71d87d0bfee7",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 70,
"avg_line_length": 27.967741935483872,
"alnum_prop": 0.6274509803921569,
"repo_name": "kolanos/onlinenic",
"id": "ad81c86082dfa86a65d6a5082bb59b5f1b03981f",
"size": "867",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25423"
}
],
"symlink_target": ""
}
|
"""\
====================================================
"Postbox" particle for Axon/Kamaelia visualisation
====================================================
This is an implementation of a rendering particle for "Postbox" particles in
topology visualisation of Axon/Kamaelia systems, representing inboxes/outboxes.
Example Usage
-------------
See Kamaelia.Visualisation.Axon.AxonLaws or
Kamaelia.Visualisation.Axon.AxonVisualiserServer
How does it work?
-----------------
This object subclasses Kamaelia.Physics.Simple.Particle and adds methods to
support rendering. Specifically, rendering to represent an inbox or outbox in
an Axon/Kamaelia system.
At initialisation, provide a unique ID, a starting (x,y) position tuple, a name
and whether it is an inbox or outbox. The name is abbreviated and displayed as
the particle.
If the particle becomes selected, then it will render its full name at the top
of the display surface.
At initialisation the label is rendered at several different 45 degree angles.
When rendering, the appropriate one is chosen depending on the directions of
bonds (linkages) this particle is involved in.
It also renders bonds *from* this particle *to* another. Their colour depends
on whether they represent ordinary or passthrough linkages. This is determined
by looking at whether both postbox particles involved are of the same type.
It is assumed that any bonds going *from* this particle *to* another go to
another postbox particle (not a component particle). If this is not the case
then behaviour is undefined.
Rendering is performed by a generator, returned when the render() method is
called. Its behaviour is that needed for the framework for multi-pass rendering
that is used by TopologyViewer.
The generator yields the number of the rendering pass it wishes to be called
on next. Each time it is subsequently called, it performs the rendering required
for that pass. It then yields the number of the next required pass or completes
if there is no more rendering required.
A setOffset() method is also implemented to allow the particle's coordinates
to be offset. This therefore makes it possible to scroll the particles around
the display surface.
See TopologyViewer for more details.
"""
from Kamaelia.Visualisation.PhysicsGraph import BaseParticle
import pygame
from pygame.locals import *
def abbreviate(string):
"""Abbreviates strings to capitals, word starts and numerics and underscores"""
out = ""
prev = ""
for c in string:
if c.isupper() or c.isdigit() or c == "_" or c == "." or (c.isalpha() and not prev.isalpha()):
out += c.upper()
prev = c
    return out
_COMPONENT_RADIUS = 32
def nearest45DegreeStep( delta ):
"""Returns (in degrees) the nearest 45 degree angle match to the supplied vector.
Returned values are one of 0, 45, 90, 135, 180, 225, 270, 315.
If the supplied vector is (0,0), the returned angle is 0.
"""
dx,dy = delta
if dx == 0 and dy == 0:
return 0
# rotate dy and dx by +22.5 degrees,
# so the boundaries between the 45 degree regions now nicely
# line up with 0, 45, 90, ... instead of 22.5, 67,5 etc
cos = 0.92387953251128674 # math.cos(math.radians(22.5))
sin = 0.38268343236508978 # math.sin(math.radians(22.5))
dx, dy = (dx*cos - dy*sin), (dy*cos + dx*sin)
# lookup angle against properties of dy and dx
index = ( dy > 0, dx > 0, abs(dy) > abs(dx) )
return angleMappings[index]
angleMappings = { (True, True, False) : 0,
(True, True, True ) : 45,
(True, False, True ) : 90,
(True, False, False) : 135,
(False, False, False) : 180,
(False, False, True ) : 225,
(False, True, True ) : 270,
(False, True, False) : 315 }
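# Reference values for nearest45DegreeStep, derived from the mapping above
# (dy follows the mathematical convention here, +y "up"):
#   nearest45DegreeStep((1, 0))  -> 0
#   nearest45DegreeStep((1, 1))  -> 45
#   nearest45DegreeStep((0, 1))  -> 90
#   nearest45DegreeStep((-1, 0)) -> 180
#   nearest45DegreeStep((0, 0))  -> 0   (degenerate case, by definition)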
class PPostbox(BaseParticle):
"""\
PPostbox -> new PPostbox object.
Particle representing an Axon/Kamaelia inbox/outbox for topology
visualisation.
Keyword arguments:
- position -- (x,y) tuple of particle coordinates
- name -- Name for the inbox/outbox being represented
- boxtype -- "inbox" or "outbox"
"""
# mapping of angles to labels
labelangles = { 0:2, 45:3, 90:0, 135:1, 180:2, 225:3, 270:0, 315:1 }
# different colours for linkages depending on whether they are passthrough
# (inbox->inbox, outbox->outbox) or ordinary (inbox<->outbox)
colours = { ("inbox", "outbox"):(0,160,0),
("outbox", "inbox" ):(0,160,0),
("inbox", "inbox" ):(224,128,0),
("outbox", "outbox"):(224,128,0) }
def Inbox(ID, position, name):
"""\
Inbox(ID,position,name) -> new PPostbox object with boxtype "inbox".
Static method.
"""
return PPostbox(ID=ID, position=position, name=name, boxtype="inbox")
def Outbox(ID, position, name):
"""\
Outbox(ID,position,name) -> new PPostbox object with boxtype "outbox".
Static method.
"""
return PPostbox(ID=ID, position=position, name=name, boxtype="outbox")
Inbox = staticmethod(Inbox)
Outbox = staticmethod(Outbox)
def __init__(self, ID, position, name, boxtype):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(PPostbox,self).__init__(position=position, ID = ID )
self.name = name
self.ptype = "postbox"
self.postboxtype = boxtype
self.left = 0
self.top = 0
self.radius = 16
self.buildLabels()
self.selected = False
pygame.font.init()
def buildLabels(self):
"""\
Pre-renders text labels to surfaces for different 45 degree
angles.
On exit:
self.label is a list of surfaces containing rendered labels
self.slabel is the same but coloured for when the particle is selected
self.labelxo is a list of x-offsets for each label's centre.
        self.labelyo is a list of y-offsets for each label's centre.
self.desclabel is the description label displayed when selected
"""
from pygame.transform import rotozoom, rotate
font = pygame.font.Font(None, 14)
label = font.render(" "+abbreviate(self.name)+" ", True, (0,0,0), )
        self.label = []     # ordinary (unselected) labels
self.labelxo = []
self.labelyo = []
self.label.append(rotate(label, 90))
self.label.append(rotozoom(label, 45, 1.0))
self.label.append(label)
self.label.append(rotozoom(label, -45, 1.0))
slabel = font.render(" "+abbreviate(self.name)+" ", True, (96,96,255), )
self.slabel = []
self.slabel.append(rotate(slabel, 90))
self.slabel.append(rotozoom(slabel, 45, 1.0))
self.slabel.append(slabel)
self.slabel.append(rotozoom(slabel, -45, 1.0))
for l in self.label:
self.labelxo.append( - l.get_width() / 2 )
self.labelyo.append( - l.get_height() / 2 )
font = pygame.font.Font(None, 20)
self.desclabel = font.render(self.postboxtype.upper()+" : "+self.name, True, (0,0,0), (255,255,255))
def render(self, surface):
"""\
Multi-pass rendering generator.
Renders this particle in multiple passes to the specified pygame surface -
yielding the number of the next pass to be called on between each. Completes
once it is fully rendered.
"""
direction = (0,0) # default direction for the text label
yield 1
x = int(self.pos[0] - self.left)
y = int(self.pos[1] - self.top )
for p in self.bondedTo:
endx = int(p.pos[0] - self.left)
endy = int(p.pos[1] - self.top)
colour = PPostbox.colours[ (self.postboxtype, p.postboxtype) ]
pygame.draw.line(surface, colour, (x,y), (endx,endy) )
            # draw a pretty arrow on the line, showing the direction
mid = ( (x+endx*3)/4, (y+endy*3)/4 )
direction = ( (endx-x), (endy-y) )
length = ( direction[0]**2 + direction[1]**2 )**0.5
direction = [ 6*n / length for n in direction ]
norm = ( -direction[1], direction[0] )
leftarrow = ( mid[0] - direction[0] - norm[0], mid[1] - direction[1] - norm[1] )
rightarrow = ( mid[0] - direction[0] + norm[0], mid[1] - direction[1] + norm[1] )
pygame.draw.line(surface, colour, mid, leftarrow )
pygame.draw.line(surface, colour, mid, rightarrow )
yield 3
# if we've not got a 'direction' yet for the text label (from bonds 'from' this node )
# then look at bonds 'to' this node from other nodes of the same type
if direction==(0,0):
for p in self.bondedFrom:
if p.ptype == self.ptype:
endx = int(p.pos[0] - self.left)
endy = int(p.pos[1] - self.top)
direction = ( (endx-x), (endy-y) )
# render name label, tilted along the 'direction'
i = PPostbox.labelangles[ nearest45DegreeStep(direction) ]
if self.selected:
l = self.slabel[i]
else:
l = self.label[i]
surface.blit(l, ( x + self.labelxo[i], y + self.labelyo[i] ) )
if self.selected:
yield 10
surface.blit(self.desclabel, (72,16) )
def setOffset( self, offset ):
"""\
Set the offset of the top left corner of the rendering area.
If this particle is at (px,py) it will be rendered at (px-x,py-y).
"""
x,y = offset
self.left = x
self.top = y
def select( self ):
"""Tell this particle it is selected."""
self.selected = True
def deselect( self ):
"""Tell this particle it is deselected."""
self.selected = False
|
{
"content_hash": "2a83bee9d0bcb8068f9c18e66e9bf58a",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 108,
"avg_line_length": 36.38461538461539,
"alnum_prop": 0.5837017105516048,
"repo_name": "sparkslabs/kamaelia_",
"id": "989eecae1a0e28eeef8747dfac3be0b3a13479b6",
"size": "11311",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Code/Python/Kamaelia/Kamaelia/Visualisation/Axon/PPostbox.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3814"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "Makefile",
"bytes": "5768"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "504"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Pure Data",
"bytes": "7485482"
},
{
"name": "Python",
"bytes": "18896248"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "707430"
}
],
"symlink_target": ""
}
|
from logging.handlers import RotatingFileHandler
import logging
def init_logger(app, log_file):
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]')
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(formatter)
file_handler = RotatingFileHandler(log_file, maxBytes=100000, backupCount=10)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.DEBUG)
app.logger.propagate = False
def init_task_logger(app, log_file):
logger = logging.getLogger("apscheduler.scheduler")
formatter = logging.Formatter('%(asctime)s %(levelname)s: %(message)s '
'[in %(pathname)s:%(lineno)d]')
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.DEBUG)
stream_handler.setFormatter(formatter)
file_handler = RotatingFileHandler(log_file, maxBytes=100000, backupCount=10)
file_handler.setLevel(logging.DEBUG)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
logger.setLevel(logging.DEBUG)
logger.propagate = False
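if __name__ == '__main__':
    # Minimal usage sketch (illustrative): any object exposing a `logger`
    # attribute works here; a Flask app is the intended consumer, which is
    # an assumption based on the signatures above.
    class _DemoApp(object):
        logger = logging.getLogger('demo')

    init_logger(_DemoApp(), 'demo.log')
    _DemoApp.logger.info('handlers attached')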
|
{
"content_hash": "e1f3f3e586a8067461448250fdc12122",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 81,
"avg_line_length": 36.1764705882353,
"alnum_prop": 0.7284552845528456,
"repo_name": "ragnraok/MonoReader",
"id": "dd0d8f811c3f85ab77aaf3f6a1849657c0a35084",
"size": "1230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monoweb/mono/logger/logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1146"
},
{
"name": "Groovy",
"bytes": "1784"
},
{
"name": "Java",
"bytes": "172840"
},
{
"name": "Python",
"bytes": "66709"
}
],
"symlink_target": ""
}
|
import os
import sqlite3
import sys
MIN_SINGLE_DB_FORMAT = 19
def get_format(wc_path):
entries = os.path.join(wc_path, '.svn', 'entries')
wc_db = os.path.join(wc_path, '.svn', 'wc.db')
formatno = 'not under version control'
if os.path.exists(wc_db):
conn = sqlite3.connect(wc_db)
curs = conn.cursor()
curs.execute('pragma user_version;')
formatno = curs.fetchone()[0]
elif os.path.exists(entries):
formatno = int(open(entries).readline())
elif os.path.exists(wc_path):
parent_path = os.path.dirname(os.path.abspath(wc_path))
if wc_path != parent_path:
formatno = get_format(parent_path)
if formatno >= MIN_SINGLE_DB_FORMAT:
return formatno
return formatno
def print_format(wc_path):
# see subversion/libsvn_wc/wc.h for format values and information
# 1.0.x -> 1.3.x: format 4
# 1.4.x: format 8
# 1.5.x: format 9
# 1.6.x: format 10
# 1.7.x: format 29
formatno = get_format(wc_path)
print '%s: %s' % (wc_path, formatno)
if __name__ == '__main__':
paths = sys.argv[1:]
if not paths:
paths = ['.']
for wc_path in paths:
print_format(wc_path)
|
{
"content_hash": "abbedaa3f32eb636354167569b2d8847",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 67,
"avg_line_length": 25.488888888888887,
"alnum_prop": 0.6277244986922407,
"repo_name": "centic9/subversion-ppa",
"id": "fc6ef0789d9108b0324b545b3011a3e4819ac3af",
"size": "1956",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/dev/wc-format.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1036"
},
{
"name": "C",
"bytes": "35999482"
},
{
"name": "C++",
"bytes": "561218"
},
{
"name": "HTML",
"bytes": "1372"
},
{
"name": "Java",
"bytes": "1270658"
},
{
"name": "M4",
"bytes": "157024"
},
{
"name": "Makefile",
"bytes": "689573"
},
{
"name": "Objective-C",
"bytes": "459823"
},
{
"name": "PLSQL",
"bytes": "1622"
},
{
"name": "PLpgSQL",
"bytes": "2212"
},
{
"name": "Perl",
"bytes": "247422"
},
{
"name": "Python",
"bytes": "6215649"
},
{
"name": "Roff",
"bytes": "25001"
},
{
"name": "Ruby",
"bytes": "440494"
},
{
"name": "Shell",
"bytes": "456885"
}
],
"symlink_target": ""
}
|
import logging
import re
from django.core.management.base import BaseCommand
from optparse import make_option
from corehq.toggles import all_toggles
from corehq.apps.domain.models import Domain
from corehq.apps.app_manager.dbaccessors import get_apps_in_domain
from corehq.apps.app_manager.util import ParentCasePropertyBuilder, save_xform
logger = logging.getLogger('cmitfb_migrate_syntax')
logger.setLevel('DEBUG')
class Command(BaseCommand):
help = '''
Migrate apps using vellum case management from the old
#case/type/property syntax to the new #case/relationship/property syntax.
Pass --save to actually save changes.
'''
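    # Illustrative example of the rewrite this command performs (the case
    # type names below are made up): a form referencing `#case/mother/edd`,
    # where `mother` is the parent case type of the form's base case type,
    # becomes `#case/parent/edd`; a grandparent type reference likewise
    # becomes `#case/grandparent/edd`.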
option_list = BaseCommand.option_list + (
make_option('--save',
action='store_true',
dest='save',
help='Save changes to forms.'),
)
affixes = ["", "parent/", "grandparent/"]
def _form_error(self, form, error=""):
logger.error("{} (domain {}, app {}, form {})".format(error, form.get_app().domain,
form.get_app().id, form.unique_id))
def _replace_in_form(self, form, relationships, case_type, affix_index):
if not case_type:
return
if affix_index < len(self.affixes):
#logger.info("Replacing #case/{}/ with #case/{}".format(case_type, self.affixes[affix_index]))
form.source = form.source.replace("#case/{}/".format(case_type),
"#case/{}".format(self.affixes[affix_index]))
parents = relationships[case_type].get("parent", [])
if len(parents) > 1:
self._form_error(form, "Multiple parents: {}".format(", ".join(parents)))
elif len(parents) == 1:
self._replace_in_form(form, relationships, parents[0], affix_index + 1)
else:
self._form_error(form, "Hierarchy too deep")
def handle(self, *args, **options):
toggle_map = dict([(t.slug, t) for t in all_toggles()])
domains = [row['key'] for row in Domain.get_all(include_docs=False)]
for domain in domains:
if toggle_map['rich_text'].enabled(domain) or toggle_map['experimental_ui'].enabled(domain):
#logger.info('migrating domain {}'.format(domain))
apps = get_apps_in_domain(domain, include_remote=False)
for app in apps:
app_dirty = False
builder = ParentCasePropertyBuilder(app)
relationships = builder.get_parent_type_map(app.get_case_types(), allow_multiple_parents=True)
for module in app.modules:
for form in module.forms:
if form.doc_type == 'Form' and form.requires_case():
#logger.info('migrating form {}'.format(form.name.get('en', form.name)))
base_case_type = form.get_module().case_type
self._replace_in_form(form, relationships, base_case_type, 0)
prefixes = re.findall(r'#case/\w+/', form.source)
prefixes = set(prefixes)
for p in prefixes:
if p != "#case/parent/" and p != "#case/grandparent/":
self._form_error(form, "Unknown prefix remaining: {}".format(p))
if options['save']:
try:
save_xform(form.get_app(), form, form.source)
app_dirty = True
except:
self._form_error(form, "Form xml invalid")
if app_dirty:
app.save()
logger.info('done with cmitfb_migrate_syntax')
|
{
"content_hash": "95a3b686513ae31bb083aa9ea45f4a6a",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 114,
"avg_line_length": 48.926829268292686,
"alnum_prop": 0.519690927218345,
"repo_name": "qedsoftware/commcare-hq",
"id": "19bc0dd2e42b60b7ec6a3c6c295d965e3764b7a9",
"size": "4012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/apps/app_manager/management/commands/cmitfb_migrate_syntax.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15950"
},
{
"name": "CSS",
"bytes": "508392"
},
{
"name": "HTML",
"bytes": "2869325"
},
{
"name": "JavaScript",
"bytes": "2395360"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "125298"
},
{
"name": "Python",
"bytes": "14670713"
},
{
"name": "Shell",
"bytes": "37514"
}
],
"symlink_target": ""
}
|
from authorize import Customer
from authorize import AuthorizeResponseError
from datetime import date
from nose.plugins.attrib import attr
from unittest2 import TestCase
FULL_CUSTOMER = {
'email': 'vincent@vincentcatalano.com',
'description': 'Cool web developer guy',
'customer_type': 'individual',
'billing': {
'first_name': 'Rob',
'last_name': 'Oteron',
'company': 'Robotron Studios',
'address': '101 Computer Street',
'city': 'Tucson',
'state': 'AZ',
'zip': '85704',
'country': 'US',
'phone_number': '520-123-4567',
'fax_number': '520-456-7890',
},
'bank_account': {
'routing_number': '322271627',
'account_number': '00987467838473',
'name_on_account': 'Rob Otron',
'bank_name': 'Evil Bank Co.',
'echeck_type': 'CCD'
},
'shipping': {
'first_name': 'Rob',
'last_name': 'Oteron',
'company': 'Robotron Studios',
'address': '101 Computer Street',
'city': 'Tucson',
'state': 'AZ',
'zip': '85704',
'country': 'US',
'phone_number': '520-123-4567',
'fax_number': '520-456-7890',
}
}
CUSTOMER_WITH_CARD = {
'email': 'vincent@vincentcatalano.com',
'description': 'Cool web developer guy',
'credit_card': {
'card_number': '4111111111111111',
'expiration_date': '04/{0}'.format(date.today().year + 1),
'card_code': '456',
},
}
@attr('live_tests')
class CustomerTests(TestCase):
def test_live_customer(self):
# Create customers
result = Customer.create()
Customer.create(FULL_CUSTOMER)
Customer.create(CUSTOMER_WITH_CARD)
        # Read customer information. This returns the payment profile IDs
        # and address IDs for the user
customer_id = result.customer_id
Customer.details(customer_id)
# Update customer information
Customer.update(customer_id, {
'email': 'vincent@test.com',
'description': 'Cool web developer guy'
})
# Delete customer information
Customer.delete(customer_id)
self.assertRaises(AuthorizeResponseError, Customer.delete, customer_id)
Customer.list()
|
{
"content_hash": "457b4b3d5936a3992462ed7cb7ff7419",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 79,
"avg_line_length": 27.865853658536587,
"alnum_prop": 0.5776805251641138,
"repo_name": "ClearcodeHQ/py-authorize",
"id": "c253be65d5eb3316cac881dfbaee07d19642fdee",
"size": "2285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_live_customer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "135046"
}
],
"symlink_target": ""
}
|
from builtins import range
from airflow import configuration
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.state import State
PARALLELISM = configuration.conf.getint('core', 'PARALLELISM')
class BaseExecutor(LoggingMixin):
def __init__(self, parallelism=PARALLELISM):
"""
        Class to derive from in order to interface with executor-type systems
        like Celery, Mesos, Yarn and the like.
:param parallelism: how many jobs should run at one time. Set to
``0`` for infinity
:type parallelism: int
"""
self.parallelism = parallelism
self.queued_tasks = {}
self.running = {}
self.event_buffer = {}
def start(self): # pragma: no cover
"""
Executors may need to get things started. For example LocalExecutor
starts N workers.
"""
pass
def queue_command(self, task_instance, command, priority=1, queue=None):
key = task_instance.key
if key not in self.queued_tasks and key not in self.running:
self.log.info("Adding to queue: %s", command)
self.queued_tasks[key] = (command, priority, queue, task_instance)
def queue_task_instance(
self,
task_instance,
mark_success=False,
pickle_id=None,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=None,
cfg_path=None):
pool = pool or task_instance.pool
# TODO (edgarRd): AIRFLOW-1985:
# cfg_path is needed to propagate the config values if using impersonation
# (run_as_user), given that there are different code paths running tasks.
# For a long term solution we need to address AIRFLOW-1986
command = task_instance.command(
local=True,
mark_success=mark_success,
ignore_all_deps=ignore_all_deps,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state,
pool=pool,
pickle_id=pickle_id,
cfg_path=cfg_path)
self.queue_command(
task_instance,
command,
priority=task_instance.task.priority_weight_total,
queue=task_instance.task.queue)
def has_task(self, task_instance):
"""
Checks if a task is either queued or running in this executor
:param task_instance: TaskInstance
:return: True if the task is known to this executor
"""
        return (task_instance.key in self.queued_tasks or
                task_instance.key in self.running)
def sync(self):
"""
Sync will get called periodically by the heartbeat method.
        Executors should override this to gather statuses.
"""
pass
def heartbeat(self):
# Triggering new jobs
if not self.parallelism:
open_slots = len(self.queued_tasks)
else:
open_slots = self.parallelism - len(self.running)
self.log.debug("%s running task instances", len(self.running))
self.log.debug("%s in queue", len(self.queued_tasks))
self.log.debug("%s open slots", open_slots)
sorted_queue = sorted(
[(k, v) for k, v in self.queued_tasks.items()],
key=lambda x: x[1][1],
reverse=True)
for i in range(min((open_slots, len(self.queued_tasks)))):
key, (command, _, queue, ti) = sorted_queue.pop(0)
# TODO(jlowin) without a way to know what Job ran which tasks,
# there is a danger that another Job started running a task
# that was also queued to this executor. This is the last chance
# to check if that happened. The most probable way is that a
# Scheduler tried to run a task that was originally queued by a
# Backfill. This fix reduces the probability of a collision but
# does NOT eliminate it.
self.queued_tasks.pop(key)
ti.refresh_from_db()
if ti.state != State.RUNNING:
self.running[key] = command
self.execute_async(key, command=command, queue=queue)
else:
self.log.debug(
'Task is already running, not sending to executor: %s',
key
)
# Calling child class sync method
self.log.debug("Calling the %s sync method", self.__class__)
self.sync()
def change_state(self, key, state):
self.running.pop(key)
self.event_buffer[key] = state
def fail(self, key):
self.change_state(key, State.FAILED)
def success(self, key):
self.change_state(key, State.SUCCESS)
def get_event_buffer(self, dag_ids=None):
"""
        Returns and flushes the event buffer. If dag_ids is specified,
        it will only return and flush events for the given dag_ids.
        Otherwise it returns and flushes all events.
        :param dag_ids: the dag_ids to return events for; if None, returns all
        :return: a dict of events
"""
cleared_events = dict()
if dag_ids is None:
cleared_events = self.event_buffer
self.event_buffer = dict()
else:
for key in list(self.event_buffer.keys()):
dag_id, _, _ = key
if dag_id in dag_ids:
cleared_events[key] = self.event_buffer.pop(key)
return cleared_events
def execute_async(self, key, command, queue=None): # pragma: no cover
"""
This method will execute the command asynchronously.
"""
raise NotImplementedError()
def end(self): # pragma: no cover
"""
        This method is called when the caller is done submitting jobs and
        wants to wait synchronously for all previously submitted jobs to
        complete.
"""
raise NotImplementedError()
def terminate(self):
"""
This method is called when the daemon receives a SIGTERM
"""
raise NotImplementedError()
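# Minimal concrete-executor sketch (illustrative, not part of Airflow):
# shows the contract a subclass fills in. execute_async() is where real
# executors hand work to Celery, a subprocess pool, etc.; here the task
# "completes" immediately and synchronously.
class _ExampleInlineExecutor(BaseExecutor):
    def execute_async(self, key, command, queue=None):
        self.log.info("pretending to run %s", command)
        self.success(key)    # report completion back via change_state()

    def sync(self):
        # Nothing to poll; work finished inside execute_async() above.
        pass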
|
{
"content_hash": "98922fd42898c9a245928a6a94d9c9fe",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 87,
"avg_line_length": 35.39325842696629,
"alnum_prop": 0.5876190476190476,
"repo_name": "wndhydrnt/airflow",
"id": "30ecee06a19bce3081e93fd61396b1a21521facb",
"size": "7113",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "airflow/executors/base_executor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109698"
},
{
"name": "HTML",
"bytes": "270515"
},
{
"name": "JavaScript",
"bytes": "1988427"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "3580266"
},
{
"name": "Shell",
"bytes": "36912"
}
],
"symlink_target": ""
}
|
import pytest
import rhc.task as task
@pytest.fixture
def happy():
def cb(rc, result):
assert rc == 0
return task.Task(cb)
@pytest.fixture
def not_happy():
def cb(rc, result):
assert rc != 0
return task.Task(cb)
def test_simple(happy):
happy.respond('yay')
def test_error(not_happy):
not_happy.error('boo')
def task_cmd(task, result):
task.worked = True
def partial_happy(cb):
cb(0, 'yay')
def partial_not_happy(cb):
cb(1, 'boo')
def test_defer(happy):
happy.worked = False
happy.defer(task_cmd, partial_happy)
assert happy.worked
def test_final_fn(happy):
final = {'answer': False}
def f():
final['answer'] = True
happy.defer(task_cmd, partial_happy)
assert final['answer'] is False
happy.defer(task_cmd, partial_happy, final_fn=f)
assert final['answer'] is True
|
{
"content_hash": "9497a0a78de92adeaed7f9c3450a6f32",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 52,
"avg_line_length": 16.58490566037736,
"alnum_prop": 0.6257110352673493,
"repo_name": "robertchase/rhc",
"id": "41b2c8f3422c055280549400d883327bc103a464",
"size": "879",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/test_task.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "686"
},
{
"name": "Jupyter Notebook",
"bytes": "42516"
},
{
"name": "Makefile",
"bytes": "333"
},
{
"name": "Python",
"bytes": "236043"
},
{
"name": "Shell",
"bytes": "183"
},
{
"name": "TSQL",
"bytes": "833"
}
],
"symlink_target": ""
}
|
from firenado import tornadoweb
class IndexHandler(tornadoweb.TornadoHandler):
def get(self):
self.render("index.html", schedulers=self.component.schedulers)
|
{
"content_hash": "b98e70f4a9b778ecf10df8453a1f9291",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 71,
"avg_line_length": 24.714285714285715,
"alnum_prop": 0.7572254335260116,
"repo_name": "candango/firenado",
"id": "a029672924d330ae3cc56a828de74735ef764197",
"size": "173",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "examples/schedapp/handlers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Gherkin",
"bytes": "472"
},
{
"name": "HTML",
"bytes": "5244"
},
{
"name": "Python",
"bytes": "226801"
},
{
"name": "Shell",
"bytes": "1289"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, with_statement
|
{
"content_hash": "77c851e9e9b2d0a77a91ae12a74e9c42",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 64,
"avg_line_length": 33,
"alnum_prop": 0.7727272727272727,
"repo_name": "robertding/vo",
"id": "ac2f2e656ba258a4e4759c2b49cef3230877e87f",
"size": "249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "268"
},
{
"name": "Python",
"bytes": "9263"
}
],
"symlink_target": ""
}
|
"""
Regular prism mesh
--------------------
The mesh classes in Fatiando are more efficient ways of representing regular
meshes than simples lists of :class:`~fatiando.mesher.Prism` objects. This is
how you can create a :class:`~fatiando.mesher.PrismMesh` and assign it a
density for each prism.
"""
from __future__ import print_function
from fatiando.mesher import PrismMesh
from fatiando.vis import myv
mesh = PrismMesh(bounds=(0, 100, 0, 200, 0, 150), shape=(5, 6, 7))
# We'll give each prism a density value corresponding to its index on the
# mesh. Notice that meshes take lists/arrays as their property values
mesh.addprop('density', list(range(mesh.size)))
# You can iterate over meshes like lists of elements
for p in mesh:
print(p.props['density'], end=' ')
scene = myv.figure(size=(600, 600))
# Because you can iterate over a mesh, you can pass it anywhere a list of
# prisms is accepted
plot = myv.prisms(mesh, prop='density')
# The code below enables and configures the color bar. This will be automated
# in a function in the future (write to the mailing list if you'd like to
# help out!)
plot.module_manager.scalar_lut_manager.show_scalar_bar = True
plot.module_manager.scalar_lut_manager.lut_mode = 'Greens'
plot.module_manager.scalar_lut_manager.reverse_lut = True
plot.module_manager.scalar_lut_manager.label_text_property.color = (0, 0, 0)
plot.module_manager.scalar_lut_manager.title_text_property.color = (0, 0, 0)
plot.module_manager.scalar_lut_manager.scalar_bar_representation.position = \
[0.9, 0.4]
plot.module_manager.scalar_lut_manager.scalar_bar_representation.position2 = \
[0.1, 0.6]
myv.axes(myv.outline(), fmt='%.1f')
myv.show()
|
{
"content_hash": "4dcb09c4f00a367497e89af93008170f",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 78,
"avg_line_length": 40.023809523809526,
"alnum_prop": 0.735871505056514,
"repo_name": "mtb-za/fatiando",
"id": "75a569a5464f6f8b8bc8cfb94866d60fda8b26bc",
"size": "1681",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gallery/3-mesh/prism_mesh.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "454779"
},
{
"name": "Makefile",
"bytes": "1756"
},
{
"name": "Python",
"bytes": "1024422"
},
{
"name": "Shell",
"bytes": "3825"
}
],
"symlink_target": ""
}
|
# import modules from the standard library
import os
import decimal
# import other modules
import pandas as pd
import matplotlib
# import local functions
import net_worth as net_worth
from financial_independence import fin_ind
import utils as utils
# some initial setup - abbreviating the Decimal command, changing the style of matplotlib, assigning the current
# working directory variable and filepath variable
D = decimal.Decimal
matplotlib.style.use('ggplot')
if os.name == 'nt':
cwd = "C:\\Users\\ssamdj\\Dropbox\\Financials"
filepath = "{}\\data\\Expenses.csv".format(cwd)
else:
cwd = "/home/mj/Dropbox/Financials"
filepath = "{}/data/Expenses.csv".format(cwd)
# These are the "Tableau 20" colors as RGB.
tableau20 = [(31, 119, 180), (174, 199, 232), (255, 127, 14), (255, 187, 120),
(44, 160, 44), (152, 223, 138), (214, 39, 40), (255, 152, 150),
(148, 103, 189), (197, 176, 213), (140, 86, 75), (196, 156, 148),
(227, 119, 194), (247, 182, 210), (127, 127, 127), (199, 199, 199),
(188, 189, 34), (219, 219, 141), (23, 190, 207), (158, 218, 229)]
# Scale the RGB values to the [0, 1] range, which is the format matplotlib accepts.
for i in range(len(tableau20)):
r, g, b = tableau20[i]
tableau20[i] = (r / 255., g / 255., b / 255.)
# read in CSV and create a dataframe, assigned to df. Change currency format
# into float, as it is parsed as a string due to commas. dayfirst assumes that
# dates are in the form of DD/MM
df = pd.read_csv(filepath, parse_dates=[1, 2], infer_datetime_format=True, dayfirst=True)
df['Cost'] = (df['Cost'].str.replace(r'[^-+\d.]', '').astype(float))
if os.name == 'nt':
# print the metadata definitions
with open("{}\\docs\\Metadata Definitions.txt".format(cwd), "w+") as file:
file.write("The following is a list of the data types read in from the Expenses file. \n")
file.write(pd.Series.to_string(df.dtypes))
file.write("\n")
# create a working dataframe df_work, add quarter column
df_work = df[['ID', 'Date', 'Cost', 'Category']].dropna()
df_work.loc[:, 'Month'] = df_work.loc[:, 'Date'].dt.month
df_work.loc[:, 'Quarter'] = df_work.loc[:, 'Date'].dt.quarter
df_work.loc[:, 'Year'] = df_work.loc[:, 'Date'].dt.year
# the below code remaps the Quarters of the year to Quarters of Australia's financial year
df_work.loc[:, 'Aus_Qtr'] = df_work.loc[:, 'Quarter'].map(utils.quarter_aus)
df_work.loc[:, 'Fin_Year'] = df_work.loc[:, 'Date'].apply(utils.fin_year)
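# (e.g. calendar Q3, July-September, becomes Q1 of the Australian financial
# year, which runs July to June; this assumes utils.quarter_aus implements
# that standard mapping)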
# begin the monthly reporting
utils.mon_func(df_work, cwd, tableau20)
utils.qtr_func(df_work, cwd, tableau20)
utils.year_func(df_work, cwd)
ageVec, networthVec, superVec, savingsVec, principalVec, incomeVec, expensesVec, index_fi = fin_ind()
utils.plot_fi(ageVec, networthVec, superVec, savingsVec, principalVec, incomeVec, expensesVec, index_fi)
# begin the Net Worth reporting
net_worth.net_worth(cwd, tableau20)
|
{
"content_hash": "f719fe7c0f2cbb6488bee58ee6f70153",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 117,
"avg_line_length": 43.838235294117645,
"alnum_prop": 0.6517946997651795,
"repo_name": "dmsun/Financial-Independence",
"id": "f4d90b5c596373e465291d6b3f6e933e0bb84b85",
"size": "3005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "expenses.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46924"
}
],
"symlink_target": ""
}
|
import os
class Libstdcpp_P1:
conf_lst = {}
e = False
root_dir = ""
def init(self, c_lst, ex, root_dir):
self.conf_lst = c_lst
self.e = ex
self.root_dir = root_dir
self.config = {
"name": "libstdcpp", # Name of the package
"version": "6.2.0", # Version of the package
"size": 666, # Size of the installed package (MB)
"archive": "gcc-6.2.0.tar.bz2", # Archive name
"SBU": 0.4, # SBU (Compilation time)
"tmp_install": True, # Is this package part of the temporary install
"after": False,
"next": "binutils2", # Next package to install
"chdir": False,
"urls": [ # Url to download the package. The first one must be morphux servers
"https://install.morphux.org/packages/gcc-6.2.0.tar.bz2"
]
}
return self.config
def before(self):
os.chdir("gcc-6.2.0")
res = self.e(["rm", "-rf", "build"])
res = self.e(["mkdir", "-vp", "build"])
os.chdir("build")
return res
def configure(self):
return self.e(["../libstdc++-v3/configure",
"--prefix=/tools",
"--host=" + self.conf_lst["target"],
"--disable-multilib",
"--disable-nls",
"--disable-libstdcxx-threads",
"--disable-libstdcxx-pch",
"--with-gxx-include-dir=/tools/"+ self.conf_lst["target"] +"/include/c++/6.2.0"
])
def make(self):
return self.e(["make", "-j", self.conf_lst["cpus"]])
def install(self):
return self.e(["make", "install"])
|
{
"content_hash": "35cb45ecd5dca2842b62665c312886ee",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 95,
"avg_line_length": 33.90196078431372,
"alnum_prop": 0.48582995951417,
"repo_name": "Morphux/installer",
"id": "c030d72ee96e4b52452995cdf091a44e5368e066",
"size": "3035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pkgs/libstdcpp_p1/libstdcpp_p1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "6786"
},
{
"name": "Python",
"bytes": "706930"
},
{
"name": "Shell",
"bytes": "66"
},
{
"name": "VimL",
"bytes": "149"
}
],
"symlink_target": ""
}
|
import json
import socket
from hashlib import sha256
from urllib2 import urlopen
from urllib import quote
def urlencode(query):
if isinstance(query, dict):
query = query.items()
pairs = []
for item in query:
pairs.append("%s=%s" % (item[0], quote(str(item[1]))))
return "&".join(pairs)
class MinecraftStream(object):
# Extends the basic stream object and adds a readjson method to the object
def __getattribute__(self, name):
if name not in ['readjson', '_original_stream']:
return getattr(
object.__getattribute__(self, '_original_stream'),
name
)
else:
return object.__getattribute__(self, name)
def __init__(self, stream):
self._original_stream = stream
def readjson(self, *args, **kwargs):
ret = self._original_stream.readline(*args, **kwargs)
return json.loads(ret)
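# Illustrative sketch (not part of the original SDK): MinecraftStream
# delegates every ordinary file method via __getattribute__ and adds
# readjson(), which parses one line of the stream as JSON.
def _example_stream_wrapping():
    import io
    stream = MinecraftStream(io.BytesIO(b'{"result": "success"}\n'))
    return stream.readjson()   # -> {'result': 'success'}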
class MinecraftJsonApi (object):
'''
Python Interface to JSONAPI for Bukkit (Minecraft)
Based off of the PHP interface by Alec Gorge <alecgorge@gmail.com>
https://github.com/alecgorge/jsonapi/raw/master/sdk/php/JSONAPI.php
(c) 2011 Accalia.de.Elementia <Accalia.de.Elementia@gmail.com>
This work is licensed under a Creative Commons Attribution
3.0 Unported License <http://creativecommons.org/licenses/by/3.0/>
JSONAPI homepage:
http://ramblingwood.com/minecraft/jsonapi/
'''
__basic_url = 'http://{host}:{port}/api/call?{query}'
__multi_url = 'http://{host}:{port}/api/call-multiple?{query}'
__subscribe_url = '/api/subscribe?{query}'
__letters = list('abcdefghijklmnopqrstuvwxyz')
def __createkey(self, method):
'''
Create an authentication hash for the given method.
'''
return sha256('{username}{method}{password}{salt}'.format(
username = self.username,
method = method,
password = self.password,
salt = self.salt
)
).hexdigest()
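    # For example (illustrative values): with username 'admin', password
    # 'demo' and an empty salt, the key for method 'getPlayerCount' is
    # sha256('admingetPlayerCountdemo').hexdigest().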
def __createURL(self, method, args):
'''
Create the full URL for calling a method.
'''
key = self.__createkey(method)
return self.__basic_url.format(
host = self.host,
port = self.port,
query = urlencode([
('method', method),
('args', json.dumps(args)),
('key', key),
])
)
def __createStreamURL(self, source):
'''
Create the full URL for subscribing to a stream.
'''
key = self.__createkey(source)
return self.__subscribe_url.format(
query = urlencode([
('source', source),
('key', key),
])
)
def __createMultiCallURL(self, methodlist, arglist):
'''
Create the full URL for calling multiple methods.
'''
methodlist = json.dumps(methodlist)
arglist = json.dumps(arglist)
key = self.__createkey(methodlist)
return self.__multi_url.format(
host = self.host,
port = self.port,
query = urlencode([
('method', methodlist),
('args', arglist),
('key', key),
])
)
def __createsocket(self):
'''
Setup a socket connection to the server and return a file like
object for reading and writing.
Copied with minor edits from examples on:
http://docs.python.org/library/socket.html
'''
'''try:
flags = socket.AI_ADDRCONFIG
except AttributeError:
flags = 0
for res in socket.getaddrinfo(self.host, (self.port+1),
socket.AF_UNSPEC, socket.SOCK_STREAM,
socket.IPPROTO_TCP, flags):
af, socktype, proto, canonname, sa = res'''
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = self.port +1
sock.connect((self.host, port))
except socket.error:
if sock:
sock.close()
sock = None
#continue
#break
if not sock:
raise Exception('Connect failed')
return MinecraftStream(sock.makefile('rwb'))
def __createMethodAttributes(self, method):
'''
Yet another translation method.
Transform the method definition JSON into a dictionary
containing only the attributes needed for the wrapper.
'''
attrs = {}
attrs['name'] = method.get('name', '')
        if not attrs['name']:
raise Exception('Malformed method definition in JSON')
attrs['description'] = method.get('desc','')
attrs['namespace'] = method.get('namespace','')
attrs['enabled'] = method.get('enabled',False)
if attrs['namespace']:
attrs['method_name'] = attrs['namespace']+ '_'+attrs['name']
attrs['call_name'] = attrs['namespace'] + '.'+attrs['name']
else:
attrs['method_name'] = attrs['name']
attrs['call_name'] = attrs['name']
attrs['returns'] = method.get('returns',
[None,'Unspecified return type.'])[1]
args = method.get('args',[])
num_args = len(args)
alpha = self.__letters
attrs['args'] = str(alpha[:num_args]).replace('\'','')[1:-1]
attrs['params'] = '\n'.join([
'{1} ({0})'.format(a[0], a[1]) for a in args
])
return attrs
def __createMethod (self, method):
'''
Create a dynamic method based on provided definition.
TODO: Is there a better way to do this? Possibly via closure to
avoid exec
'''
def makeMethod (method):
call_name = method['call_name']
def _method (self, *args):
return self.call(call_name,*args)
_method.__name__ = str(method['method_name'])
_method.__doc__ = """{description}
{returns}
Parameters:
{params}
""".format(**method)
return _method
attributes = self.__createMethodAttributes(method)
if method['enabled']:
rv_method = makeMethod(attributes)
else:
rv_method = None
attributes['method'] = rv_method
del attributes['call_name']
del attributes['args']
return attributes
def __init__(self, host='localhost', port=20059, username='admin',
password='demo', salt=''):
self.host = host
self.username = username
self.password = password
self.port = int(port)
self.salt = salt
self.__methods = []
def rawCall (self, method, *args):
'''
Make a remote call and return the raw response.
'''
url = self.__createURL(method, args)
result = urlopen(url).read()
return result
def call (self, method, *args):
'''
Make a remote call and return the JSON response.
'''
data = self.rawCall(method, *args)
result = json.loads(data)
if result['result'] =='success':
return result['success']
else:
raise Exception('(%s) %s' %(result['result'], result[result['result']]))
def call_multiple(self, methodlist, arglist):
'''
Make multiple calls and return multiple responses
'''
url = self.__createMultiCallURL(methodlist, arglist)
result = urlopen(url).read()
return result
def subscribe (self, feed):
'''
Subscribe to the remote stream.
Return a file like object for reading responses from. Use
read/readline for raw values, use readjson for parsed values.
'''
# This doesn't work right, I don't know why.... yet.
#raise NotImplementedError()
if feed not in ['console', 'chat', 'connections']:
raise NotImplementedError(
'Subscribing to feed \'%s\' is not supported.' % feed)
url = self.__createStreamURL(feed)
stream = self.__createsocket()
stream.write(url)
stream.write('\n')
stream.flush()
return stream
def getLoadedMethods(self, active_only=True):
'''
Get all methods recognized by the remote server.
'''
if active_only:
test = lambda x: x.get('enabled', False)
else:
test = lambda x: True
return [a for a in self.__methods if test(a)]
def getMethod(self, name):
'''
        Get the method definition for the provided method name.
        If the method is in a namespace, the namespace must be provided
        too, the name having the form "{namespace}_{name}"
'''
method = [m for m in self.__methods if m['method_name'] == name]
if len(method):
return method[0]
else:
return None
if __name__ == '__main__':
# Some basic test code
# Read params
paramDefaults = {'host': 'localhost', 'port':20059, 'username':'admin', 'password':'demo', 'salt':''}
filterFuncs = {'host': str, 'port': int, 'username': str, 'password': str, 'salt': str}
params = {}
for k in paramDefaults.keys():
value = raw_input("%s (%s): " % (k.capitalize(), str(paramDefaults[k])))
if len(value):
params[k] = filterFuncs[k](value)
else:
params[k] = paramDefaults[k]
api = MinecraftJsonApi(
host = params['host'],
port = params['port'],
username = params['username'],
password = params['password'],
salt = params['salt']
)
print([m['method_name'] for m in api.getLoadedMethods()])
print (api.getMethod('kickPlayer'))
x = True
while x:
method = raw_input('>')
print (api.getMethod(method))
method = raw_input('->')
print api.call(method)
|
{
"content_hash": "11ee653afdeccc60d48dc498c6006a3b",
"timestamp": "",
"source": "github",
"line_count": 322,
"max_line_length": 103,
"avg_line_length": 26.322981366459626,
"alnum_prop": 0.6459414818310524,
"repo_name": "alecgorge/jsonapi",
"id": "79ad8bf4221f7b6230e92b55328b4f9c79025557",
"size": "8495",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdk/py/MinecraftApi.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "25714"
},
{
"name": "CSS",
"bytes": "11415"
},
{
"name": "CoffeeScript",
"bytes": "7791"
},
{
"name": "HTML",
"bytes": "50561"
},
{
"name": "Java",
"bytes": "723081"
},
{
"name": "JavaScript",
"bytes": "28470"
},
{
"name": "PHP",
"bytes": "11570"
},
{
"name": "Python",
"bytes": "15044"
},
{
"name": "Shell",
"bytes": "1187"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, division, unicode_literals, absolute_import
from future import standard_library
standard_library.install_aliases()
import pytest
import sys
from contextlib import contextmanager
from io import StringIO
from ...utils import nipype_cmd
PY2 = sys.version_info[0] < 3
@contextmanager
def capture_sys_output():
    capture_out, capture_err = StringIO(), StringIO()
    current_out, current_err = sys.stdout, sys.stderr
    try:
        sys.stdout, sys.stderr = capture_out, capture_err
        yield capture_out, capture_err
finally:
sys.stdout, sys.stderr = current_out, current_err
class TestNipypeCMD():
maxDiff = None
def test_main_returns_2_on_empty(self):
with pytest.raises(SystemExit) as cm:
with capture_sys_output() as (stdout, stderr):
nipype_cmd.main(['nipype_cmd'])
exit_exception = cm.value
assert exit_exception.code == 2
msg = """usage: nipype_cmd [-h] module interface
nipype_cmd: error: the following arguments are required: module, interface
"""
if PY2:
msg = """usage: nipype_cmd [-h] module interface
nipype_cmd: error: too few arguments
"""
assert stderr.getvalue() == msg
assert stdout.getvalue() == ''
def test_main_returns_0_on_help(self):
with pytest.raises(SystemExit) as cm:
with capture_sys_output() as (stdout, stderr):
nipype_cmd.main(['nipype_cmd', '-h'])
exit_exception = cm.value
assert exit_exception.code == 0
assert stderr.getvalue() == ''
assert stdout.getvalue() == \
"""usage: nipype_cmd [-h] module interface
Nipype interface runner
positional arguments:
module Module name
interface Interface name
optional arguments:
-h, --help show this help message and exit
"""
def test_list_nipy_interfacesp(self):
with pytest.raises(SystemExit) as cm:
with capture_sys_output() as (stdout, stderr):
nipype_cmd.main(['nipype_cmd', 'nipype.interfaces.nipy'])
# repeat twice in case nipy raises warnings
with pytest.raises(SystemExit) as cm:
with capture_sys_output() as (stdout, stderr):
nipype_cmd.main(['nipype_cmd', 'nipype.interfaces.nipy'])
exit_exception = cm.value
assert exit_exception.code == 0
assert stderr.getvalue() == ''
assert stdout.getvalue() == \
"""Available Interfaces:
ComputeMask
EstimateContrast
FitGLM
FmriRealign4d
Similarity
SpaceTimeRealigner
"""
def test_run_4d_realign_without_arguments(self):
with pytest.raises(SystemExit) as cm:
with capture_sys_output() as (stdout, stderr):
nipype_cmd.main(['nipype_cmd', 'nipype.interfaces.nipy', 'FmriRealign4d'])
exit_exception = cm.value
assert exit_exception.code == 2
error_message = """usage: nipype_cmd nipype.interfaces.nipy FmriRealign4d [-h]
[--between_loops [BETWEEN_LOOPS [BETWEEN_LOOPS ...]]]
[--ignore_exception]
[--loops [LOOPS [LOOPS ...]]]
[--slice_order SLICE_ORDER]
[--speedup [SPEEDUP [SPEEDUP ...]]]
[--start START]
[--time_interp TIME_INTERP]
[--tr_slices TR_SLICES]
in_file [in_file ...]
tr"""
if not PY2:
error_message += """
nipype_cmd nipype.interfaces.nipy FmriRealign4d: error: the following arguments are required: in_file, tr
"""
else:
error_message += """
nipype_cmd nipype.interfaces.nipy FmriRealign4d: error: too few arguments
"""
assert stderr.getvalue() == error_message
assert stdout.getvalue() == ''
def test_run_4d_realign_help(self):
with pytest.raises(SystemExit) as cm:
with capture_sys_output() as (stdout, stderr):
nipype_cmd.main(['nipype_cmd', 'nipype.interfaces.nipy', 'FmriRealign4d', '-h'])
exit_exception = cm.value
assert exit_exception.code == 0
assert stderr.getvalue() == ''
assert "Run FmriRealign4d" in stdout.getvalue()
|
{
"content_hash": "4911733a59f6d5cd7fcb531fc54ca13d",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 108,
"avg_line_length": 34.0514705882353,
"alnum_prop": 0.5577629021809545,
"repo_name": "mick-d/nipype",
"id": "315d55441f7984a27cb6e2c6475acd0181682ee6",
"size": "4653",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "nipype/utils/tests/test_cmd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "1854"
},
{
"name": "Matlab",
"bytes": "1999"
},
{
"name": "Python",
"bytes": "4607773"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
from pipeline.component.hetero_linr import HeteroLinR
a = HeteroLinR(name="hetero_linr_0", early_stop="weight_diff",
stepwise_param={"max_step": 3, "need_stepwise": True})
print(a.output.data)
print(a.output.model)
|
{
"content_hash": "86e14e082d91986086f69770b6b41230",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 69,
"avg_line_length": 29.125,
"alnum_prop": 0.6952789699570815,
"repo_name": "FederatedAI/FATE",
"id": "6d64f604298de6383d79fd77eb7b9513d5e43de0",
"size": "851",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/fate_client/pipeline/test/test_hetero_linr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Lua",
"bytes": "19716"
},
{
"name": "Python",
"bytes": "5121767"
},
{
"name": "Rust",
"bytes": "3971"
},
{
"name": "Shell",
"bytes": "19676"
}
],
"symlink_target": ""
}
|
import warnings
from oscar.utils.deprecation import RemovedInOscar21Warning
def deprecated(obj):
if isinstance(obj, type):
return _deprecated_cls(cls=obj)
else:
return _deprecated_func(f=obj)
def _deprecated_func(f, warn_cls=RemovedInOscar21Warning):
def _deprecated(*args, **kwargs):
message = "Method '%s' is deprecated and will be " \
"removed in the next version of django-oscar" \
% f.__name__
warnings.warn(message, warn_cls, stacklevel=2)
return f(*args, **kwargs)
return _deprecated
def _deprecated_cls(cls, warn_cls=RemovedInOscar21Warning):
class Deprecated(cls):
def __init__(self, *args, **kwargs):
message = "Class '%s' is deprecated and will be " \
"removed in the next version of django-oscar" \
% cls.__name__
warnings.warn(message, warn_cls, stacklevel=2)
super().__init__(*args, **kwargs)
return Deprecated
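if __name__ == '__main__':
    # Usage sketch (illustrative): both forms emit a RemovedInOscar21Warning
    # when the wrapped function is called or the wrapped class instantiated.
    @deprecated
    def _old_helper():
        return 42

    @deprecated
    class _OldThing:
        pass

    _old_helper()
    _OldThing()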
|
{
"content_hash": "fc7c58ec03eca164ac9b0f0872ff60ce",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 63,
"avg_line_length": 32.12903225806452,
"alnum_prop": 0.6124497991967871,
"repo_name": "sasha0/django-oscar",
"id": "64b61c0ddec13bd06a6598c52b70aee0873faf1f",
"size": "996",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/oscar/core/decorators.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "387941"
},
{
"name": "Dockerfile",
"bytes": "544"
},
{
"name": "HTML",
"bytes": "518624"
},
{
"name": "JavaScript",
"bytes": "344864"
},
{
"name": "Makefile",
"bytes": "4290"
},
{
"name": "Python",
"bytes": "1957797"
},
{
"name": "Shell",
"bytes": "1643"
}
],
"symlink_target": ""
}
|
def _get_data_from_xml(doclist, fieldname, nohitreturn=None):
"""Get the fieldname (i.e. author, title etc)
from minidom.parseString().childNodes[0].childNodes list
"""
result = []
for element in doclist:
try:
fields = element[fieldname]
except KeyError:
fields = [nohitreturn]
result.append(fields)
return result
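if __name__ == '__main__':
    # Minimal sketch of the expected input shape: a list of dict-like
    # records (purely illustrative data).
    docs = [{'author': ['A. Person']}, {'title': ['Untitled']}]
    print(_get_data_from_xml(docs, 'author'))   # -> [['A. Person'], [None]]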
|
{
"content_hash": "4097fc9e3dd2c2f2268e120e5e637770",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 61,
"avg_line_length": 32.166666666666664,
"alnum_prop": 0.6139896373056994,
"repo_name": "imbasimba/astroquery",
"id": "c4e1cbb59efce40ccd5a79c2a23c043fbb2f29ed",
"size": "388",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "astroquery/nasa_ads/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "493404"
},
{
"name": "Python",
"bytes": "2852847"
}
],
"symlink_target": ""
}
|
"""Testable usage examples for Google BigQuery API wrapper
Each example function takes a ``client`` argument (which must be an instance
of :class:`google.cloud.bigquery.client.Client`) and uses it to perform a task
with the API.
To facilitate running the examples as system tests, each example is also passed
a ``to_delete`` list; the function adds to the list any objects created which
need to be deleted during teardown.
"""
import os
import time
import mock
import pytest
import six
try:
import pandas
except (ImportError, AttributeError):
pandas = None
try:
import pyarrow
except (ImportError, AttributeError):
pyarrow = None
from google.api_core import datetime_helpers
from google.api_core.exceptions import InternalServerError
from google.api_core.exceptions import ServiceUnavailable
from google.api_core.exceptions import TooManyRequests
from google.cloud import bigquery
from google.cloud import storage
from test_utils.retry import RetryErrors
ORIGINAL_FRIENDLY_NAME = 'Original friendly name'
ORIGINAL_DESCRIPTION = 'Original description'
LOCALLY_CHANGED_FRIENDLY_NAME = 'Locally-changed friendly name'
LOCALLY_CHANGED_DESCRIPTION = 'Locally-changed description'
UPDATED_FRIENDLY_NAME = 'Updated friendly name'
UPDATED_DESCRIPTION = 'Updated description'
SCHEMA = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED'),
]
ROWS = [
('Phred Phlyntstone', 32),
('Bharney Rhubble', 33),
('Wylma Phlyntstone', 29),
('Bhettye Rhubble', 27),
]
QUERY = (
'SELECT name FROM `bigquery-public-data.usa_names.usa_1910_2013` '
'WHERE state = "TX"')
retry_429 = RetryErrors(TooManyRequests)
retry_storage_errors = RetryErrors(
(TooManyRequests, InternalServerError, ServiceUnavailable))
@pytest.fixture(scope='module')
def client():
return bigquery.Client()
@pytest.fixture
def to_delete(client):
doomed = []
yield doomed
for item in doomed:
if isinstance(item, (bigquery.Dataset, bigquery.DatasetReference)):
retry_429(client.delete_dataset)(item, delete_contents=True)
elif isinstance(item, storage.Bucket):
retry_storage_errors(item.delete)()
else:
retry_429(item.delete)()
def _millis():
return int(time.time() * 1000)
class _CloseOnDelete(object):
def __init__(self, wrapped):
self._wrapped = wrapped
def delete(self):
self._wrapped.close()
def test_create_client_default_credentials():
"""Create a BigQuery client with Application Default Credentials"""
# [START bigquery_client_default_credentials]
from google.cloud import bigquery
# If you don't specify credentials when constructing the client, the
# client library will look for credentials in the environment.
client = bigquery.Client()
# [END bigquery_client_default_credentials]
assert client is not None
def test_create_client_json_credentials():
"""Create a BigQuery client with Application Default Credentials"""
with open(os.environ['GOOGLE_APPLICATION_CREDENTIALS']) as creds_file:
creds_file_data = creds_file.read()
open_mock = mock.mock_open(read_data=creds_file_data)
with mock.patch('io.open', open_mock):
# [START bigquery_client_json_credentials]
from google.cloud import bigquery
# Explicitly use service account credentials by specifying the private
# key file. All clients in google-cloud-python have this helper.
client = bigquery.Client.from_service_account_json(
'path/to/service_account.json')
# [END bigquery_client_json_credentials]
assert client is not None
def test_list_datasets(client):
"""List datasets for a project."""
# [START bigquery_list_datasets]
# from google.cloud import bigquery
# client = bigquery.Client()
datasets = list(client.list_datasets())
project = client.project
if datasets:
print('Datasets in project {}:'.format(project))
for dataset in datasets: # API request(s)
print('\t{}'.format(dataset.dataset_id))
else:
print('{} project does not contain any datasets.'.format(project))
# [END bigquery_list_datasets]
def test_list_datasets_by_label(client, to_delete):
dataset_id = 'list_datasets_by_label_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset.labels = {'color': 'green'}
dataset = client.create_dataset(dataset) # API request
to_delete.append(dataset)
# [START bigquery_list_datasets_by_label]
# from google.cloud import bigquery
# client = bigquery.Client()
# The following label filter example will find datasets with an
# arbitrary 'color' label set to 'green'
label_filter = 'labels.color:green'
datasets = list(client.list_datasets(filter=label_filter))
if datasets:
print('Datasets filtered by {}:'.format(label_filter))
for dataset in datasets: # API request(s)
print('\t{}'.format(dataset.dataset_id))
else:
print('No datasets found with this filter.')
# [END bigquery_list_datasets_by_label]
found = set([dataset.dataset_id for dataset in datasets])
assert dataset_id in found
def test_create_dataset(client, to_delete):
"""Create a dataset."""
dataset_id = 'create_dataset_{}'.format(_millis())
# [START bigquery_create_dataset]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
# Create a DatasetReference using a chosen dataset ID.
# The project defaults to the Client's project if not specified.
dataset_ref = client.dataset(dataset_id)
# Construct a full Dataset object to send to the API.
dataset = bigquery.Dataset(dataset_ref)
# Specify the geographic location where the dataset should reside.
dataset.location = 'US'
# Send the dataset to the API for creation.
# Raises google.api_core.exceptions.AlreadyExists if the Dataset already
# exists within the project.
dataset = client.create_dataset(dataset) # API request
# [END bigquery_create_dataset]
to_delete.append(dataset)
def test_get_dataset_information(client, to_delete):
"""View information about a dataset."""
dataset_id = 'get_dataset_{}'.format(_millis())
dataset_labels = {'color': 'green'}
dataset_ref = client.dataset(dataset_id)
dataset = bigquery.Dataset(dataset_ref)
dataset.description = ORIGINAL_DESCRIPTION
dataset.labels = dataset_labels
dataset = client.create_dataset(dataset) # API request
to_delete.append(dataset)
# [START bigquery_get_dataset]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
dataset_ref = client.dataset(dataset_id)
dataset = client.get_dataset(dataset_ref) # API request
# View dataset properties
print('Dataset ID: {}'.format(dataset_id))
print('Description: {}'.format(dataset.description))
print('Labels:')
labels = dataset.labels
if labels:
for label, value in labels.items():
print('\t{}: {}'.format(label, value))
else:
print("\tDataset has no labels defined.")
# View tables in dataset
print('Tables:')
tables = list(client.list_tables(dataset_ref)) # API request(s)
if tables:
for table in tables:
print('\t{}'.format(table.table_id))
else:
print('\tThis dataset does not contain any tables.')
# [END bigquery_get_dataset]
assert dataset.description == ORIGINAL_DESCRIPTION
assert dataset.labels == dataset_labels
assert tables == []
# [START bigquery_dataset_exists]
def dataset_exists(client, dataset_reference):
"""Return if a dataset exists.
Args:
client (google.cloud.bigquery.client.Client):
A client to connect to the BigQuery API.
dataset_reference (google.cloud.bigquery.dataset.DatasetReference):
A reference to the dataset to look for.
Returns:
bool: ``True`` if the dataset exists, ``False`` otherwise.
"""
from google.cloud.exceptions import NotFound
try:
client.get_dataset(dataset_reference)
return True
except NotFound:
return False
# [END bigquery_dataset_exists]
def test_dataset_exists(client, to_delete):
"""Determine if a dataset exists."""
DATASET_ID = 'get_table_dataset_{}'.format(_millis())
dataset_ref = client.dataset(DATASET_ID)
dataset = bigquery.Dataset(dataset_ref)
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
assert dataset_exists(client, dataset_ref)
assert not dataset_exists(client, client.dataset('i_dont_exist'))
@pytest.mark.skip(reason=(
'update_dataset() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5588'))
def test_update_dataset_description(client, to_delete):
"""Update a dataset's description."""
dataset_id = 'update_dataset_description_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset.description = 'Original description.'
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_update_dataset_description]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
# dataset = client.get_dataset(dataset_ref) # API request
assert dataset.description == 'Original description.'
dataset.description = 'Updated description.'
dataset = client.update_dataset(dataset, ['description']) # API request
assert dataset.description == 'Updated description.'
# [END bigquery_update_dataset_description]
@pytest.mark.skip(reason=(
'update_dataset() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5588'))
def test_update_dataset_default_table_expiration(client, to_delete):
"""Update a dataset's default table expiration."""
dataset_id = 'update_dataset_default_expiration_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_update_dataset_expiration]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
# dataset = client.get_dataset(dataset_ref) # API request
assert dataset.default_table_expiration_ms is None
one_day_ms = 24 * 60 * 60 * 1000 # in milliseconds
dataset.default_table_expiration_ms = one_day_ms
dataset = client.update_dataset(
dataset, ['default_table_expiration_ms']) # API request
assert dataset.default_table_expiration_ms == one_day_ms
# [END bigquery_update_dataset_expiration]
@pytest.mark.skip(reason=(
'update_dataset() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5588'))
def test_manage_dataset_labels(client, to_delete):
dataset_id = 'label_dataset_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_label_dataset]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
# dataset = client.get_dataset(dataset_ref) # API request
assert dataset.labels == {}
labels = {'color': 'green'}
dataset.labels = labels
dataset = client.update_dataset(dataset, ['labels']) # API request
assert dataset.labels == labels
# [END bigquery_label_dataset]
# [START bigquery_get_dataset_labels]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
dataset_ref = client.dataset(dataset_id)
dataset = client.get_dataset(dataset_ref) # API request
# View dataset labels
print('Dataset ID: {}'.format(dataset_id))
print('Labels:')
if dataset.labels:
for label, value in dataset.labels.items():
print('\t{}: {}'.format(label, value))
else:
print("\tDataset has no labels defined.")
# [END bigquery_get_dataset_labels]
assert dataset.labels == labels
# [START bigquery_delete_label_dataset]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
# dataset = client.get_dataset(dataset_ref) # API request
# This example dataset starts with one label
assert dataset.labels == {'color': 'green'}
# To delete a label from a dataset, set its value to None
dataset.labels['color'] = None
dataset = client.update_dataset(dataset, ['labels']) # API request
assert dataset.labels == {}
# [END bigquery_delete_label_dataset]
@pytest.mark.skip(reason=(
'update_dataset() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5588'))
def test_update_dataset_access(client, to_delete):
"""Update a dataset's access controls."""
dataset_id = 'update_dataset_access_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_update_dataset_access]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset = client.get_dataset(client.dataset('my_dataset'))
entry = bigquery.AccessEntry(
role='READER',
entity_type='userByEmail',
entity_id='sample.bigquery.dev@gmail.com')
assert entry not in dataset.access_entries
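# Reading access_entries builds a new list each time, so mutate a local
# copy and reassign it for the update to take effect.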
entries = list(dataset.access_entries)
entries.append(entry)
dataset.access_entries = entries
dataset = client.update_dataset(dataset, ['access_entries']) # API request
assert entry in dataset.access_entries
# [END bigquery_update_dataset_access]
def test_delete_dataset(client):
"""Delete a dataset."""
from google.cloud.exceptions import NotFound
dataset1_id = 'delete_dataset_{}'.format(_millis())
dataset1 = bigquery.Dataset(client.dataset(dataset1_id))
client.create_dataset(dataset1)
dataset2_id = 'delete_dataset_with_tables_{}'.format(_millis())
dataset2 = bigquery.Dataset(client.dataset(dataset2_id))
client.create_dataset(dataset2)
table = bigquery.Table(dataset2.table('new_table'))
client.create_table(table)
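# dataset2 now contains a table, so deleting it requires
# delete_contents=True (demonstrated below).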
# [START bigquery_delete_dataset]
# from google.cloud import bigquery
# client = bigquery.Client()
# Delete a dataset that does not contain any tables
# dataset1_id = 'my_empty_dataset'
dataset1_ref = client.dataset(dataset1_id)
client.delete_dataset(dataset1_ref) # API request
print('Dataset {} deleted.'.format(dataset1_id))
# Use the delete_contents parameter to delete a dataset and its contents
# dataset2_id = 'my_dataset_with_tables'
dataset2_ref = client.dataset(dataset2_id)
client.delete_dataset(dataset2_ref, delete_contents=True) # API request
print('Dataset {} deleted.'.format(dataset2_id))
# [END bigquery_delete_dataset]
for dataset in [dataset1, dataset2]:
with pytest.raises(NotFound):
client.get_dataset(dataset) # API request
def test_list_tables(client, to_delete):
"""List tables within a dataset."""
dataset_id = 'list_tables_dataset_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
dataset = client.create_dataset(bigquery.Dataset(dataset_ref))
to_delete.append(dataset)
# [START bigquery_list_tables]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
tables = list(client.list_tables(dataset_ref)) # API request(s)
assert len(tables) == 0
table_ref = dataset_ref.table('my_table')
table = bigquery.Table(table_ref)
client.create_table(table)  # API request
tables = list(client.list_tables(dataset_ref))  # API request(s)
assert len(tables) == 1
assert tables[0].table_id == 'my_table'
# [END bigquery_list_tables]
def test_create_table(client, to_delete):
"""Create a table."""
dataset_id = 'create_table_dataset_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
dataset = bigquery.Dataset(dataset_ref)
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_create_table]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
schema = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED'),
]
table_ref = dataset_ref.table('my_table')
table = bigquery.Table(table_ref, schema=schema)
table = client.create_table(table) # API request
assert table.table_id == 'my_table'
# [END bigquery_create_table]
def test_create_table_nested_repeated_schema(client, to_delete):
dataset_id = 'create_table_nested_repeated_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
dataset = bigquery.Dataset(dataset_ref)
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_nested_repeated_schema]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
schema = [
bigquery.SchemaField('id', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('first_name', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('last_name', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('dob', 'DATE', mode='NULLABLE'),
bigquery.SchemaField('addresses', 'RECORD', mode='REPEATED', fields=[
bigquery.SchemaField('status', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('address', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('city', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('state', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('zip', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('numberOfYears', 'STRING', mode='NULLABLE'),
]),
]
table_ref = dataset_ref.table('my_table')
table = bigquery.Table(table_ref, schema=schema)
table = client.create_table(table) # API request
print('Created table {}'.format(table.full_table_id))
# [END bigquery_nested_repeated_schema]
def test_create_table_cmek(client, to_delete):
dataset_id = 'create_table_cmek_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_create_table_cmek]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
table_ref = client.dataset(dataset_id).table('my_table')
table = bigquery.Table(table_ref)
# Set the encryption key to use for the table.
# TODO: Replace this key with a key you have created in Cloud KMS.
kms_key_name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(
'cloud-samples-tests', 'us-central1', 'test', 'test')
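# The fully-qualified key name has the form:
# projects/<project>/locations/<location>/keyRings/<key_ring>/cryptoKeys/<key>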
table.encryption_configuration = bigquery.EncryptionConfiguration(
kms_key_name=kms_key_name)
table = client.create_table(table) # API request
assert table.encryption_configuration.kms_key_name == kms_key_name
# [END bigquery_create_table_cmek]
def test_create_partitioned_table(client, to_delete):
dataset_id = 'create_table_partitioned_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
dataset = client.create_dataset(bigquery.Dataset(dataset_ref))
to_delete.append(dataset)
# [START bigquery_create_table_partitioned]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
table_ref = dataset_ref.table('my_partitioned_table')
schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING'),
bigquery.SchemaField('date', 'DATE')
]
table = bigquery.Table(table_ref, schema=schema)
table.time_partitioning = bigquery.TimePartitioning(
type_=bigquery.TimePartitioningType.DAY,
field='date', # name of column to use for partitioning
expiration_ms=7776000000) # 90 days
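# 7776000000 ms = 90 days * 24 hr * 60 min * 60 sec * 1000 ms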
table = client.create_table(table)
print('Created table {}, partitioned on column {}'.format(
table.table_id, table.time_partitioning.field))
# [END bigquery_create_table_partitioned]
assert table.time_partitioning.type_ == 'DAY'
assert table.time_partitioning.field == 'date'
assert table.time_partitioning.expiration_ms == 7776000000
def test_load_and_query_partitioned_table(client, to_delete):
dataset_id = 'load_partitioned_table_dataset_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_load_table_partitioned]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
table_id = 'us_states_by_date'
dataset_ref = client.dataset(dataset_id)
job_config = bigquery.LoadJobConfig()
job_config.schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING'),
bigquery.SchemaField('date', 'DATE')
]
job_config.skip_leading_rows = 1
job_config.time_partitioning = bigquery.TimePartitioning(
type_=bigquery.TimePartitioningType.DAY,
field='date', # name of column to use for partitioning
expiration_ms=7776000000) # 90 days
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states-by-date.csv'
load_job = client.load_table_from_uri(
uri,
dataset_ref.table(table_id),
job_config=job_config) # API request
assert load_job.job_type == 'load'
load_job.result() # Waits for table load to complete.
table = client.get_table(dataset_ref.table(table_id))
print("Loaded {} rows to table {}".format(table.num_rows, table_id))
# [END bigquery_load_table_partitioned]
assert table.num_rows == 50
project_id = client.project
# [START bigquery_query_partitioned_table]
import datetime
# from google.cloud import bigquery
# client = bigquery.Client()
# project_id = 'my-project'
# dataset_id = 'my_dataset'
table_id = 'us_states_by_date'
sql_template = """
SELECT *
FROM `{}.{}.{}`
WHERE date BETWEEN @start_date AND @end_date
"""
sql = sql_template.format(project_id, dataset_id, table_id)
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = [
bigquery.ScalarQueryParameter(
'start_date',
'DATE',
datetime.date(1800, 1, 1)
),
bigquery.ScalarQueryParameter(
'end_date',
'DATE',
datetime.date(1899, 12, 31)
)
]
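# Each parameter name ('start_date', 'end_date') must match an
# @-prefixed placeholder in the SQL text above.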
query_job = client.query(
sql,
# Location must match that of the dataset(s) referenced in the query.
location='US',
job_config=job_config) # API request
rows = list(query_job)
print("{} states were admitted to the US in the 1800s".format(len(rows)))
# [END bigquery_query_partitioned_table]
assert len(rows) == 29
def test_get_table_information(client, to_delete):
"""Show a table's properties."""
dataset_id = 'show_table_dataset_{}'.format(_millis())
table_id = 'show_table_table_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
dataset = bigquery.Dataset(dataset_ref)
client.create_dataset(dataset)
to_delete.append(dataset)
table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)
table.description = ORIGINAL_DESCRIPTION
table = client.create_table(table)
# [START bigquery_get_table]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
# table_id = 'my_table'
dataset_ref = client.dataset(dataset_id)
table_ref = dataset_ref.table(table_id)
table = client.get_table(table_ref) # API Request
# View table properties
print(table.schema)
print(table.description)
print(table.num_rows)
# [END bigquery_get_table]
assert table.schema == SCHEMA
assert table.description == ORIGINAL_DESCRIPTION
assert table.num_rows == 0
# [START bigquery_table_exists]
def table_exists(client, table_reference):
"""Return if a table exists.
Args:
client (google.cloud.bigquery.client.Client):
A client to connect to the BigQuery API.
table_reference (google.cloud.bigquery.table.TableReference):
A reference to the table to look for.
Returns:
bool: ``True`` if the table exists, ``False`` otherwise.
"""
from google.cloud.exceptions import NotFound
try:
client.get_table(table_reference)
return True
except NotFound:
return False
# [END bigquery_table_exists]
def test_table_exists(client, to_delete):
"""Determine if a table exists."""
DATASET_ID = 'get_table_dataset_{}'.format(_millis())
TABLE_ID = 'get_table_table_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(DATASET_ID))
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
table_ref = dataset.table(TABLE_ID)
table = bigquery.Table(table_ref, schema=SCHEMA)
table = client.create_table(table)
assert table_exists(client, table_ref)
assert not table_exists(client, dataset.table('i_dont_exist'))
@pytest.mark.skip(reason=(
'update_table() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589'))
def test_manage_table_labels(client, to_delete):
dataset_id = 'label_table_dataset_{}'.format(_millis())
table_id = 'label_table_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)
table = client.create_table(table)
# [START bigquery_label_table]
# from google.cloud import bigquery
# client = bigquery.Client()
# table_ref = client.dataset('my_dataset').table('my_table')
# table = client.get_table(table_ref) # API request
assert table.labels == {}
labels = {'color': 'green'}
table.labels = labels
table = client.update_table(table, ['labels']) # API request
assert table.labels == labels
# [END bigquery_label_table]
# [START bigquery_get_table_labels]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
# table_id = 'my_table'
dataset_ref = client.dataset(dataset_id)
table_ref = dataset_ref.table(table_id)
table = client.get_table(table_ref) # API Request
# View table labels
print('Table ID: {}'.format(table_id))
print('Labels:')
if table.labels:
for label, value in table.labels.items():
print('\t{}: {}'.format(label, value))
else:
print("\tTable has no labels defined.")
# [END bigquery_get_table_labels]
assert table.labels == labels
# [START bigquery_delete_label_table]
# from google.cloud import bigquery
# client = bigquery.Client()
# table_ref = client.dataset('my_dataset').table('my_table')
# table = client.get_table(table_ref) # API request
# This example table starts with one label
assert table.labels == {'color': 'green'}
# To delete a label from a table, set its value to None
table.labels['color'] = None
table = client.update_table(table, ['labels']) # API request
assert table.labels == {}
# [END bigquery_delete_label_table]
@pytest.mark.skip(reason=(
'update_table() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589'))
def test_update_table_description(client, to_delete):
"""Update a table's description."""
dataset_id = 'update_table_description_dataset_{}'.format(_millis())
table_id = 'update_table_description_table_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)
table.description = 'Original description.'
table = client.create_table(table)
# [START bigquery_update_table_description]
# from google.cloud import bigquery
# client = bigquery.Client()
# table_ref = client.dataset('my_dataset').table('my_table')
# table = client.get_table(table_ref) # API request
assert table.description == 'Original description.'
table.description = 'Updated description.'
table = client.update_table(table, ['description']) # API request
assert table.description == 'Updated description.'
# [END bigquery_update_table_description]
@pytest.mark.skip(reason=(
'update_table() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589'))
def test_update_table_expiration(client, to_delete):
"""Update a table's expiration time."""
dataset_id = 'update_table_expiration_dataset_{}'.format(_millis())
table_id = 'update_table_expiration_table_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)
table = client.create_table(table)
# [START bigquery_update_table_expiration]
import datetime
import pytz
# from google.cloud import bigquery
# client = bigquery.Client()
# table_ref = client.dataset('my_dataset').table('my_table')
# table = client.get_table(table_ref) # API request
assert table.expires is None
# set table to expire 5 days from now
expiration = datetime.datetime.now(pytz.utc) + datetime.timedelta(days=5)
table.expires = expiration
table = client.update_table(table, ['expires']) # API request
# expiration is stored in milliseconds
margin = datetime.timedelta(microseconds=1000)
assert expiration - margin <= table.expires <= expiration + margin
# [END bigquery_update_table_expiration]
@pytest.mark.skip(reason=(
'update_table() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589'))
def test_add_empty_column(client, to_delete):
"""Adds an empty column to an existing table."""
dataset_id = 'add_empty_column_dataset_{}'.format(_millis())
table_id = 'add_empty_column_table_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)
table = client.create_table(table)
# [START bigquery_add_empty_column]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
# table_id = 'my_table'
table_ref = client.dataset(dataset_id).table(table_id)
table = client.get_table(table_ref) # API request
original_schema = table.schema
new_schema = original_schema[:] # creates a copy of the schema
new_schema.append(bigquery.SchemaField('phone', 'STRING'))
table.schema = new_schema
table = client.update_table(table, ['schema']) # API request
assert len(table.schema) == len(original_schema) + 1 == len(new_schema)
# [END bigquery_add_empty_column]
@pytest.mark.skip(reason=(
'update_table() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589'))
def test_relax_column(client, to_delete):
"""Updates a schema field from required to nullable."""
dataset_id = 'relax_column_dataset_{}'.format(_millis())
table_id = 'relax_column_table_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_relax_column]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
# table_id = 'my_table'
original_schema = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED'),
]
table_ref = client.dataset(dataset_id).table(table_id)
table = bigquery.Table(table_ref, schema=original_schema)
table = client.create_table(table)
assert all(field.mode == 'REQUIRED' for field in table.schema)
# SchemaField properties cannot be edited after initialization.
# To make changes, construct new SchemaField objects.
relaxed_schema = [
bigquery.SchemaField('full_name', 'STRING', mode='NULLABLE'),
bigquery.SchemaField('age', 'INTEGER', mode='NULLABLE'),
]
table.schema = relaxed_schema
table = client.update_table(table, ['schema'])
assert all(field.mode == 'NULLABLE' for field in table.schema)
# [END bigquery_relax_column]
@pytest.mark.skip(reason=(
'update_table() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589'))
def test_update_table_cmek(client, to_delete):
"""Patch a table's metadata."""
dataset_id = 'update_table_cmek_{}'.format(_millis())
table_id = 'update_table_cmek_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
table = bigquery.Table(dataset.table(table_id))
original_kms_key_name = (
'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(
'cloud-samples-tests', 'us-central1', 'test', 'test'))
table.encryption_configuration = bigquery.EncryptionConfiguration(
kms_key_name=original_kms_key_name)
table = client.create_table(table)
# [START bigquery_update_table_cmek]
# from google.cloud import bigquery
# client = bigquery.Client()
assert table.encryption_configuration.kms_key_name == original_kms_key_name
# Set a new encryption key to use for the destination.
# TODO: Replace this key with a key you have created in KMS.
updated_kms_key_name = (
'projects/cloud-samples-tests/locations/us-central1/'
'keyRings/test/cryptoKeys/otherkey')
table.encryption_configuration = bigquery.EncryptionConfiguration(
kms_key_name=updated_kms_key_name)
table = client.update_table(
table, ['encryption_configuration']) # API request
assert table.encryption_configuration.kms_key_name == updated_kms_key_name
assert original_kms_key_name != updated_kms_key_name
# [END bigquery_update_table_cmek]
def test_browse_table_data(client, to_delete, capsys):
"""Retreive selected row data from a table."""
# [START bigquery_browse_table]
# from google.cloud import bigquery
# client = bigquery.Client()
dataset_ref = client.dataset('samples', project='bigquery-public-data')
table_ref = dataset_ref.table('shakespeare')
table = client.get_table(table_ref) # API call
# Load all rows from a table
rows = client.list_rows(table)
assert len(list(rows)) == table.num_rows
# Load the first 10 rows
rows = client.list_rows(table, max_results=10)
assert len(list(rows)) == 10
# Specify selected fields to limit the results to certain columns
fields = table.schema[:2] # first two columns
rows = client.list_rows(table, selected_fields=fields, max_results=10)
assert len(rows.schema) == 2
assert len(list(rows)) == 10
# Use the start index to load an arbitrary portion of the table
rows = client.list_rows(table, start_index=10, max_results=10)
# Print row data in tabular format
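# '{!s:<16}' converts each value to str and left-aligns it in a
# 16-character column.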
format_string = '{!s:<16} ' * len(rows.schema)
field_names = [field.name for field in rows.schema]
print(format_string.format(*field_names)) # prints column headers
for row in rows:
print(format_string.format(*row)) # prints row data
# [END bigquery_browse_table]
out, err = capsys.readouterr()
out = list(filter(bool, out.split('\n'))) # list of non-blank lines
assert len(out) == 11
@pytest.mark.skip(reason=(
'update_table() is flaky '
'https://github.com/GoogleCloudPlatform/google-cloud-python/issues/5589'))
def test_manage_views(client, to_delete):
project = client.project
source_dataset_id = 'source_dataset_{}'.format(_millis())
source_dataset_ref = client.dataset(source_dataset_id)
source_dataset = bigquery.Dataset(source_dataset_ref)
source_dataset = client.create_dataset(source_dataset)
to_delete.append(source_dataset)
job_config = bigquery.LoadJobConfig()
job_config.schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
]
job_config.skip_leading_rows = 1
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.csv'
source_table_id = 'us_states'
load_job = client.load_table_from_uri(
uri, source_dataset.table(source_table_id), job_config=job_config)
load_job.result()
shared_dataset_id = 'shared_dataset_{}'.format(_millis())
shared_dataset_ref = client.dataset(shared_dataset_id)
shared_dataset = bigquery.Dataset(shared_dataset_ref)
shared_dataset = client.create_dataset(shared_dataset)
to_delete.append(shared_dataset)
# [START bigquery_create_view]
# from google.cloud import bigquery
# client = bigquery.Client()
# project = 'my-project'
# source_dataset_id = 'my_source_dataset'
# source_table_id = 'us_states'
# shared_dataset_ref = client.dataset('my_shared_dataset')
# This example shows how to create a shared view of a source table of
# US States. The source table contains all 50 states, while the view will
# contain only states with names starting with 'W'.
view_ref = shared_dataset_ref.table('my_shared_view')
view = bigquery.Table(view_ref)
sql_template = (
'SELECT name, post_abbr FROM `{}.{}.{}` WHERE name LIKE "W%"')
view.view_query = sql_template.format(
project, source_dataset_id, source_table_id)
view = client.create_table(view) # API request
print('Successfully created view at {}'.format(view.full_table_id))
# [END bigquery_create_view]
# [START bigquery_update_view_query]
# from google.cloud import bigquery
# client = bigquery.Client()
# project = 'my-project'
# source_dataset_id = 'my_source_dataset'
# source_table_id = 'us_states'
# shared_dataset_ref = client.dataset('my_shared_dataset')
# This example shows how to update a shared view of a source table of
# US States. The view's query will be updated to contain only states with
# names starting with 'M'.
view_ref = shared_dataset_ref.table('my_shared_view')
view = bigquery.Table(view_ref)
sql_template = (
'SELECT name, post_abbr FROM `{}.{}.{}` WHERE name LIKE "M%"')
view.view_query = sql_template.format(
project, source_dataset_id, source_table_id)
view = client.update_table(view, ['view_query']) # API request
# [END bigquery_update_view_query]
# [START bigquery_get_view]
# from google.cloud import bigquery
# client = bigquery.Client()
# shared_dataset_id = 'my_shared_dataset'
view_ref = client.dataset(shared_dataset_id).table('my_shared_view')
view = client.get_table(view_ref) # API Request
# Display view properties
print('View at {}'.format(view.full_table_id))
print('View Query:\n{}'.format(view.view_query))
# [END bigquery_get_view]
assert view.view_query is not None
analyst_group_email = 'example-analyst-group@google.com'
# [START bigquery_grant_view_access]
# from google.cloud import bigquery
# client = bigquery.Client()
# Assign access controls to the dataset containing the view
# shared_dataset_id = 'my_shared_dataset'
# analyst_group_email = 'data_analysts@example.com'
shared_dataset = client.get_dataset(
client.dataset(shared_dataset_id)) # API request
access_entries = shared_dataset.access_entries
access_entries.append(
bigquery.AccessEntry('READER', 'groupByEmail', analyst_group_email)
)
shared_dataset.access_entries = access_entries
shared_dataset = client.update_dataset(
shared_dataset, ['access_entries']) # API request
# Authorize the view to access the source dataset
# project = 'my-project'
# source_dataset_id = 'my_source_dataset'
source_dataset = client.get_dataset(
client.dataset(source_dataset_id)) # API request
view_reference = {
'projectId': project,
'datasetId': shared_dataset_id,
'tableId': 'my_shared_view',
}
access_entries = source_dataset.access_entries
access_entries.append(
bigquery.AccessEntry(None, 'view', view_reference)
)
source_dataset.access_entries = access_entries
source_dataset = client.update_dataset(
source_dataset, ['access_entries']) # API request
# [END bigquery_grant_view_access]
def test_table_insert_rows(client, to_delete):
"""Insert / fetch table data."""
dataset_id = 'table_insert_rows_dataset_{}'.format(_millis())
table_id = 'table_insert_rows_table_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset.location = 'US'  # location must be set before the dataset is created
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)
table = client.create_table(table)
# [START bigquery_table_insert_rows]
# TODO(developer): Uncomment the lines below and replace with your values.
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset' # replace with your dataset ID
# For this sample, the table must already exist and have a defined schema
# table_id = 'my_table' # replace with your table ID
# table_ref = client.dataset(dataset_id).table(table_id)
# table = client.get_table(table_ref) # API request
rows_to_insert = [
(u'Phred Phlyntstone', 32),
(u'Wylma Phlyntstone', 29),
]
errors = client.insert_rows(table, rows_to_insert) # API request
assert errors == []
# [END bigquery_table_insert_rows]
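# Note: insert_rows() streams data via the tabledata.insertAll API.
# Streamed rows are typically available for querying within seconds but
# may not be immediately available for copy or export jobs.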
def test_load_table_from_file(client, to_delete):
"""Upload table data from a CSV file."""
dataset_id = 'load_table_from_file_dataset_{}'.format(_millis())
table_id = 'load_table_from_file_table_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset.location = 'US'
client.create_dataset(dataset)
to_delete.append(dataset)
snippets_dir = os.path.abspath(os.path.dirname(__file__))
filename = os.path.join(
snippets_dir, '..', '..', 'bigquery', 'tests', 'data', 'people.csv')
# [START bigquery_load_from_file]
# from google.cloud import bigquery
# client = bigquery.Client()
# filename = '/path/to/file.csv'
# dataset_id = 'my_dataset'
# table_id = 'my_table'
dataset_ref = client.dataset(dataset_id)
table_ref = dataset_ref.table(table_id)
job_config = bigquery.LoadJobConfig()
job_config.source_format = bigquery.SourceFormat.CSV
job_config.skip_leading_rows = 1
job_config.autodetect = True
with open(filename, 'rb') as source_file:
job = client.load_table_from_file(
source_file,
table_ref,
location='US', # Must match the destination dataset location.
job_config=job_config) # API request
job.result() # Waits for table load to complete.
print('Loaded {} rows into {}:{}.'.format(
job.output_rows, dataset_id, table_id))
# [END bigquery_load_from_file]
table = client.get_table(table_ref)
rows = list(client.list_rows(table)) # API request
assert len(rows) == 2
# Order is not preserved, so compare individually
row1 = bigquery.Row(('Wylma Phlyntstone', 29), {'full_name': 0, 'age': 1})
assert row1 in rows
row2 = bigquery.Row(('Phred Phlyntstone', 32), {'full_name': 0, 'age': 1})
assert row2 in rows
def test_load_table_from_uri_csv(client, to_delete, capsys):
dataset_id = 'load_table_from_uri_csv_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_load_table_gcs_csv]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
dataset_ref = client.dataset(dataset_id)
job_config = bigquery.LoadJobConfig()
job_config.schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
]
job_config.skip_leading_rows = 1
# The source format defaults to CSV, so the line below is optional.
job_config.source_format = bigquery.SourceFormat.CSV
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.csv'
load_job = client.load_table_from_uri(
uri,
dataset_ref.table('us_states'),
job_config=job_config) # API request
print('Starting job {}'.format(load_job.job_id))
load_job.result() # Waits for table load to complete.
print('Job finished.')
destination_table = client.get_table(dataset_ref.table('us_states'))
print('Loaded {} rows.'.format(destination_table.num_rows))
# [END bigquery_load_table_gcs_csv]
out, _ = capsys.readouterr()
assert 'Loaded 50 rows.' in out
def test_load_table_from_uri_json(client, to_delete, capsys):
dataset_id = 'load_table_from_uri_json_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset.location = 'US'
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_load_table_gcs_json]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
dataset_ref = client.dataset(dataset_id)
job_config = bigquery.LoadJobConfig()
job_config.schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
]
job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.json'
load_job = client.load_table_from_uri(
uri,
dataset_ref.table('us_states'),
location='US', # Location must match that of the destination dataset.
job_config=job_config) # API request
print('Starting job {}'.format(load_job.job_id))
load_job.result() # Waits for table load to complete.
print('Job finished.')
destination_table = client.get_table(dataset_ref.table('us_states'))
print('Loaded {} rows.'.format(destination_table.num_rows))
# [END bigquery_load_table_gcs_json]
out, _ = capsys.readouterr()
assert 'Loaded 50 rows.' in out
def test_load_table_from_uri_cmek(client, to_delete):
dataset_id = 'load_table_from_uri_cmek_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset.location = 'US'
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_load_table_gcs_json_cmek]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
dataset_ref = client.dataset(dataset_id)
job_config = bigquery.LoadJobConfig()
job_config.autodetect = True
job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON
# Set the encryption key to use for the destination.
# TODO: Replace this key with a key you have created in KMS.
kms_key_name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(
'cloud-samples-tests', 'us-central1', 'test', 'test')
encryption_config = bigquery.EncryptionConfiguration(
kms_key_name=kms_key_name)
job_config.destination_encryption_configuration = encryption_config
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.json'
load_job = client.load_table_from_uri(
uri,
dataset_ref.table('us_states'),
location='US', # Location must match that of the destination dataset.
job_config=job_config) # API request
assert load_job.job_type == 'load'
load_job.result() # Waits for table load to complete.
assert load_job.state == 'DONE'
table = client.get_table(dataset_ref.table('us_states'))
assert table.encryption_configuration.kms_key_name == kms_key_name
# [END bigquery_load_table_gcs_json_cmek]
def test_load_table_from_uri_parquet(client, to_delete, capsys):
dataset_id = 'load_table_from_uri_parquet_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_load_table_gcs_parquet]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
dataset_ref = client.dataset(dataset_id)
job_config = bigquery.LoadJobConfig()
job_config.source_format = bigquery.SourceFormat.PARQUET
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.parquet'
load_job = client.load_table_from_uri(
uri,
dataset_ref.table('us_states'),
job_config=job_config) # API request
print('Starting job {}'.format(load_job.job_id))
load_job.result() # Waits for table load to complete.
print('Job finished.')
destination_table = client.get_table(dataset_ref.table('us_states'))
print('Loaded {} rows.'.format(destination_table.num_rows))
# [END bigquery_load_table_gcs_parquet]
out, _ = capsys.readouterr()
assert 'Loaded 50 rows.' in out
def test_load_table_from_uri_orc(client, to_delete, capsys):
dataset_id = 'load_table_from_uri_orc_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_load_table_gcs_orc]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
dataset_ref = client.dataset(dataset_id)
job_config = bigquery.LoadJobConfig()
job_config.source_format = bigquery.SourceFormat.ORC
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.orc'
load_job = client.load_table_from_uri(
uri,
dataset_ref.table('us_states'),
job_config=job_config) # API request
print('Starting job {}'.format(load_job.job_id))
load_job.result() # Waits for table load to complete.
print('Job finished.')
destination_table = client.get_table(dataset_ref.table('us_states'))
print('Loaded {} rows.'.format(destination_table.num_rows))
# [END bigquery_load_table_gcs_orc]
out, _ = capsys.readouterr()
assert 'Loaded 50 rows.' in out
def test_load_table_from_uri_autodetect(client, to_delete, capsys):
"""Load table from a GCS URI using various formats and auto-detected schema
Each file format has its own tested load from URI sample. Because most of
the code is common for autodetect, append, and truncate, this sample
includes snippets for all supported formats but only calls a single load
job.
This code snippet is made up of shared code, then format-specific code,
followed by more shared code. Note that only the last format in the
format-specific code section will be tested in this test.
"""
dataset_id = 'load_table_from_uri_auto_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# Shared code
# [START bigquery_load_table_gcs_csv_autodetect]
# [START bigquery_load_table_gcs_json_autodetect]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
dataset_ref = client.dataset(dataset_id)
job_config = bigquery.LoadJobConfig()
job_config.autodetect = True
# [END bigquery_load_table_gcs_csv_autodetect]
# [END bigquery_load_table_gcs_json_autodetect]
# Format-specific code
# [START bigquery_load_table_gcs_csv_autodetect]
job_config.skip_leading_rows = 1
# The source format defaults to CSV, so the line below is optional.
job_config.source_format = bigquery.SourceFormat.CSV
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.csv'
# [END bigquery_load_table_gcs_csv_autodetect]
# skip_leading_rows applies only to CSV; unset it so the stale value does
# not carry over into the JSON snippet below.
del job_config._properties['load']['skipLeadingRows']
# [START bigquery_load_table_gcs_json_autodetect]
job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.json'
# [END bigquery_load_table_gcs_json_autodetect]
# Shared code
# [START bigquery_load_table_gcs_csv_autodetect]
# [START bigquery_load_table_gcs_json_autodetect]
load_job = client.load_table_from_uri(
uri,
dataset_ref.table('us_states'),
job_config=job_config) # API request
print('Starting job {}'.format(load_job.job_id))
load_job.result() # Waits for table load to complete.
print('Job finished.')
destination_table = client.get_table(dataset_ref.table('us_states'))
print('Loaded {} rows.'.format(destination_table.num_rows))
# [END bigquery_load_table_gcs_csv_autodetect]
# [END bigquery_load_table_gcs_json_autodetect]
out, _ = capsys.readouterr()
assert 'Loaded 50 rows.' in out
def test_load_table_from_uri_truncate(client, to_delete, capsys):
"""Replaces table data with data from a GCS URI using various formats
Each file format has its own tested load from URI sample. Because most of
the code is common for autodetect, append, and truncate, this sample
includes snippets for all supported formats but only calls a single load
job.
This code snippet is made up of shared code, then format-specific code,
followed by more shared code. Note that only the last format in the
format-specific code section will be tested in this test.
"""
dataset_id = 'load_table_from_uri_trunc_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
job_config = bigquery.LoadJobConfig()
job_config.schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
]
table_ref = dataset.table('us_states')
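# Seed the destination table with a single row so the WRITE_TRUNCATE
# examples below have existing data to replace.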
body = six.BytesIO(b'Washington,WA')
client.load_table_from_file(
body, table_ref, job_config=job_config).result()
# Shared code
# [START bigquery_load_table_gcs_csv_truncate]
# [START bigquery_load_table_gcs_json_truncate]
# [START bigquery_load_table_gcs_parquet_truncate]
# [START bigquery_load_table_gcs_orc_truncate]
# from google.cloud import bigquery
# client = bigquery.Client()
# table_ref = client.dataset('my_dataset').table('existing_table')
previous_rows = client.get_table(table_ref).num_rows
assert previous_rows > 0
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = bigquery.WriteDisposition.WRITE_TRUNCATE
# [END bigquery_load_table_gcs_csv_truncate]
# [END bigquery_load_table_gcs_json_truncate]
# [END bigquery_load_table_gcs_parquet_truncate]
# [END bigquery_load_table_gcs_orc_truncate]
# Format-specific code
# [START bigquery_load_table_gcs_csv_truncate]
job_config.skip_leading_rows = 1
# The source format defaults to CSV, so the line below is optional.
job_config.source_format = bigquery.SourceFormat.CSV
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.csv'
# [END bigquery_load_table_gcs_csv_truncate]
# skip_leading_rows applies only to CSV; unset it so the stale value does
# not carry over into the remaining format snippets below.
del job_config._properties['load']['skipLeadingRows']
# [START bigquery_load_table_gcs_json_truncate]
job_config.source_format = bigquery.SourceFormat.NEWLINE_DELIMITED_JSON
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.json'
# [END bigquery_load_table_gcs_json_truncate]
# [START bigquery_load_table_gcs_parquet_truncate]
job_config.source_format = bigquery.SourceFormat.PARQUET
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.parquet'
# [END bigquery_load_table_gcs_parquet_truncate]
# [START bigquery_load_table_gcs_orc_truncate]
job_config.source_format = bigquery.SourceFormat.ORC
uri = 'gs://cloud-samples-data/bigquery/us-states/us-states.orc'
# [END bigquery_load_table_gcs_orc_truncate]
# Shared code
# [START bigquery_load_table_gcs_csv_truncate]
# [START bigquery_load_table_gcs_json_truncate]
# [START bigquery_load_table_gcs_parquet_truncate]
# [START bigquery_load_table_gcs_orc_truncate]
load_job = client.load_table_from_uri(
uri,
table_ref,
job_config=job_config) # API request
print('Starting job {}'.format(load_job.job_id))
load_job.result() # Waits for table load to complete.
print('Job finished.')
destination_table = client.get_table(table_ref)
print('Loaded {} rows.'.format(destination_table.num_rows))
# [END bigquery_load_table_gcs_csv_truncate]
# [END bigquery_load_table_gcs_json_truncate]
# [END bigquery_load_table_gcs_parquet_truncate]
# [END bigquery_load_table_gcs_orc_truncate]
out, _ = capsys.readouterr()
assert 'Loaded 50 rows.' in out
def test_load_table_add_column(client, to_delete):
dataset_id = 'load_table_add_column_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
dataset = bigquery.Dataset(dataset_ref)
dataset.location = 'US'
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
snippets_dir = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(
snippets_dir, '..', '..', 'bigquery', 'tests', 'data', 'people.csv')
table_ref = dataset_ref.table('my_table')
old_schema = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
]
table = client.create_table(bigquery.Table(table_ref, schema=old_schema))
# [START bigquery_add_column_load_append]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
# filepath = 'path/to/your_file.csv'
# Retrieves the destination table and checks the length of the schema
table_id = 'my_table'
table_ref = dataset_ref.table(table_id)
table = client.get_table(table_ref)
print("Table {} contains {} columns.".format(table_id, len(table.schema)))
# Configures the load job to append the data to the destination table,
# allowing field addition
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
job_config.schema_update_options = [
bigquery.SchemaUpdateOption.ALLOW_FIELD_ADDITION,
]
# In this example, the existing table contains only the 'full_name' column.
# 'REQUIRED' fields cannot be added to an existing schema, so the
# additional column must be 'NULLABLE'.
job_config.schema = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('age', 'INTEGER', mode='NULLABLE'),
]
job_config.source_format = bigquery.SourceFormat.CSV
job_config.skip_leading_rows = 1
with open(filepath, 'rb') as source_file:
job = client.load_table_from_file(
source_file,
table_ref,
location='US', # Must match the destination dataset location.
job_config=job_config) # API request
job.result() # Waits for table load to complete.
print('Loaded {} rows into {}:{}.'.format(
job.output_rows, dataset_id, table_ref.table_id))
# Checks the updated length of the schema
table = client.get_table(table)
print("Table {} now contains {} columns.".format(
table_id, len(table.schema)))
# [END bigquery_add_column_load_append]
assert len(table.schema) == 2
assert table.num_rows > 0
def test_load_table_relax_column(client, to_delete):
dataset_id = 'load_table_relax_column_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
dataset = bigquery.Dataset(dataset_ref)
dataset.location = 'US'
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
snippets_dir = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(
snippets_dir, '..', '..', 'bigquery', 'tests', 'data', 'people.csv')
table_ref = dataset_ref.table('my_table')
old_schema = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED'),
bigquery.SchemaField('favorite_color', 'STRING', mode='REQUIRED'),
]
table = client.create_table(bigquery.Table(table_ref, schema=old_schema))
# [START bigquery_relax_column_load_append]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
# filepath = 'path/to/your_file.csv'
# Retrieves the destination table and checks the number of required fields
table_id = 'my_table'
table_ref = dataset_ref.table(table_id)
table = client.get_table(table_ref)
original_required_fields = sum(
field.mode == 'REQUIRED' for field in table.schema)
# In this example, the existing table has 3 required fields.
print("{} fields in the schema are required.".format(
original_required_fields))
# Configures the load job to append the data to a destination table,
# allowing field relaxation
job_config = bigquery.LoadJobConfig()
job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
job_config.schema_update_options = [
bigquery.SchemaUpdateOption.ALLOW_FIELD_RELAXATION,
]
# In this example, the existing table contains three required fields
# ('full_name', 'age', and 'favorite_color'), while the data to load
# contains only the first two fields.
job_config.schema = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED'),
]
job_config.source_format = bigquery.SourceFormat.CSV
job_config.skip_leading_rows = 1
with open(filepath, 'rb') as source_file:
job = client.load_table_from_file(
source_file,
table_ref,
location='US', # Must match the destination dataset location.
job_config=job_config) # API request
job.result() # Waits for table load to complete.
print('Loaded {} rows into {}:{}.'.format(
job.output_rows, dataset_id, table_ref.table_id))
# Checks the updated number of required fields
table = client.get_table(table)
current_required_fields = sum(
field.mode == 'REQUIRED' for field in table.schema)
print("{} fields in the schema are now required.".format(
current_required_fields))
# [END bigquery_relax_column_load_append]
assert original_required_fields - current_required_fields == 1
assert len(table.schema) == 3
assert table.schema[2].mode == 'NULLABLE'
assert table.num_rows > 0
def test_copy_table(client, to_delete):
dataset_id = 'copy_table_dataset_{}'.format(_millis())
dest_dataset = bigquery.Dataset(client.dataset(dataset_id))
dest_dataset.location = 'US'
dest_dataset = client.create_dataset(dest_dataset)
to_delete.append(dest_dataset)
# [START bigquery_copy_table]
# from google.cloud import bigquery
# client = bigquery.Client()
source_dataset = client.dataset('samples', project='bigquery-public-data')
source_table_ref = source_dataset.table('shakespeare')
# dataset_id = 'my_dataset'
dest_table_ref = client.dataset(dataset_id).table('destination_table')
job = client.copy_table(
source_table_ref,
dest_table_ref,
# Location must match that of the source and destination tables.
location='US') # API request
job.result() # Waits for job to complete.
assert job.state == 'DONE'
dest_table = client.get_table(dest_table_ref) # API request
assert dest_table.num_rows > 0
# [END bigquery_copy_table]
def test_copy_table_multiple_source(client, to_delete):
dest_dataset_id = 'dest_dataset_{}'.format(_millis())
dest_dataset = bigquery.Dataset(client.dataset(dest_dataset_id))
dest_dataset.location = 'US'
dest_dataset = client.create_dataset(dest_dataset)
to_delete.append(dest_dataset)
source_dataset_id = 'source_dataset_{}'.format(_millis())
source_dataset = bigquery.Dataset(client.dataset(source_dataset_id))
source_dataset.location = 'US'
source_dataset = client.create_dataset(source_dataset)
to_delete.append(source_dataset)
schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
]
table_data = {'table1': b'Washington,WA', 'table2': b'California,CA'}
for table_id, data in table_data.items():
table_ref = source_dataset.table(table_id)
job_config = bigquery.LoadJobConfig()
job_config.schema = schema
body = six.BytesIO(data)
client.load_table_from_file(
body,
table_ref,
# Location must match that of the destination dataset.
location='US',
job_config=job_config).result()
# [START bigquery_copy_table_multiple_source]
# from google.cloud import bigquery
# client = bigquery.Client()
# source_dataset_id = 'my_source_dataset'
# dest_dataset_id = 'my_destination_dataset'
table1_ref = client.dataset(source_dataset_id).table('table1')
table2_ref = client.dataset(source_dataset_id).table('table2')
dest_table_ref = client.dataset(dest_dataset_id).table('destination_table')
job = client.copy_table(
[table1_ref, table2_ref],
dest_table_ref,
# Location must match that of the source and destination tables.
location='US') # API request
job.result() # Waits for job to complete.
assert job.state == 'DONE'
dest_table = client.get_table(dest_table_ref) # API request
assert dest_table.num_rows > 0
# [END bigquery_copy_table_multiple_source]
assert dest_table.num_rows == 2
def test_copy_table_cmek(client, to_delete):
dataset_id = 'copy_table_cmek_{}'.format(_millis())
dest_dataset = bigquery.Dataset(client.dataset(dataset_id))
dest_dataset.location = 'US'
dest_dataset = client.create_dataset(dest_dataset)
to_delete.append(dest_dataset)
# [START bigquery_copy_table_cmek]
# from google.cloud import bigquery
# client = bigquery.Client()
source_dataset = bigquery.DatasetReference(
'bigquery-public-data', 'samples')
source_table_ref = source_dataset.table('shakespeare')
# dataset_id = 'my_dataset'
dest_dataset_ref = client.dataset(dataset_id)
dest_table_ref = dest_dataset_ref.table('destination_table')
# Set the encryption key to use for the destination.
# TODO: Replace this key with a key you have created in KMS.
kms_key_name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(
'cloud-samples-tests', 'us-central1', 'test', 'test')
encryption_config = bigquery.EncryptionConfiguration(
kms_key_name=kms_key_name)
job_config = bigquery.CopyJobConfig()
job_config.destination_encryption_configuration = encryption_config
job = client.copy_table(
source_table_ref,
dest_table_ref,
# Location must match that of the source and destination tables.
location='US',
job_config=job_config) # API request
job.result() # Waits for job to complete.
assert job.state == 'DONE'
dest_table = client.get_table(dest_table_ref)
assert dest_table.encryption_configuration.kms_key_name == kms_key_name
# [END bigquery_copy_table_cmek]
def test_extract_table(client, to_delete):
bucket_name = 'extract_shakespeare_{}'.format(_millis())
storage_client = storage.Client()
bucket = retry_storage_errors(storage_client.create_bucket)(bucket_name)
to_delete.append(bucket)
# [START bigquery_extract_table]
# from google.cloud import bigquery
# client = bigquery.Client()
# bucket_name = 'my-bucket'
project = 'bigquery-public-data'
dataset_id = 'samples'
table_id = 'shakespeare'
destination_uri = 'gs://{}/{}'.format(bucket_name, 'shakespeare.csv')
dataset_ref = client.dataset(dataset_id, project=project)
table_ref = dataset_ref.table(table_id)
extract_job = client.extract_table(
table_ref,
destination_uri,
# Location must match that of the source table.
location='US') # API request
extract_job.result() # Waits for job to complete.
print('Exported {}:{}.{} to {}'.format(
project, dataset_id, table_id, destination_uri))
# [END bigquery_extract_table]
blob = retry_storage_errors(bucket.get_blob)('shakespeare.csv')
assert blob.exists()
assert blob.size > 0
to_delete.insert(0, blob)
def test_extract_table_json(client, to_delete):
bucket_name = 'extract_shakespeare_json_{}'.format(_millis())
storage_client = storage.Client()
bucket = retry_storage_errors(storage_client.create_bucket)(bucket_name)
to_delete.append(bucket)
# [START bigquery_extract_table_json]
# from google.cloud import bigquery
# client = bigquery.Client()
# bucket_name = 'my-bucket'
destination_uri = 'gs://{}/{}'.format(bucket_name, 'shakespeare.json')
dataset_ref = client.dataset('samples', project='bigquery-public-data')
table_ref = dataset_ref.table('shakespeare')
job_config = bigquery.job.ExtractJobConfig()
job_config.destination_format = (
bigquery.DestinationFormat.NEWLINE_DELIMITED_JSON)
extract_job = client.extract_table(
table_ref,
destination_uri,
job_config=job_config,
# Location must match that of the source table.
location='US') # API request
extract_job.result() # Waits for job to complete.
# [END bigquery_extract_table_json]
blob = retry_storage_errors(bucket.get_blob)('shakespeare.json')
assert blob.exists()
assert blob.size > 0
to_delete.insert(0, blob)
def test_extract_table_compressed(client, to_delete):
bucket_name = 'extract_shakespeare_compress_{}'.format(_millis())
storage_client = storage.Client()
bucket = retry_storage_errors(storage_client.create_bucket)(bucket_name)
to_delete.append(bucket)
# [START bigquery_extract_table_compressed]
# from google.cloud import bigquery
# client = bigquery.Client()
# bucket_name = 'my-bucket'
destination_uri = 'gs://{}/{}'.format(bucket_name, 'shakespeare.csv.gz')
dataset_ref = client.dataset('samples', project='bigquery-public-data')
table_ref = dataset_ref.table('shakespeare')
job_config = bigquery.job.ExtractJobConfig()
job_config.compression = bigquery.Compression.GZIP
extract_job = client.extract_table(
table_ref,
destination_uri,
# Location must match that of the source table.
location='US',
job_config=job_config) # API request
extract_job.result() # Waits for job to complete.
# [END bigquery_extract_table_compressed]
blob = retry_storage_errors(bucket.get_blob)('shakespeare.csv.gz')
assert blob.exists()
assert blob.size > 0
to_delete.insert(0, blob)
def test_delete_table(client, to_delete):
"""Delete a table."""
from google.cloud.exceptions import NotFound
dataset_id = 'delete_table_dataset_{}'.format(_millis())
table_id = 'delete_table_table_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
dataset = bigquery.Dataset(dataset_ref)
dataset.location = 'US'
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
table_ref = dataset.table(table_id)
table = bigquery.Table(table_ref, schema=SCHEMA)
client.create_table(table)
# [START bigquery_delete_table]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
# table_id = 'my_table'
table_ref = client.dataset(dataset_id).table(table_id)
client.delete_table(table_ref) # API request
print('Table {}:{} deleted.'.format(dataset_id, table_id))
# [END bigquery_delete_table]
with pytest.raises(NotFound):
client.get_table(table) # API request
def test_undelete_table(client, to_delete):
dataset_id = 'undelete_table_dataset_{}'.format(_millis())
table_id = 'undelete_table_table_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
dataset.location = 'US'
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
table = bigquery.Table(dataset.table(table_id), schema=SCHEMA)
client.create_table(table)
# [START bigquery_undelete_table]
# TODO(developer): Uncomment the lines below and replace with your values.
# import time
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset' # Replace with your dataset ID.
# table_id = 'my_table' # Replace with your table ID.
table_ref = client.dataset(dataset_id).table(table_id)
# TODO(developer): Choose an appropriate snapshot point as epoch
# milliseconds. For this example, we choose the current time as we're about
# to delete the table immediately afterwards.
snapshot_epoch = int(time.time() * 1000)
# [END bigquery_undelete_table]
# Due to very short lifecycle of the table, ensure we're not picking a time
# prior to the table creation due to time drift between backend and client.
table = client.get_table(table_ref)
    # Convert the creation time to milliseconds to match snapshot_epoch.
    created_epoch = datetime_helpers.to_microseconds(table.created) // 1000
if created_epoch > snapshot_epoch:
snapshot_epoch = created_epoch
# [START bigquery_undelete_table]
# "Accidentally" delete the table.
client.delete_table(table_ref) # API request
# Construct the restore-from table ID using a snapshot decorator.
snapshot_table_id = '{}@{}'.format(table_id, snapshot_epoch)
source_table_ref = client.dataset(dataset_id).table(snapshot_table_id)
# Choose a new table ID for the recovered table data.
recovered_table_id = '{}_recovered'.format(table_id)
dest_table_ref = client.dataset(dataset_id).table(recovered_table_id)
# Construct and run a copy job.
job = client.copy_table(
source_table_ref,
dest_table_ref,
# Location must match that of the source and destination tables.
location='US') # API request
job.result() # Waits for job to complete.
print('Copied data from deleted table {} to {}'.format(
table_id, recovered_table_id))
# [END bigquery_undelete_table]
def test_client_query(client):
"""Run a simple query."""
# [START bigquery_query]
# from google.cloud import bigquery
# client = bigquery.Client()
query = (
'SELECT name FROM `bigquery-public-data.usa_names.usa_1910_2013` '
'WHERE state = "TX" '
'LIMIT 100')
query_job = client.query(
query,
# Location must match that of the dataset(s) referenced in the query.
location='US') # API request - starts the query
for row in query_job: # API request - fetches results
# Row values can be accessed by field name or index
assert row[0] == row.name == row['name']
print(row)
# [END bigquery_query]
def test_client_query_legacy_sql(client):
"""Run a query with Legacy SQL explicitly set"""
# [START bigquery_query_legacy]
# from google.cloud import bigquery
# client = bigquery.Client()
query = (
'SELECT name FROM [bigquery-public-data:usa_names.usa_1910_2013] '
'WHERE state = "TX" '
'LIMIT 100')
# Set use_legacy_sql to True to use legacy SQL syntax.
job_config = bigquery.QueryJobConfig()
job_config.use_legacy_sql = True
query_job = client.query(
query,
# Location must match that of the dataset(s) referenced in the query.
location='US',
job_config=job_config) # API request - starts the query
# Print the results.
for row in query_job: # API request - fetches results
print(row)
# [END bigquery_query_legacy]
def test_manage_job(client):
sql = """
SELECT corpus
FROM `bigquery-public-data.samples.shakespeare`
GROUP BY corpus;
"""
location = 'us'
job = client.query(sql, location=location)
job_id = job.job_id
# [START bigquery_cancel_job]
# TODO(developer): Uncomment the lines below and replace with your values.
# from google.cloud import bigquery
# client = bigquery.Client()
# job_id = 'bq-job-123x456-123y123z123c' # replace with your job ID
# location = 'us' # replace with your location
job = client.cancel_job(job_id, location=location)
# [END bigquery_cancel_job]
# [START bigquery_get_job]
# TODO(developer): Uncomment the lines below and replace with your values.
# from google.cloud import bigquery
# client = bigquery.Client()
# job_id = 'bq-job-123x456-123y123z123c' # replace with your job ID
# location = 'us' # replace with your location
job = client.get_job(job_id, location=location) # API request
# Print selected job properties
print('Details for job {} running in {}:'.format(job_id, location))
print('\tType: {}\n\tState: {}\n\tCreated: {}'.format(
job.job_type, job.state, job.created))
# [END bigquery_get_job]
def test_client_query_destination_table(client, to_delete):
"""Run a query"""
dataset_id = 'query_destination_table_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
to_delete.append(dataset_ref)
dataset = bigquery.Dataset(dataset_ref)
dataset.location = 'US'
client.create_dataset(dataset)
# [START bigquery_query_destination_table]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'your_dataset_id'
job_config = bigquery.QueryJobConfig()
# Set the destination table
table_ref = client.dataset(dataset_id).table('your_table_id')
job_config.destination = table_ref
sql = """
SELECT corpus
FROM `bigquery-public-data.samples.shakespeare`
GROUP BY corpus;
"""
# Start the query, passing in the extra configuration.
query_job = client.query(
sql,
# Location must match that of the dataset(s) referenced in the query
# and of the destination table.
location='US',
job_config=job_config) # API request - starts the query
query_job.result() # Waits for the query to finish
print('Query results loaded to table {}'.format(table_ref.path))
# [END bigquery_query_destination_table]
def test_client_query_destination_table_legacy(client, to_delete):
dataset_id = 'query_destination_table_legacy_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
to_delete.append(dataset_ref)
dataset = bigquery.Dataset(dataset_ref)
dataset.location = 'US'
client.create_dataset(dataset)
# [START bigquery_query_legacy_large_results]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'your_dataset_id'
job_config = bigquery.QueryJobConfig()
# Set use_legacy_sql to True to use legacy SQL syntax.
job_config.use_legacy_sql = True
# Set the destination table
table_ref = client.dataset(dataset_id).table('your_table_id')
job_config.destination = table_ref
job_config.allow_large_results = True
sql = """
SELECT corpus
FROM [bigquery-public-data:samples.shakespeare]
GROUP BY corpus;
"""
# Start the query, passing in the extra configuration.
query_job = client.query(
sql,
# Location must match that of the dataset(s) referenced in the query
# and of the destination table.
location='US',
job_config=job_config) # API request - starts the query
query_job.result() # Waits for the query to finish
print('Query results loaded to table {}'.format(table_ref.path))
# [END bigquery_query_legacy_large_results]
def test_client_query_destination_table_cmek(client, to_delete):
"""Run a query"""
dataset_id = 'query_destination_table_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
to_delete.append(dataset_ref)
dataset = bigquery.Dataset(dataset_ref)
dataset.location = 'US'
client.create_dataset(dataset)
# [START bigquery_query_destination_table_cmek]
# from google.cloud import bigquery
# client = bigquery.Client()
job_config = bigquery.QueryJobConfig()
# Set the destination table. Here, dataset_id is a string, such as:
# dataset_id = 'your_dataset_id'
table_ref = client.dataset(dataset_id).table('your_table_id')
job_config.destination = table_ref
# Set the encryption key to use for the destination.
# TODO: Replace this key with a key you have created in KMS.
kms_key_name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(
'cloud-samples-tests', 'us-central1', 'test', 'test')
encryption_config = bigquery.EncryptionConfiguration(
kms_key_name=kms_key_name)
job_config.destination_encryption_configuration = encryption_config
# Start the query, passing in the extra configuration.
query_job = client.query(
'SELECT 17 AS my_col;',
# Location must match that of the dataset(s) referenced in the query
# and of the destination table.
location='US',
job_config=job_config) # API request - starts the query
query_job.result()
# The destination table is written using the encryption configuration.
table = client.get_table(table_ref)
assert table.encryption_configuration.kms_key_name == kms_key_name
# [END bigquery_query_destination_table_cmek]
def test_client_query_batch(client, to_delete):
# [START bigquery_query_batch]
# from google.cloud import bigquery
# client = bigquery.Client()
job_config = bigquery.QueryJobConfig()
# Run at batch priority, which won't count toward concurrent rate limit.
job_config.priority = bigquery.QueryPriority.BATCH
sql = """
SELECT corpus
FROM `bigquery-public-data.samples.shakespeare`
GROUP BY corpus;
"""
# Location must match that of the dataset(s) referenced in the query.
location = 'US'
# API request - starts the query
query_job = client.query(sql, location=location, job_config=job_config)
# Check on the progress by getting the job's updated state. Once the state
# is `DONE`, the results are ready.
query_job = client.get_job(
query_job.job_id, location=location) # API request - fetches job
print('Job {} is currently in state {}'.format(
query_job.job_id, query_job.state))
# [END bigquery_query_batch]
def test_client_query_relax_column(client, to_delete):
dataset_id = 'query_relax_column_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
dataset = bigquery.Dataset(dataset_ref)
dataset.location = 'US'
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
table_ref = dataset_ref.table('my_table')
schema = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED'),
]
table = client.create_table(
bigquery.Table(table_ref, schema=schema))
# [START bigquery_relax_column_query_append]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
# Retrieves the destination table and checks the number of required fields
table_id = 'my_table'
table_ref = dataset_ref.table(table_id)
table = client.get_table(table_ref)
original_required_fields = sum(
field.mode == 'REQUIRED' for field in table.schema)
# In this example, the existing table has 2 required fields
print("{} fields in the schema are required.".format(
original_required_fields))
# Configures the query to append the results to a destination table,
# allowing field relaxation
job_config = bigquery.QueryJobConfig()
job_config.schema_update_options = [
bigquery.SchemaUpdateOption.ALLOW_FIELD_RELAXATION,
]
job_config.destination = table_ref
job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
query_job = client.query(
# In this example, the existing table contains 'full_name' and 'age' as
# required columns, but the query results will omit the second column.
'SELECT "Beyonce" as full_name;',
# Location must match that of the dataset(s) referenced in the query
# and of the destination table.
location='US',
job_config=job_config
) # API request - starts the query
query_job.result() # Waits for the query to finish
print("Query job {} complete.".format(query_job.job_id))
# Checks the updated number of required fields
table = client.get_table(table)
current_required_fields = sum(
field.mode == 'REQUIRED' for field in table.schema)
print("{} fields in the schema are now required.".format(
current_required_fields))
# [END bigquery_relax_column_query_append]
assert original_required_fields - current_required_fields > 0
assert len(table.schema) == 2
assert table.schema[1].mode == 'NULLABLE'
assert table.num_rows > 0
def test_client_query_add_column(client, to_delete):
dataset_id = 'query_add_column_{}'.format(_millis())
dataset_ref = client.dataset(dataset_id)
dataset = bigquery.Dataset(dataset_ref)
dataset.location = 'US'
dataset = client.create_dataset(dataset)
to_delete.append(dataset)
table_ref = dataset_ref.table('my_table')
schema = [
bigquery.SchemaField('full_name', 'STRING', mode='REQUIRED'),
bigquery.SchemaField('age', 'INTEGER', mode='REQUIRED'),
]
table = client.create_table(bigquery.Table(table_ref, schema=schema))
# [START bigquery_add_column_query_append]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_ref = client.dataset('my_dataset')
# Retrieves the destination table and checks the length of the schema
table_id = 'my_table'
table_ref = dataset_ref.table(table_id)
table = client.get_table(table_ref)
print("Table {} contains {} columns.".format(table_id, len(table.schema)))
# Configures the query to append the results to a destination table,
# allowing field addition
job_config = bigquery.QueryJobConfig()
job_config.schema_update_options = [
bigquery.SchemaUpdateOption.ALLOW_FIELD_ADDITION,
]
job_config.destination = table_ref
job_config.write_disposition = bigquery.WriteDisposition.WRITE_APPEND
query_job = client.query(
# In this example, the existing table contains only the 'full_name' and
# 'age' columns, while the results of this query will contain an
# additional 'favorite_color' column.
'SELECT "Timmy" as full_name, 85 as age, "Blue" as favorite_color;',
# Location must match that of the dataset(s) referenced in the query
# and of the destination table.
location='US',
job_config=job_config
) # API request - starts the query
query_job.result() # Waits for the query to finish
print("Query job {} complete.".format(query_job.job_id))
# Checks the updated length of the schema
table = client.get_table(table)
print("Table {} now contains {} columns.".format(
table_id, len(table.schema)))
# [END bigquery_add_column_query_append]
assert len(table.schema) == 3
assert table.num_rows > 0
def test_client_query_w_named_params(client, capsys):
"""Run a query using named query parameters"""
# [START bigquery_query_params_named]
# from google.cloud import bigquery
# client = bigquery.Client()
query = """
SELECT word, word_count
FROM `bigquery-public-data.samples.shakespeare`
WHERE corpus = @corpus
AND word_count >= @min_word_count
ORDER BY word_count DESC;
"""
query_params = [
bigquery.ScalarQueryParameter('corpus', 'STRING', 'romeoandjuliet'),
bigquery.ScalarQueryParameter('min_word_count', 'INT64', 250)
]
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = query_params
query_job = client.query(
query,
# Location must match that of the dataset(s) referenced in the query.
location='US',
job_config=job_config) # API request - starts the query
# Print the results
for row in query_job:
print('{}: \t{}'.format(row.word, row.word_count))
assert query_job.state == 'DONE'
# [END bigquery_query_params_named]
out, _ = capsys.readouterr()
assert 'the' in out
def test_client_query_w_positional_params(client, capsys):
"""Run a query using query parameters"""
# [START bigquery_query_params_positional]
# from google.cloud import bigquery
# client = bigquery.Client()
query = """
SELECT word, word_count
FROM `bigquery-public-data.samples.shakespeare`
WHERE corpus = ?
AND word_count >= ?
ORDER BY word_count DESC;
"""
# Set the name to None to use positional parameters.
# Note that you cannot mix named and positional parameters.
query_params = [
bigquery.ScalarQueryParameter(None, 'STRING', 'romeoandjuliet'),
bigquery.ScalarQueryParameter(None, 'INT64', 250)
]
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = query_params
query_job = client.query(
query,
# Location must match that of the dataset(s) referenced in the query.
location='US',
job_config=job_config) # API request - starts the query
# Print the results
for row in query_job:
print('{}: \t{}'.format(row.word, row.word_count))
assert query_job.state == 'DONE'
# [END bigquery_query_params_positional]
out, _ = capsys.readouterr()
assert 'the' in out
def test_client_query_w_timestamp_params(client, capsys):
"""Run a query using query parameters"""
# [START bigquery_query_params_timestamps]
# from google.cloud import bigquery
# client = bigquery.Client()
import datetime
import pytz
query = 'SELECT TIMESTAMP_ADD(@ts_value, INTERVAL 1 HOUR);'
query_params = [
bigquery.ScalarQueryParameter(
'ts_value',
'TIMESTAMP',
datetime.datetime(2016, 12, 7, 8, 0, tzinfo=pytz.UTC))
]
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = query_params
query_job = client.query(
query,
# Location must match that of the dataset(s) referenced in the query.
location='US',
job_config=job_config) # API request - starts the query
# Print the results
for row in query_job:
print(row)
assert query_job.state == 'DONE'
# [END bigquery_query_params_timestamps]
out, _ = capsys.readouterr()
assert '2016, 12, 7, 9, 0' in out
def test_client_query_w_array_params(client, capsys):
"""Run a query using array query parameters"""
# [START bigquery_query_params_arrays]
# from google.cloud import bigquery
# client = bigquery.Client()
query = """
SELECT name, sum(number) as count
FROM `bigquery-public-data.usa_names.usa_1910_2013`
WHERE gender = @gender
AND state IN UNNEST(@states)
GROUP BY name
ORDER BY count DESC
LIMIT 10;
"""
query_params = [
bigquery.ScalarQueryParameter('gender', 'STRING', 'M'),
bigquery.ArrayQueryParameter(
'states', 'STRING', ['WA', 'WI', 'WV', 'WY'])
]
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = query_params
query_job = client.query(
query,
# Location must match that of the dataset(s) referenced in the query.
location='US',
job_config=job_config) # API request - starts the query
# Print the results
for row in query_job:
print('{}: \t{}'.format(row.name, row.count))
assert query_job.state == 'DONE'
# [END bigquery_query_params_arrays]
out, _ = capsys.readouterr()
assert 'James' in out
def test_client_query_w_struct_params(client, capsys):
"""Run a query using struct query parameters"""
# [START bigquery_query_params_structs]
# from google.cloud import bigquery
# client = bigquery.Client()
query = 'SELECT @struct_value AS s;'
query_params = [
bigquery.StructQueryParameter(
'struct_value',
bigquery.ScalarQueryParameter('x', 'INT64', 1),
bigquery.ScalarQueryParameter('y', 'STRING', 'foo')
)
]
job_config = bigquery.QueryJobConfig()
job_config.query_parameters = query_params
query_job = client.query(
query,
# Location must match that of the dataset(s) referenced in the query.
location='US',
job_config=job_config) # API request - starts the query
# Print the results
for row in query_job:
print(row.s)
assert query_job.state == 'DONE'
# [END bigquery_query_params_structs]
out, _ = capsys.readouterr()
assert '1' in out
assert 'foo' in out
def test_client_query_dry_run(client):
"""Run a dry run query"""
# [START bigquery_query_dry_run]
# from google.cloud import bigquery
# client = bigquery.Client()
job_config = bigquery.QueryJobConfig()
job_config.dry_run = True
job_config.use_query_cache = False
query_job = client.query(
('SELECT name, COUNT(*) as name_count '
'FROM `bigquery-public-data.usa_names.usa_1910_2013` '
"WHERE state = 'WA' "
'GROUP BY name'),
# Location must match that of the dataset(s) referenced in the query.
location='US',
job_config=job_config) # API request
# A dry run query completes immediately.
assert query_job.state == 'DONE'
assert query_job.dry_run
print("This query will process {} bytes.".format(
query_job.total_bytes_processed))
# [END bigquery_query_dry_run]
assert query_job.total_bytes_processed > 0
def test_query_no_cache(client):
# [START bigquery_query_no_cache]
# from google.cloud import bigquery
# client = bigquery.Client()
job_config = bigquery.QueryJobConfig()
job_config.use_query_cache = False
sql = """
SELECT corpus
FROM `bigquery-public-data.samples.shakespeare`
GROUP BY corpus;
"""
query_job = client.query(
sql,
# Location must match that of the dataset(s) referenced in the query.
location='US',
job_config=job_config) # API request
# Print the results.
for row in query_job: # API request - fetches results
print(row)
# [END bigquery_query_no_cache]
def test_query_external_gcs_temporary_table(client):
# [START bigquery_query_external_gcs_temp]
# from google.cloud import bigquery
# client = bigquery.Client()
# Configure the external data source and query job
external_config = bigquery.ExternalConfig('CSV')
external_config.source_uris = [
'gs://cloud-samples-data/bigquery/us-states/us-states.csv',
]
external_config.schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
]
external_config.options.skip_leading_rows = 1 # optionally skip header row
table_id = 'us_states'
job_config = bigquery.QueryJobConfig()
job_config.table_definitions = {table_id: external_config}
# Example query to find states starting with 'W'
sql = 'SELECT * FROM `{}` WHERE name LIKE "W%"'.format(table_id)
query_job = client.query(sql, job_config=job_config) # API request
w_states = list(query_job) # Waits for query to finish
print('There are {} states with names starting with W.'.format(
len(w_states)))
# [END bigquery_query_external_gcs_temp]
assert len(w_states) == 4
def test_query_external_gcs_permanent_table(client, to_delete):
dataset_id = 'query_external_gcs_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_query_external_gcs_perm]
# from google.cloud import bigquery
# client = bigquery.Client()
# dataset_id = 'my_dataset'
# Configure the external data source
dataset_ref = client.dataset(dataset_id)
table_id = 'us_states'
schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
]
table = bigquery.Table(dataset_ref.table(table_id), schema=schema)
external_config = bigquery.ExternalConfig('CSV')
external_config.source_uris = [
'gs://cloud-samples-data/bigquery/us-states/us-states.csv',
]
external_config.options.skip_leading_rows = 1 # optionally skip header row
table.external_data_configuration = external_config
# Create a permanent table linked to the GCS file
table = client.create_table(table) # API request
# Example query to find states starting with 'W'
sql = 'SELECT * FROM `{}.{}` WHERE name LIKE "W%"'.format(
dataset_id, table_id)
query_job = client.query(sql) # API request
w_states = list(query_job) # Waits for query to finish
print('There are {} states with names starting with W.'.format(
len(w_states)))
# [END bigquery_query_external_gcs_perm]
assert len(w_states) == 4
def test_query_external_sheets_temporary_table(client):
# [START bigquery_query_external_sheets_temp]
# [START bigquery_auth_drive_scope]
import google.auth
# from google.cloud import bigquery
# Create credentials with Drive & BigQuery API scopes
# Both APIs must be enabled for your project before running this code
credentials, project = google.auth.default(scopes=[
'https://www.googleapis.com/auth/drive',
'https://www.googleapis.com/auth/bigquery',
])
client = bigquery.Client(credentials=credentials, project=project)
# [END bigquery_auth_drive_scope]
# Configure the external data source and query job
external_config = bigquery.ExternalConfig('GOOGLE_SHEETS')
# Use a shareable link or grant viewing access to the email address you
# used to authenticate with BigQuery (this example Sheet is public)
sheet_url = (
'https://docs.google.com/spreadsheets'
'/d/1i_QCL-7HcSyUZmIbP9E6lO_T5u3HnpLe7dnpHaijg_E/edit?usp=sharing')
external_config.source_uris = [sheet_url]
external_config.schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
]
external_config.options.skip_leading_rows = 1 # optionally skip header row
table_id = 'us_states'
job_config = bigquery.QueryJobConfig()
job_config.table_definitions = {table_id: external_config}
# Example query to find states starting with 'W'
sql = 'SELECT * FROM `{}` WHERE name LIKE "W%"'.format(table_id)
query_job = client.query(sql, job_config=job_config) # API request
w_states = list(query_job) # Waits for query to finish
print('There are {} states with names starting with W.'.format(
len(w_states)))
# [END bigquery_query_external_sheets_temp]
assert len(w_states) == 4
def test_query_external_sheets_permanent_table(client, to_delete):
dataset_id = 'query_external_sheets_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_query_external_sheets_perm]
import google.auth
# from google.cloud import bigquery
# dataset_id = 'my_dataset'
# Create credentials with Drive & BigQuery API scopes
# Both APIs must be enabled for your project before running this code
credentials, project = google.auth.default(scopes=[
'https://www.googleapis.com/auth/drive',
'https://www.googleapis.com/auth/bigquery',
])
client = bigquery.Client(credentials=credentials, project=project)
# Configure the external data source
dataset_ref = client.dataset(dataset_id)
table_id = 'us_states'
schema = [
bigquery.SchemaField('name', 'STRING'),
bigquery.SchemaField('post_abbr', 'STRING')
]
table = bigquery.Table(dataset_ref.table(table_id), schema=schema)
external_config = bigquery.ExternalConfig('GOOGLE_SHEETS')
# Use a shareable link or grant viewing access to the email address you
# used to authenticate with BigQuery (this example Sheet is public)
sheet_url = (
'https://docs.google.com/spreadsheets'
'/d/1i_QCL-7HcSyUZmIbP9E6lO_T5u3HnpLe7dnpHaijg_E/edit?usp=sharing')
external_config.source_uris = [sheet_url]
external_config.options.skip_leading_rows = 1 # optionally skip header row
table.external_data_configuration = external_config
# Create a permanent table linked to the Sheets file
table = client.create_table(table) # API request
# Example query to find states starting with 'W'
sql = 'SELECT * FROM `{}.{}` WHERE name LIKE "W%"'.format(
dataset_id, table_id)
query_job = client.query(sql) # API request
w_states = list(query_job) # Waits for query to finish
print('There are {} states with names starting with W.'.format(
len(w_states)))
# [END bigquery_query_external_sheets_perm]
assert len(w_states) == 4
def test_ddl_create_view(client, to_delete, capsys):
"""Create a view via a DDL query."""
project = client.project
dataset_id = 'ddl_view_{}'.format(_millis())
table_id = 'new_view'
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_ddl_create_view]
# from google.cloud import bigquery
# project = 'my-project'
# dataset_id = 'my_dataset'
# table_id = 'new_view'
# client = bigquery.Client(project=project)
sql = """
CREATE VIEW `{}.{}.{}`
OPTIONS(
expiration_timestamp=TIMESTAMP_ADD(
CURRENT_TIMESTAMP(), INTERVAL 48 HOUR),
friendly_name="new_view",
description="a view that expires in 2 days",
labels=[("org_unit", "development")]
)
AS SELECT name, state, year, number
FROM `bigquery-public-data.usa_names.usa_1910_current`
WHERE state LIKE 'W%'
""".format(project, dataset_id, table_id)
job = client.query(sql) # API request.
job.result() # Waits for the query to finish.
print('Created new view "{}.{}.{}".'.format(
job.destination.project,
job.destination.dataset_id,
job.destination.table_id))
# [END bigquery_ddl_create_view]
out, _ = capsys.readouterr()
assert 'Created new view "{}.{}.{}".'.format(
project, dataset_id, table_id) in out
# Test that listing query result rows succeeds so that generic query
# processing tools work with DDL statements.
rows = list(job)
assert len(rows) == 0
if pandas is not None:
df = job.to_dataframe()
assert len(df) == 0
def test_client_list_jobs(client):
"""List jobs for a project."""
# [START bigquery_list_jobs]
# TODO(developer): Uncomment the lines below and replace with your values.
# from google.cloud import bigquery
# project = 'my_project' # replace with your project ID
# client = bigquery.Client(project=project)
import datetime
# List the 10 most recent jobs in reverse chronological order.
# Omit the max_results parameter to list jobs from the past 6 months.
print("Last 10 jobs:")
for job in client.list_jobs(max_results=10): # API request(s)
print(job.job_id)
# The following are examples of additional optional parameters:
# Use min_creation_time and/or max_creation_time to specify a time window.
print("Jobs from the last ten minutes:")
ten_mins_ago = datetime.datetime.utcnow() - datetime.timedelta(minutes=10)
for job in client.list_jobs(min_creation_time=ten_mins_ago):
print(job.job_id)
# Use all_users to include jobs run by all users in the project.
print("Last 10 jobs run by all users:")
for job in client.list_jobs(max_results=10, all_users=True):
print("{} run by user: {}".format(job.job_id, job.user_email))
# Use state_filter to filter by job state.
print("Jobs currently running:")
for job in client.list_jobs(state_filter='RUNNING'):
print(job.job_id)
# [END bigquery_list_jobs]
@pytest.mark.skipif(pandas is None, reason='Requires `pandas`')
def test_query_results_as_dataframe(client):
# [START bigquery_query_results_dataframe]
# from google.cloud import bigquery
# client = bigquery.Client()
sql = """
SELECT name, SUM(number) as count
FROM `bigquery-public-data.usa_names.usa_1910_current`
GROUP BY name
ORDER BY count DESC
LIMIT 10
"""
df = client.query(sql).to_dataframe()
# [END bigquery_query_results_dataframe]
assert isinstance(df, pandas.DataFrame)
assert len(list(df)) == 2 # verify the number of columns
assert len(df) == 10 # verify the number of rows
@pytest.mark.skipif(pandas is None, reason='Requires `pandas`')
def test_list_rows_as_dataframe(client):
# [START bigquery_list_rows_dataframe]
# from google.cloud import bigquery
# client = bigquery.Client()
dataset_ref = client.dataset('samples', project='bigquery-public-data')
table_ref = dataset_ref.table('shakespeare')
table = client.get_table(table_ref)
df = client.list_rows(table).to_dataframe()
# [END bigquery_list_rows_dataframe]
assert isinstance(df, pandas.DataFrame)
assert len(list(df)) == len(table.schema) # verify the number of columns
assert len(df) == table.num_rows # verify the number of rows
@pytest.mark.skipif(pandas is None, reason='Requires `pandas`')
@pytest.mark.skipif(pyarrow is None, reason='Requires `pyarrow`')
def test_load_table_from_dataframe(client, to_delete):
dataset_id = 'load_table_from_dataframe_{}'.format(_millis())
dataset = bigquery.Dataset(client.dataset(dataset_id))
client.create_dataset(dataset)
to_delete.append(dataset)
# [START bigquery_load_table_dataframe]
# from google.cloud import bigquery
# import pandas
# client = bigquery.Client()
# dataset_id = 'my_dataset'
dataset_ref = client.dataset(dataset_id)
table_ref = dataset_ref.table('monty_python')
records = [
{'title': 'The Meaning of Life', 'release_year': 1983},
{'title': 'Monty Python and the Holy Grail', 'release_year': 1975},
{'title': 'Life of Brian', 'release_year': 1979},
{
'title': 'And Now for Something Completely Different',
'release_year': 1971
},
]
# Optionally set explicit indices.
# If indices are not specified, a column will be created for the default
# indices created by pandas.
index = ['Q24980', 'Q25043', 'Q24953', 'Q16403']
dataframe = pandas.DataFrame(
records, index=pandas.Index(index, name='wikidata_id'))
job = client.load_table_from_dataframe(dataframe, table_ref, location='US')
job.result() # Waits for table load to complete.
assert job.state == 'DONE'
table = client.get_table(table_ref)
assert table.num_rows == 4
# [END bigquery_load_table_dataframe]
column_names = [field.name for field in table.schema]
assert sorted(column_names) == ['release_year', 'title', 'wikidata_id']
if __name__ == '__main__':
pytest.main()
|
{
"content_hash": "9dedecbab3f89b04f687248b7f06b799",
"timestamp": "",
"source": "github",
"line_count": 3004,
"max_line_length": 79,
"avg_line_length": 36.32623169107856,
"alnum_prop": 0.667424214654888,
"repo_name": "jonparrott/google-cloud-python",
"id": "9e8ba524a1151f13381ef4437e0b25cf91e06162",
"size": "109699",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bigquery/docs/snippets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "62009"
},
{
"name": "Python",
"bytes": "3459300"
},
{
"name": "Shell",
"bytes": "7548"
}
],
"symlink_target": ""
}
|
from database.db4 import db4, Channel4, ConstDB4
from utils.log import Logger, Action
class Location:
channel_name = Channel4.gis_gateway_location + '*'
def __init__(self):
self.ps = db4.pubsub()
def psubscribe_gis(self):
self.ps.psubscribe(self.channel_name)
return self.ps
def stop_listen(self):
if hasattr(self, 'ps'):
self.ps.punsubscribe()
def listen_gis_gateway_location(self):
Logger.info(Action.listen, 'psubscribe', self.channel_name, 'Begin listen')
ps_init = self.psubscribe_gis()
for item in ps_init.listen():
if item is not None:
if item['type'] == 'pmessage':
Logger.info(Action.listen, item['channel'].decode(), 'MESSAGE', item['data'].decode())
gateway_id = item['channel'].decode().split(':')[1]
location_data = item['data'].decode().split(',')
if len(location_data) == 3:
lng = float(location_data[0])
lat = float(location_data[1])
alt = int(location_data[2])
msg = self.Object(gateway_id, lat=lat, lng=lng, alt=alt)
yield msg
else:
Logger.info(Action.listen, item['channel'].decode(), item['type'], item['data'])
class Object:
def __init__(self, gw_id, lat, lng, alt):
self.gateway_id = gw_id
self.latitude = lat
self.longitude = lng
self.altitude = alt
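# Editor's note: a minimal, hypothetical usage sketch, not part of the original
# module. It assumes a reachable redis instance behind db4 and blocks inside
# listen(); the printed field names follow the Object class above.
if __name__ == '__main__':
    location = Location()
    for update in location.listen_gis_gateway_location():
        print('Gateway %s at lat=%s lng=%s alt=%sm' % (
            update.gateway_id, update.latitude, update.longitude,
            update.altitude))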
|
{
"content_hash": "f3cce2b7bc3510517f800d952ed31333",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 106,
"avg_line_length": 35.44444444444444,
"alnum_prop": 0.5216300940438872,
"repo_name": "soybean217/lora-python",
"id": "3f3813851c53809aa07a4cb83bb60ebdc4368e6b",
"size": "1618",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UServer/admin_server/admin_data_update/model/gateway_locaton_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "721"
},
{
"name": "JavaScript",
"bytes": "27647"
},
{
"name": "Python",
"bytes": "808327"
}
],
"symlink_target": ""
}
|
from . import views
def construct_blueprint(celery):
return views.construct_blueprint(celery)
|
{
"content_hash": "2e0d7105e2f8a805d743dcfe8ac59ca5",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 44,
"avg_line_length": 24.75,
"alnum_prop": 0.7777777777777778,
"repo_name": "slarse/pdfebc-web",
"id": "6ab2e1f339e21e005f47700fb89c1d7ee34d0858",
"size": "99",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pdfebc_web/main/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "425"
},
{
"name": "HTML",
"bytes": "2532"
},
{
"name": "Python",
"bytes": "20252"
},
{
"name": "Shell",
"bytes": "272"
}
],
"symlink_target": ""
}
|
"""A simple demo that allows the user to drive the 'Robot Arm H25' located
here: http://robotsquare.com/2013/10/01/education-ev3-45544-instruction/
The program waits for key presses and responds to the following keys:
'w' - Raises the claw
's' - Lowers the claw
'a' - Swivels the claw left
'd' - Swivels the claw right
'c' - Opens the claw
'v' - Closes the claw
'q' - Exits the program
Before running the program, ensure that you have bound the brick to rfcomm0
(i.e. 'sudo rfcomm bind /dev/rfcomm0 XX:XX:XX:XX:XX:XX').
"""
import sys
import tty
import termios
from ev3 import *
def getch():
fd = sys.stdin.fileno()
old = termios.tcgetattr(fd)
try:
tty.setraw(fd)
return sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old)
# Ensures that the claw is firmly closed.
close_claw_cmd = direct_command.DirectCommand()
close_claw_cmd.add_output_speed(direct_command.OutputPort.PORT_D, 10)
close_claw_cmd.add_output_start(direct_command.OutputPort.PORT_D)
close_claw_cmd.add_timer_wait(1000)
close_claw_cmd.add_output_stop(direct_command.OutputPort.PORT_D,
direct_command.StopType.BRAKE)
# Opens the claw about half way.
open_claw_cmd = direct_command.DirectCommand()
open_claw_cmd.add_output_speed(direct_command.OutputPort.PORT_D, -10)
open_claw_cmd.add_output_start(direct_command.OutputPort.PORT_D)
open_claw_cmd.add_timer_wait(600)
open_claw_cmd.add_output_stop(direct_command.OutputPort.PORT_D,
direct_command.StopType.BRAKE)
raise_claw_cmd = direct_command.DirectCommand()
raise_claw_cmd.add_output_step_speed(direct_command.OutputPort.PORT_B,
-15,
0,
20,
10,
direct_command.StopType.BRAKE)
raise_claw_cmd.add_output_ready(direct_command.OutputPort.PORT_B)
raise_claw_cmd.add_keep_alive()
lower_claw_cmd = direct_command.DirectCommand()
lower_claw_cmd.add_output_step_speed(direct_command.OutputPort.PORT_B,
15,
0,
20,
10,
direct_command.StopType.BRAKE)
lower_claw_cmd.add_output_ready(direct_command.OutputPort.PORT_B)
lower_claw_cmd.add_keep_alive()
swivel_left_cmd = direct_command.DirectCommand()
swivel_left_cmd.add_output_step_speed(direct_command.OutputPort.PORT_C,
-15,
0,
20,
10,
direct_command.StopType.BRAKE)
swivel_left_cmd.add_output_ready(direct_command.OutputPort.PORT_C)
swivel_left_cmd.add_keep_alive()
swivel_right_cmd = direct_command.DirectCommand()
swivel_right_cmd.add_output_step_speed(direct_command.OutputPort.PORT_C,
15,
0,
20,
10,
direct_command.StopType.BRAKE)
swivel_right_cmd.add_output_ready(direct_command.OutputPort.PORT_C)
swivel_right_cmd.add_keep_alive()
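# Editor's note: a hypothetical composite command built in the same way as the
# commands above -- it lowers the claw slightly and then closes it. The step
# and speed values are illustrative, not taken from the original program.
lower_and_grab_cmd = direct_command.DirectCommand()
lower_and_grab_cmd.add_output_step_speed(direct_command.OutputPort.PORT_B,
                                         15,
                                         0,
                                         10,
                                         5,
                                         direct_command.StopType.BRAKE)
lower_and_grab_cmd.add_output_ready(direct_command.OutputPort.PORT_B)
lower_and_grab_cmd.add_output_speed(direct_command.OutputPort.PORT_D, 10)
lower_and_grab_cmd.add_output_start(direct_command.OutputPort.PORT_D)
lower_and_grab_cmd.add_timer_wait(1000)
lower_and_grab_cmd.add_output_stop(direct_command.OutputPort.PORT_D,
                                   direct_command.StopType.BRAKE)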
if ("__main__" == __name__):
with ev3.EV3() as brick:
print "Connection opened (press 'q' to quit)."
while (True):
c = getch()
if ('c' == c):
print 'Opening claw.'
open_claw_cmd.send(brick)
elif ('v' == c):
print 'Closing claw.'
close_claw_cmd.send(brick)
elif ('w' == c):
print 'Raising claw.'
raise_claw_cmd.send(brick)
elif ('s' == c):
print 'Lowering claw.'
lower_claw_cmd.send(brick)
elif ('a' == c):
print 'Swivel left.'
swivel_left_cmd.send(brick)
elif ('d' == c):
print 'Swivel right.'
swivel_right_cmd.send(brick)
elif ('q' == c):
break
|
{
"content_hash": "af6e49b8dfa63f3629b3a797a0ab84ac",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 75,
"avg_line_length": 35.82258064516129,
"alnum_prop": 0.5159837910850968,
"repo_name": "inductivekickback/ev3",
"id": "bb877ac5c10efc8371d31c6496da70ac3eb90f0a",
"size": "4442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Robot_Arm_H25_demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "102252"
}
],
"symlink_target": ""
}
|
from .forms import RegisterForm, LoginForm, PasswordResetForm, \
PasswordChangeForm
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, \
login as django_login, logout as django_logout
from .models import PasswordResetToken
import uuid
from django.contrib import messages
from django.utils.translation import ugettext as _
def register(request):
form = RegisterForm()
if request.method == "POST":
form = RegisterForm(request.POST)
if form.is_valid():
data = form.cleaned_data
user = User.objects.create_user(username=data["username"],
email=data.get("email", ""),
password=data["password1"])
auth_user = authenticate(username=data["username"],
password=data["password1"])
django_login(request, auth_user)
return HttpResponseRedirect(reverse("dashboard"))
return render_to_response("accounts/register.html",
{"form": form},
context_instance=RequestContext(request))
def login(request):
form = LoginForm()
if request.method == "POST":
form = LoginForm(request.POST)
if form.is_valid():
data = form.cleaned_data
auth_user = authenticate(username=data["username"],
password=data["password"])
django_login(request, auth_user)
return HttpResponseRedirect(reverse("dashboard"))
return render_to_response("accounts/login.html", {"form": form},
context_instance=RequestContext(request))
def logout(request):
django_logout(request)
return HttpResponseRedirect(reverse("accounts_login"))
def password_reset(request):
form = PasswordResetForm()
if request.method == "POST":
form = PasswordResetForm(request.POST)
if form.is_valid():
user = User.objects.get(email=form.cleaned_data["email"])
PasswordResetToken.objects.create(user=user, token=str(uuid.uuid4()))
messages.add_message(request, messages.SUCCESS,
_("Password reset email has been sent successfully"))
return render_to_response("accounts/password_reset.html", {"form": form},
context_instance=RequestContext(request))
def password_reset_confirm(request, token=None):
if not token:
messages.add_message(request, messages.ERROR,
_("Invalid Token"))
return HttpResponseRedirect(reverse("accounts_login"))
try:
token = PasswordResetToken.objects.get(token=token)
except PasswordResetToken.DoesNotExist:
messages.add_message(request, messages.ERROR,
_("Invalid Token"))
return HttpResponseRedirect(reverse("accounts_login"))
form = PasswordChangeForm(initial={"token": token.token})
if request.method == "POST":
form = PasswordChangeForm(request.POST)
if form.is_valid():
token.user.set_password(form.cleaned_data["password1"])
token.user.save()
token.delete()
messages.add_message(request, messages.SUCCESS,
_("Password has been successfully changed."))
return HttpResponseRedirect(reverse("accounts_login"))
return render_to_response("accounts/password_reset_form.html",
{"form": form,
"password_token": token.token},
context_instance=RequestContext(request))
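# Editor's note: a minimal, hypothetical URLconf showing how these views might
# be wired up. Only the 'accounts_login' name is confirmed by the reverse()
# calls above; the other routes and names are illustrative.
from django.conf.urls import url
urlpatterns = [
    url(r'^register/$', register, name='accounts_register'),
    url(r'^login/$', login, name='accounts_login'),
    url(r'^logout/$', logout, name='accounts_logout'),
    url(r'^password/reset/$', password_reset, name='accounts_password_reset'),
    url(r'^password/reset/(?P<token>[0-9a-f-]+)/$', password_reset_confirm,
        name='accounts_password_reset_confirm'),
]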
|
{
"content_hash": "d3f6e11fcd42f895ab65ac333776c9f0",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 86,
"avg_line_length": 46.01176470588236,
"alnum_prop": 0.6090513935054973,
"repo_name": "theju/f1oracle",
"id": "da0d6e712abe3ed352a04fa9e0ecd9646e7a4dba",
"size": "3911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "accounts/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "204"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Python",
"bytes": "52538"
}
],
"symlink_target": ""
}
|
""" Python 'utf-16' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.utf_16_encode
decode = codecs.utf_16_decode
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return (Codec.encode,Codec.decode,StreamReader,StreamWriter)
|
{
"content_hash": "9cbb82ebfd1f50199878196952662f95",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 69,
"avg_line_length": 20.032258064516128,
"alnum_prop": 0.7133655394524959,
"repo_name": "Integral-Technology-Solutions/ConfigNOW",
"id": "c034a97d1dd4c333528690a08c718f0c575e7e7b",
"size": "621",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "Lib/encodings/utf_16.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1499"
},
{
"name": "HTML",
"bytes": "2243"
},
{
"name": "Java",
"bytes": "594"
},
{
"name": "Python",
"bytes": "2973691"
},
{
"name": "Shell",
"bytes": "5797"
}
],
"symlink_target": ""
}
|
"""
Code to build HMM models of PSSMs of various Markov orders.
"""
import hmm, numpy, pickle
def _load_model_parameters(model, f):
parameters = pickle.load(f)
if len(parameters) != len(model.parameters):
raise RuntimeError('Different number of parameters in file (%d) and in model (%d)' % (len(parameters), len(model.parameters)))
for i, t in enumerate(parameters):
model.parameters[i] = t
def _dump_model_parameters(model, f):
pickle.dump(list(model.parameters), f)
class ModelBuilder(object):
"""
Helps to build models over order n alphabets
"""
def __init__(self, order, alphabet_size=4):
self.order = order
self.alphabet_size = alphabet_size
self.M = alphabet_size ** (order + 1)
self.converter = hmm.MarkovOrderConverter(alphabet_size, order)
def new_model_by_states(self):
"@return: A new hmm.ModelByStates."
return hmm.ModelByStates(self.M, self.order)
def add_fully_parameterised_state(self, model, pi = 1.0, emission_dist = None):
"""
Adds a state with a different emission parameter for each of self.M
possible order-n output characters
"""
# add the state
state = model.add_state(pi = model.add_parameter(pi))
# allocate the parameters to the emissions
for m in xrange(self.M):
            if emission_dist is not None:
assert len(emission_dist) == self.M
state.b[m] = model.add_parameter(emission_dist[m])
else:
state.b[m] = model.add_parameter( 1.0 / self.M )
def add_order_0_parameterised_state(self, model, pi = 1.0, emission_dist = None):
"""
Adds a state with shared emission parameters for each of the self.M
possible order-n output characters that represent the same order-0 character
"""
# add the state
state = model.add_state(pi = model.add_parameter(pi))
# add the parameters for the emissions
        if emission_dist is not None:
assert len(emission_dist) == self.alphabet_size
params = [ model.add_parameter( x ) for x in emission_dist ]
else:
params = [ model.add_parameter( 1.0 / self.M ) for i in xrange(self.alphabet_size) ]
# allocate the parameters to the emissions
for m in xrange(self.M):
state.b[m] = params[m % self.alphabet_size]
return state
def add_order_0_rev_comp_state(self, model, forward_state, pi = 1.0):
"""
Adds a state with shared emission parameters for each of the self.M
possible order-n output characters that represent the same order-0 character.
This state's output is the reverse complement of the given state
"""
# add the state
state = model.add_state(pi = model.add_parameter(pi))
# allocate the parameters to the emissions
for m in xrange(self.M):
state.b[m] = forward_state.b[self.alphabet_size - 1 - (m % self.alphabet_size)]
return state
def create_uniform_background_model(self):
"""
@return: A HMM with one mosaic with uniform emission probabilities.
"""
model = hmm.ModelByStates(self.M, self.order)
self.add_fully_parameterised_state(
model,
emission_dist = numpy.ones(self.M)/4.
)
transition_param = model.add_parameter(1.0)
model.states[0].add_successor(model.states[0], transition_param)
return model
def create_background_mosaic_model(self, num_mosaics, p_transition, dirichlet_prior_strength):
"""
Create a mosaic model
"""
model = hmm.ModelByStates(self.M, self.order)
transition_param = model.add_parameter(p_transition)
no_transition_param = model.add_parameter(1.0 - p_transition)
for i in xrange(num_mosaics):
self.add_fully_parameterised_state(
model,
emission_dist = hmm.dirichlet_draw(numpy.ones(self.M)*dirichlet_prior_strength)
)
for state_1 in model.states:
for state_2 in model.states:
if state_1 == state_2: state_1.add_successor(state_2, no_transition_param)
else: state_1.add_successor(state_2, transition_param)
return model
def load_background_mosaic_model(self, f):
'''
Load a background model from the given file (or filename)
'''
if isinstance(f, str):
f = open(f)
# how many mosaics?
num_mosaics = pickle.load(f)
# create a model of the desired structure
model = self.create_background_mosaic_model(num_mosaics, 0.0, 1.0)
# load the parameters into the model
_load_model_parameters(model, f)
return model
def dump_background_mosaic_model(self, model, f):
'''
Dump a background model into the given file (or filename)
'''
if isinstance(f, str):
f = open(f, 'w')
# how many mosaics?
pickle.dump(model.N, f)
# load the parameters into the model
_dump_model_parameters(model, f)
def create_background_model(order, N):
return hmm.pssm.ModelBuilder(order).create_background_mosaic_model(N, .01, 100.0)
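# Editor's note: a minimal usage sketch assuming the `hmm` package above is
# importable. The mosaic count, transition probability, prior strength, and
# output filename are illustrative values, not taken from this codebase.
if __name__ == '__main__':
    builder = ModelBuilder(order=1, alphabet_size=4)
    # Two-mosaic background model with a 1% chance of switching mosaics.
    model = builder.create_background_mosaic_model(
        num_mosaics=2, p_transition=0.01, dirichlet_prior_strength=100.0)
    builder.dump_background_mosaic_model(model, 'background_model.pkl')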
|
{
"content_hash": "4f65d1650c4e04f480a6daf283f61d9a",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 134,
"avg_line_length": 34.904458598726116,
"alnum_prop": 0.6031021897810219,
"repo_name": "JohnReid/biopsy",
"id": "aabbb47a6d58e9ce7270cec510305aee2d5c700c",
"size": "5519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/hmm/pssm/model_builder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "2639"
},
{
"name": "C",
"bytes": "392541"
},
{
"name": "C++",
"bytes": "3946426"
},
{
"name": "Gnuplot",
"bytes": "42000"
},
{
"name": "Python",
"bytes": "976684"
},
{
"name": "R",
"bytes": "2714"
},
{
"name": "Shell",
"bytes": "3356"
},
{
"name": "TeX",
"bytes": "4212"
}
],
"symlink_target": ""
}
|
import errno
import functools
import os
import shutil
import tempfile
import time
import weakref
from eventlet import semaphore
from oslo.config import cfg
from nova.openstack.common import fileutils
from nova.openstack.common.gettextutils import _
from nova.openstack.common import local
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
util_opts = [
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'),
cfg.StrOpt('lock_path',
help=('Directory to use for lock files. Default to a '
'temp directory'))
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
class _InterProcessLock(object):
"""Lock implementation which allows multiple locks, working around
issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
not require any cleanup. Since the lock is always held on a file
descriptor rather than outside of the process, the lock gets dropped
automatically if the process crashes, even if __exit__ is not executed.
There are no guarantees regarding usage by multiple green threads in a
single process here. This lock works only between processes. Exclusive
access between local threads should be achieved using the semaphores
in the @synchronized decorator.
Note these locks are released when the descriptor is closed, so it's not
safe to close the file descriptor while another green thread holds the
lock. Just opening and closing the lock file can break synchronisation,
so lock files must be accessed only using this abstraction.
"""
def __init__(self, name):
self.lockfile = None
self.fname = name
def __enter__(self):
self.lockfile = open(self.fname, 'w')
while True:
try:
# Using non-blocking locks since green threads are not
# patched to deal with blocking locking calls.
# Also upon reading the MSDN docs for locking(), it seems
# to have a laughable 10 attempts "blocking" mechanism.
self.trylock()
return self
except IOError, e:
if e.errno in (errno.EACCES, errno.EAGAIN):
# external locks synchronise things like iptables
# updates - give it some time to prevent busy spinning
time.sleep(0.01)
else:
raise
def __exit__(self, exc_type, exc_val, exc_tb):
try:
self.unlock()
self.lockfile.close()
except IOError:
LOG.exception(_("Could not release the acquired lock `%s`"),
self.fname)
def trylock(self):
raise NotImplementedError()
def unlock(self):
raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
    def trylock(self):
        # msvcrt.locking() expects a file descriptor, not a file object.
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)
    def unlock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
def trylock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
def unlock(self):
fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
if os.name == 'nt':
import msvcrt
InterProcessLock = _WindowsLock
else:
import fcntl
InterProcessLock = _PosixLock
_semaphores = weakref.WeakValueDictionary()
def synchronized(name, lock_file_prefix, external=False, lock_path=None):
"""Synchronization decorator.
Decorating a method like so::
@synchronized('mylock')
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
Different methods can share the same lock::
@synchronized('mylock')
def foo(self, *args):
...
@synchronized('mylock')
def bar(self, *args):
...
This way only one of either foo or bar can be executing at a time.
The lock_file_prefix argument is used to provide lock files on disk with a
meaningful prefix. The prefix should end with a hyphen ('-') if specified.
The external keyword argument denotes whether this lock should work across
multiple processes. This means that if two different workers both run a
    method decorated with @synchronized('mylock', external=True), only one
of them will execute at a time.
The lock_path keyword argument is used to specify a special location for
external lock files to live. If nothing is set, then CONF.lock_path is
used as a default.
"""
def wrap(f):
@functools.wraps(f)
def inner(*args, **kwargs):
# NOTE(soren): If we ever go natively threaded, this will be racy.
# See http://stackoverflow.com/questions/5390569/dyn
# amically-allocating-and-destroying-mutexes
sem = _semaphores.get(name, semaphore.Semaphore())
if name not in _semaphores:
# this check is not racy - we're already holding ref locally
# so GC won't remove the item and there was no IO switch
# (only valid in greenthreads)
_semaphores[name] = sem
with sem:
LOG.debug(_('Got semaphore "%(lock)s" for method '
'"%(method)s"...'), {'lock': name,
'method': f.__name__})
# NOTE(mikal): I know this looks odd
if not hasattr(local.strong_store, 'locks_held'):
local.strong_store.locks_held = []
local.strong_store.locks_held.append(name)
try:
if external and not CONF.disable_process_locking:
LOG.debug(_('Attempting to grab file lock "%(lock)s" '
'for method "%(method)s"...'),
{'lock': name, 'method': f.__name__})
cleanup_dir = False
# We need a copy of lock_path because it is non-local
local_lock_path = lock_path
if not local_lock_path:
local_lock_path = CONF.lock_path
if not local_lock_path:
cleanup_dir = True
local_lock_path = tempfile.mkdtemp()
if not os.path.exists(local_lock_path):
cleanup_dir = True
fileutils.ensure_tree(local_lock_path)
# NOTE(mikal): the lock name cannot contain directory
# separators
safe_name = name.replace(os.sep, '_')
lock_file_name = '%s%s' % (lock_file_prefix, safe_name)
lock_file_path = os.path.join(local_lock_path,
lock_file_name)
try:
lock = InterProcessLock(lock_file_path)
with lock:
LOG.debug(_('Got file lock "%(lock)s" at '
'%(path)s for method '
'"%(method)s"...'),
{'lock': name,
'path': lock_file_path,
'method': f.__name__})
retval = f(*args, **kwargs)
finally:
LOG.debug(_('Released file lock "%(lock)s" at '
'%(path)s for method "%(method)s"...'),
{'lock': name,
'path': lock_file_path,
'method': f.__name__})
# NOTE(vish): This removes the tempdir if we needed
# to create one. This is used to
# cleanup the locks left behind by unit
# tests.
if cleanup_dir:
shutil.rmtree(local_lock_path)
else:
retval = f(*args, **kwargs)
finally:
local.strong_store.locks_held.remove(name)
return retval
return inner
return wrap
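# Editor's note: a minimal usage sketch, not part of the original module. The
# lock name 'mylock' and prefix 'demo-' are illustrative; external=True makes
# the lock hold across processes via a file under CONF.lock_path (or a
# temporary directory when no lock_path is configured).
@synchronized('mylock', 'demo-', external=True)
def update_shared_state():
    # Only one thread in one process executes this body at a time.
    pass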
|
{
"content_hash": "3ec51f2b316efecb7bee6a4cb94c28f0",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 79,
"avg_line_length": 37.58874458874459,
"alnum_prop": 0.5291949786939998,
"repo_name": "maheshp/novatest",
"id": "3270f0473645db2a9d79ad2a96e7de9278682829",
"size": "9365",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "nova/openstack/common/lockutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "8947329"
},
{
"name": "Shell",
"bytes": "17067"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='speaker',
options={'verbose_name': 'palestrante', 'verbose_name_plural': 'palestrantes'},
),
migrations.AlterField(
model_name='speaker',
name='description',
field=models.TextField(blank=True, verbose_name='Descrição'),
),
migrations.AlterField(
model_name='speaker',
name='name',
field=models.CharField(max_length=255, verbose_name='Nome'),
),
migrations.AlterField(
model_name='speaker',
name='photo',
field=models.URLField(verbose_name='Foto'),
),
migrations.AlterField(
model_name='speaker',
name='slug',
field=models.SlugField(verbose_name='Slug'),
),
migrations.AlterField(
model_name='speaker',
name='website',
field=models.URLField(blank=True, verbose_name='Website'),
),
]
|
{
"content_hash": "0d1fd5c377d4ba093fba5cdb6b27176f",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 91,
"avg_line_length": 29.214285714285715,
"alnum_prop": 0.5484922575387123,
"repo_name": "Golker/wttd",
"id": "69007d9361a289bcbbe220a3b373959ae78468d8",
"size": "1301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eventex/core/migrations/0002_auto_20160218_2359.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "37236"
},
{
"name": "JavaScript",
"bytes": "8834"
},
{
"name": "Python",
"bytes": "103988"
}
],
"symlink_target": ""
}
|
"""Monitor the state of Thermos tasks on a system
This module contains the TaskMonitor, used to reconstruct the state of active or finished Thermos
tasks based on their checkpoint streams. It exposes two key pieces of information about a Task, both
as their corresponding Thrift structs:
- a RunnerState, representing the latest state of the Task
- a list of ProcessStates, representing the processes currently running within the Task
"""
import copy
import errno
import os
import threading
from twitter.common import log
from twitter.common.recordio import ThriftRecordReader
from apache.thermos.common.ckpt import CheckpointDispatcher
from apache.thermos.common.path import TaskPath
from gen.apache.thermos.ttypes import ProcessState, RunnerCkpt, RunnerState, TaskState
class TaskMonitor(object):
"""
Class responsible for reconstructing and monitoring the state of an individual Thermos task via
its runner checkpoint. Also exports information on active processes in the task.
"""
def __init__(self, root, task_id):
"""Construct a TaskMonitor.
:param root: The checkpoint root of the task.
:param task_id: The task id of the task.
"""
pathspec = TaskPath(root=root, task_id=task_id)
self._dispatcher = CheckpointDispatcher()
self._runnerstate = RunnerState(processes={})
self._runner_ckpt = pathspec.getpath('runner_checkpoint')
self._active_file, self._finished_file = (pathspec.given(state=state).getpath('task_path')
for state in ('active', 'finished'))
self._ckpt_head = 0
self._apply_states()
self._lock = threading.Lock()
def _apply_states(self):
"""
os.stat() the corresponding checkpoint stream of this task and determine if there are new ckpt
records. Attempt to read those records and update the high watermark for that stream.
Returns True if new states were applied, False otherwise.
"""
ckpt_offset = None
try:
ckpt_offset = os.stat(self._runner_ckpt).st_size
updated = False
if self._ckpt_head < ckpt_offset:
with open(self._runner_ckpt, 'r') as fp:
fp.seek(self._ckpt_head)
rr = ThriftRecordReader(fp, RunnerCkpt)
while True:
runner_update = rr.try_read()
if not runner_update:
break
try:
self._dispatcher.dispatch(self._runnerstate, runner_update)
except CheckpointDispatcher.InvalidSequenceNumber as e:
log.error('Checkpoint stream is corrupt: %s' % e)
break
new_ckpt_head = fp.tell()
updated = self._ckpt_head != new_ckpt_head
self._ckpt_head = new_ckpt_head
return updated
except OSError as e:
if e.errno == errno.ENOENT:
# The log doesn't yet exist, will retry later.
log.warning('Could not read from checkpoint %s' % self._runner_ckpt)
return False
else:
raise
def refresh(self):
"""
Check to see if there are new updates and apply them. Return true if
updates were applied, false otherwise.
"""
with self._lock:
return self._apply_states()
def get_sandbox(self):
"""Get the sandbox of this task, or None if it has not yet been discovered."""
state = self.get_state()
if state.header:
return state.header.sandbox
def get_state(self):
"""Get the latest state of this Task."""
with self._lock:
self._apply_states()
return copy.deepcopy(self._runnerstate)
def task_state(self):
state = self.get_state()
return state.statuses[-1].state if state.statuses else TaskState.ACTIVE
@property
def active(self):
return os.path.exists(self._active_file)
@property
def finished(self):
return os.path.exists(self._finished_file)
def get_active_processes(self):
"""
Get active processes. Returned is a list of tuples of the form:
(ProcessStatus object of running object, its run number)
"""
active_processes = []
with self._lock:
self._apply_states()
state = self._runnerstate
for process, runs in state.processes.items():
if len(runs) == 0:
continue
last_run = runs[-1]
if last_run.state == ProcessState.RUNNING:
active_processes.append((last_run, len(runs) - 1))
return active_processes
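# The _apply_states docstring above captures the core trick: stat the
# checkpoint file and only read records past the last-consumed offset. A
# stripped-down sketch of that high-watermark pattern on a plain text log
# (LogTailer is a hypothetical name; no Thrift records involved):
class LogTailer(object):
  """Incrementally consume lines appended to a file since the last poll."""
  def __init__(self, path):
    self._path = path
    self._head = 0  # high watermark: bytes already consumed
  def poll(self):
    """Return lines appended since the last call, or [] if none."""
    try:
      size = os.stat(self._path).st_size
    except OSError:
      return []  # file does not exist yet; caller can retry later
    if size <= self._head:
      return []
    with open(self._path) as fp:
      fp.seek(self._head)
      lines = fp.readlines()
      self._head = fp.tell()
    return lines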
|
{
"content_hash": "f547b43d7a529c94aa15c6631077b669",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 100,
"avg_line_length": 33.62307692307692,
"alnum_prop": 0.6650652024708305,
"repo_name": "rosmo/aurora",
"id": "d77703e9027e6c23ffb67d936cf8beef0384b4a6",
"size": "4920",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "src/main/python/apache/thermos/monitoring/monitor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "7070"
},
{
"name": "Groovy",
"bytes": "13037"
},
{
"name": "HTML",
"bytes": "56610"
},
{
"name": "Java",
"bytes": "3365999"
},
{
"name": "JavaScript",
"bytes": "105302"
},
{
"name": "Makefile",
"bytes": "1372"
},
{
"name": "Python",
"bytes": "1414935"
},
{
"name": "Ruby",
"bytes": "4315"
},
{
"name": "Shell",
"bytes": "59236"
},
{
"name": "Smalltalk",
"bytes": "79"
},
{
"name": "Smarty",
"bytes": "25233"
},
{
"name": "Thrift",
"bytes": "56144"
}
],
"symlink_target": ""
}
|
from ACJ import ACJ
|
{
"content_hash": "5beef6367f1f3a41c18252825b5e951e",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 19,
"avg_line_length": 20,
"alnum_prop": 0.8,
"repo_name": "RossMcKenzie/ACJ",
"id": "6d9aa8a37d74de2b31e85881493ac66a69dfc1a5",
"size": "20",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "25274"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
import random
import tensorflow as tf
import gym
from deep_rl.graphs import create_a3c_graph
from deep_rl.misc import first_in_collection
from deep_rl.trajectories import discount
from six.moves import range
EPSILON_ENDS = [0.1, 0.01, 0.5]
class A3CAgent:
"""A3CAgent"""
def __init__(self,
graph,
exploration_steps,
total_steps,
gamma,
a3c_update_interval,
action_sampler):
"""
graph should have the placeholders called "states", "actions",
and "returns". It should also have operations called "loss_op", "train_op",
"probs", and "value".
"""
self.graph = graph
self.gamma = gamma
self.a3c_update_interval = a3c_update_interval
self.action_sampler = action_sampler
self.T = graph.get_collection("global_step")[0]
self.exploration_steps = exploration_steps
self.total_steps = total_steps
self.incr_T = tf.assign_add(self.T, 1)
def pick_epsilon(self, T):
rv = random.random()
if rv < 0.4:
end = EPSILON_ENDS[0]
elif rv < 0.7:
end = EPSILON_ENDS[1]
else:
end = EPSILON_ENDS[2]
if T > self.exploration_steps:
return end
return 1.0 - T * (1.0 - end) / self.exploration_steps
def run(self, t_id, session, coord, env):
t = 0
t_start = 0 # for updating params
t_ep = 0 # for checking is an episode is done
ep_reward = 0
actions = []
states = []
rewards = []
# inputs and ops
_actions = self.graph.get_collection("actions")[0]
_returns = self.graph.get_collection("returns")[0]
pol_in = self.graph.get_collection("policy_in")[0]
pol_out = self.graph.get_collection("policy_out")[0]
pol_train_op = self.graph.get_collection("policy_train_op")[0]
val_in = self.graph.get_collection("value_in")[0]
val_out = self.graph.get_collection("value_out")[0]
val_train_op = self.graph.get_collection("value_train_op")[0]
state = env.reset()
try:
while not coord.should_stop():
T = session.run(self.T)
if T > self.total_steps:
break
epsilon = self.pick_epsilon(T)
if random.random() < epsilon:
action = env.action_space.sample()
else:
probs = session.run(pol_out, feed_dict={pol_in: state.reshape(1, *state.shape)})
action = self.action_sampler(probs)[0]
next_state, reward, done, info = env.step(action)
states.append(state)
actions.append(action)
rewards.append(reward)
ep_reward += reward
t += 1
session.run(self.incr_T)
# update params
if done or t - t_start == self.a3c_update_interval:
last_state = states[-1]
val = 0
if not done:
val = session.run(val_out,
feed_dict={val_in:
last_state.reshape(1, *last_state.shape)})
rewards.append(val)
returns = discount(rewards, self.gamma)[:-1]
session.run([val_train_op, pol_train_op],
feed_dict={val_in: states,
pol_in: states,
_returns: returns,
_actions: actions})
actions = []
states = []
rewards = []
t_start = t
# TODO: see if we can monitor all the envs
if done or t - t_ep == env.spec.timestep_limit:
state = env.reset()
print("Thread id {}: Episode reward = {}, timestep = {}".format(t_id, ep_reward,
t))
ep_reward = 0
t_ep = t
else:
state = next_state
except Exception as e:
coord.request_stop(e)
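# The update step above turns the collected rewards (with a bootstrapped
# value appended) into returns via `discount` from deep_rl.trajectories.
# Assuming that helper computes the usual discounted cumulative sum, an
# equivalent reference sketch (discount_reference is our name, added purely
# for illustration) is:
def discount_reference(rewards, gamma):
    """out[i] = sum_j gamma**j * rewards[i + j], so e.g.
    discount_reference([1, 1, 1], 0.9) -> [2.71, 1.9, 1.0] (up to rounding)."""
    out = [0.0] * len(rewards)
    running = 0.0
    for i in reversed(range(len(rewards))):
        running = rewards[i] + gamma * running
        out[i] = running
    return out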
|
{
"content_hash": "d6f1b3c2b33c60447303b9622c1b3063",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 100,
"avg_line_length": 33.86466165413534,
"alnum_prop": 0.47357904085257546,
"repo_name": "domluna/deep_rl",
"id": "3e58d71104e453f28fde791f1b5a8282c6d7b3cd",
"size": "4504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deep_rl/agents/a3c.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18932"
}
],
"symlink_target": ""
}
|
"""
pixeloperations provides an implementation of PixelOperations class used by ImageFrame to interact with basis
and background
"""
import numpy as np
import deconvolution.auxlib as aux
import deconvolution.exceptions as ex
from copy import deepcopy
_infzero = 0.00001
_white255 = np.array([255, 255, 255], dtype=float)
_white1 = np.array([1, 1, 1], dtype=float)
def _entries_in_closed_interval(vect):
"""Checks if all list components are in [0,1]
Parameters
----------
vect : array_like
Container with values to check
Returns
-------
bool
True if all components lie in the interval [0,1]. False otherwise
"""
if len(vect) == 0:
return True
try:
return not (np.amax(vect) > 1 or np.amin(vect) < 0)
except TypeError:
return False
def _entries_in_half_closed_interval(vect):
"""Checks if all list components are in (0,1]
Parameters
----------
vect : array_like
Container with values to check
Returns
-------
bool
        True if all components lie in the interval (0,1]. False otherwise
"""
if len(vect) == 0:
return True
try:
return not (np.amax(vect) > 1 or np.amin(vect) <= 0)
except TypeError:
return False
def _array_to_colour_255(arr):
"""Changes array of numbers into arrays of colour entries
Parameters
    ----------
arr : ndarray
numpy array of shape (x,y,3) with float or int values
Returns
-------
ndarray
array entries converted to integers from [0,255], shape (x,y,3)
See Also
--------
_array_to_colour_1
"""
if len(arr) == 0:
return np.array([], dtype=np.uint8)
return np.array(np.minimum(np.maximum(arr, 0), 255), dtype=np.uint8)
def _array_to_colour_1(arr):
"""Changes array of numbers into arrays of colour entries
Parameters
----------
arr: ndarray
shape (x,y,3)
Returns
-------
ndarray
array entries converted to floats from [0,1]
"""
if len(arr) == 0:
        return np.array([], dtype=float)
return np.array(np.minimum(np.maximum(arr, 0), 1), dtype=float)
def _array_positive(arr):
"""Changes numbers smaller than arbitrary infinitesimal number to that number
Parameters
----------
arr : ndarray
an array that may contain non-positive entries
Returns
-------
ndarray
an array of the same shape with strictly positive entries
"""
return np.array(np.maximum(arr, _infzero), dtype=float)
class PixelOperations:
def __init__(self, basis=None, background=None):
"""
Class used to interact with basis and background (e.g. transforming pixels using this)
Parameters
----------
basis : array_like
a list (or numpy array) with three-dimensional vectors. Can have 0, 1, 2 or 3 vectors
background : array_like
array with shape (3,) or list with three entries. Entries should be numbers from interval (0, 1]
See Also
--------
PixelOperations.set_basis
PixelOperations.set_background
Notes
-----
It is equivalent to setting basis and background using setters
"""
self.__basis, self.__basis_dim, self.__background, self.__basis_log_matrix = None, None, None, None
self.set_basis(basis)
self.set_background(background)
def set_basis(self, basis):
"""Sets basis
Parameters
----------
basis : array_like
a list (or numpy array) with three-dimensional vectors. Can have 0, 1, 2 or 3 vectors
Raises
------
BasisException
Erroneous basis
"""
if basis is None:
basis = []
basis = np.array(basis, dtype=float)
if basis.shape not in [(0,), (1, 3), (2, 3), (3, 3)]:
raise ex.BasisException("Basis has invalid dimensions, and was not set.")
if not _entries_in_closed_interval(basis):
raise ex.BasisException("Check components of the base vectors.")
self.__basis = _array_positive(basis)
self.__basis_dim = len(basis)
if self.check_basis():
self.__basis_log_matrix = np.transpose(-np.log(self.__basis))
if np.linalg.matrix_rank(self.__basis_log_matrix) < self.get_basis_dim():
raise ex.BasisException("Base vectors are (pseudo)linearly dependent.")
def set_background(self, background=None):
"""Sets background
Parameters
----------
background : array_like
array with shape (3,) or list with three entries. Entries should be numbers from interval (0, 1]
Raises
------
ValueError
Erroneous background vector
"""
if background is None:
self.__background = _white1
return
background = np.array(background, dtype=float)
if background.shape != (3,):
raise ValueError("Check background vector shape.")
if not _entries_in_half_closed_interval(background):
raise ValueError("Check components of the background vector.")
self.__background = _array_positive(_array_to_colour_1(background))
def check_basis(self):
"""Checks if the basis is complete (has exactly two or three vectors)
Returns
-------
bool
True if it is, False otherwise
"""
return self.__basis.shape in [(2, 3), (3, 3)]
def get_basis_dim(self):
"""Returns number of the base vectors
Returns
-------
int
number of base vectors
"""
return self.__basis_dim
def get_basis(self):
"""Returns copy of the basis
Returns
-------
ndarray
array with basis. It can be empty or have shape (x,3) where x is 1, 2 or 3
"""
return deepcopy(self.__basis)
def get_background(self):
"""Returns copy of the background vector
Returns
-------
ndarray
background (numpy array)
"""
return deepcopy(self.__background)
def __transform_image2(self, image, mode):
"""Using basis with two vectors produce new images
Parameters
----------
image : ndarray
shape (x,y,3)
mode : array_like
elements can be 0 (image generated from white light and two stains), 1 (white light and first stain),
2 (white light and second stain) or -1 (remove both stains to obtain the rest)
Returns
-------
list
list of ndarrays with shape same as image according to mode
Raises
------
ValueError
if image has wrong shape
"""
r = np.array(image, dtype=float)
dim1, dim2, dim3 = r.shape
if dim3 != 3:
raise ValueError("Basis has wrong shape.")
v, u = self.__basis
vf, uf = np.zeros_like(r), np.zeros_like(r)
vf[:], uf[:] = v, u
a, b = map(_array_positive, self.get_coef(r))
af = np.repeat(a, 3).reshape(r.shape)
bf = np.repeat(b, 3).reshape(r.shape)
dec = []
for i in mode:
if i == 0:
dec.append(_array_to_colour_255(_white255 * (vf ** af) * (uf ** bf)))
elif i == 1:
dec.append(_array_to_colour_255(_white255 * (vf ** af)))
elif i == 2:
dec.append(_array_to_colour_255(_white255 * (uf ** bf)))
elif i == -1:
dec.append(_array_to_colour_255(r * (vf ** -af) * (uf ** -bf)))
return dec
def __transform_image3(self, image, mode):
"""Using basis with three vectors produce new images
Parameters
----------
image : ndarray
shape (x,y,3)
mode : array_like
elements can be 0 (image generated from white light and two stains), 1 (white light and first stain),
2 (white light and second stain), 3 (white light and third stain)
or -1 (remove all stains to obtain the rest)
Returns
-------
list
list of ndarrays with shape same as image according to mode
See Also
--------
PixelOperations.__transform_image2
"""
r = np.array(image, dtype=float)
v, u, w = self.__basis
vf, uf, wf = np.zeros_like(r), np.zeros_like(r), np.zeros_like(r)
vf[:], uf[:], wf[:] = v, u, w
a, b, c = map(_array_positive, self.get_coef(r))
af = np.repeat(a, 3).reshape(r.shape)
bf = np.repeat(b, 3).reshape(r.shape)
cf = np.repeat(c, 3).reshape(r.shape)
dec = []
for i in mode:
if i == 0:
dec.append(_array_to_colour_255(_white255 * (vf ** af) * (uf ** bf) * (wf ** cf)))
elif i == 1:
dec.append(_array_to_colour_255(_white255 * (vf ** af)))
elif i == 2:
dec.append(_array_to_colour_255(_white255 * (uf ** bf)))
elif i == 3:
dec.append(_array_to_colour_255(_white255 * (wf ** cf)))
elif i == -1:
dec.append(_array_to_colour_255(r * (vf ** -af) * (uf ** -bf) * (wf ** -cf)))
return dec
def transform_image(self, image, mode=None):
"""Transforms given image array and gives output accordingly to iterable mode
Parameters
----------
image : ndarray
shape (x,y,3)
mode : array_like
elements can be 0 (image generated from white light and two stains), 1 (white light and first stain),
2 (white light and second stain), 3 (white light and third stain. Note that this works only if a basis
with three vectors is used) or -1 (remove all stains to obtain the rest)
Returns
-------
list
list of ndarrays with shape same as image according to mode. Type is np.uint8.
Raises
------
BasisException
No basis has been set
See Also
--------
PixelOperations.__transform_image2
PixelOperations.__transform_image3
"""
if self.__basis_dim == 2:
return self.__transform_image2(image, [1, 2] if mode is None else mode)
elif self.__basis_dim == 3:
return self.__transform_image3(image, [1, 2, 3] if mode is None else mode)
else:
raise ex.BasisException("No proper basis set.")
def __get_coef2(self, pixel):
"""Finds exponentials in which stains are present. Returns non-negative values"""
r = np.array(pixel, dtype=float)
r = _array_positive(_array_to_colour_1(r/255.))
r = r/self.__background
sol = aux.find_vals(self.__basis_log_matrix, np.log(r))
sol = np.maximum(0, sol)
return sol
def __get_coef3(self, pixel):
"""Finds exponentials in which stains are present. Returns non-negative values"""
r = np.array(pixel, dtype=float)
r = _array_positive(_array_to_colour_1(r/255.))
r /= self.__background
sol = np.linalg.solve(self.__basis_log_matrix, -np.log(r))
sol = np.maximum(0, sol)
return sol
def get_coef(self, image):
"""For a given image returns deconvolution coefficient field
Parameters
----------
image : numpy array
array representing image, shape (x,y,3) and entries [0,255]
Returns
-------
list
length of the list is number of vectors in the basis. Each entry is numpy array with shape (x,y)
representing field of exponent coefficients
Raises
------
BasisException
No basis has been set
ValueError
Image channel number not supported.
"""
if image.shape[-1] != 3:
raise ValueError("Pixel dimensionality is wrong. Maybe it has an alpha channel?")
if self.get_basis_dim() == 2:
fv = np.vectorize(self.__get_coef2, signature='(n)->(k)')
elif self.get_basis_dim() == 3:
fv = np.vectorize(self.__get_coef3, signature='(n)->(k)')
else:
raise ex.BasisException("Basis of dimension 2 or 3 has not been set.")
return np.array(fv(image)).transpose((2, 0, 1))
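# The model above treats each pixel as white light attenuated by the stains
# raised to per-pixel exponents (a Beer-Lambert-style product), and get_coef
# inverts that in log space. A tiny round-trip check of the idea with a
# made-up, complete three-vector basis (all numbers illustrative only):
def _roundtrip_demo():
    basis = np.array([[0.9, 0.3, 0.4],
                      [0.3, 0.8, 0.3],
                      [0.4, 0.4, 0.9]])
    true_coef = np.array([0.7, 1.2, 0.5])
    # Forward model: pixel = prod_i basis_i ** coef_i (white background = 1).
    pixel = np.prod(basis ** true_coef[:, None], axis=0)
    # Inversion in log space, mirroring set_basis / __get_coef3 above.
    log_matrix = np.transpose(-np.log(basis))
    recovered = np.linalg.solve(log_matrix, -np.log(pixel))
    return np.allclose(recovered, true_coef)  # True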
|
{
"content_hash": "b7727780dd5f54ccb0255383fd53ea42",
"timestamp": "",
"source": "github",
"line_count": 421,
"max_line_length": 114,
"avg_line_length": 29.802850356294538,
"alnum_prop": 0.5522435641986132,
"repo_name": "grfrederic/deconvolution",
"id": "267d0a5fe13c834f4f2ec1b1699ecf17a71ba654",
"size": "12547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deconvolution/pixeloperations.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "562"
},
{
"name": "Python",
"bytes": "82041"
}
],
"symlink_target": ""
}
|
from a10sdk.common.A10BaseClass import A10BaseClass
class HostidAppendToVrid(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param hostid_append_to_vrid_value: {"description": "hostid append to vrid num", "partition-visibility": "shared", "format": "number", "maximum": 31, "minimum": 1, "not": "hostid-append-to-vrid-default", "type": "number"}
:param hostid_append_to_vrid_default: {"description": "hostid append to vrid default", "partition-visibility": "shared", "default": 0, "format": "flag", "not": "hostid-append-to-vrid-value", "type": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "hostid-append-to-vrid"
self.DeviceProxy = ""
self.hostid_append_to_vrid_value = ""
self.hostid_append_to_vrid_default = ""
for keys, value in kwargs.items():
            setattr(self, keys, value)
class InlineModeCfg(A10BaseClass):
"""This class does not support CRUD Operations please use parent.
:param inline_mode: {"default": 0, "partition-visibility": "shared", "type": "number", "description": "Enable Layer 2 Inline Hot Standby Mode", "format": "flag"}
:param preferred_trunk: {"not": "preferred-port", "partition-visibility": "shared", "type": "number", "description": "Preferred trunk Port", "format": "interface"}
:param preferred_port: {"not": "preferred-trunk", "partition-visibility": "shared", "type": "number", "description": "Preferred ethernet Port", "format": "interface"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.b_key = "inline-mode-cfg"
self.DeviceProxy = ""
self.inline_mode = ""
self.preferred_trunk = ""
self.preferred_port = ""
for keys, value in kwargs.items():
            setattr(self, keys, value)
class Common(A10BaseClass):
"""Class Description::
HA VRRP-A Global Commands.
Class common supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.
:param hello_interval: {"description": "VRRP-A Hello Interval (1-255, in unit of 100millisec, default is 2)", "partition-visibility": "shared", "default": 2, "optional": true, "format": "number", "maximum": 255, "minimum": 1, "type": "number"}
:param uuid: {"description": "uuid of the object", "format": "string", "minLength": 1, "modify-not-allowed": 1, "optional": true, "maxLength": 64, "type": "string"}
:param preemption_delay: {"description": "Delay before changing state from Active to Standby (1-255, in unit of 100millisec, default is 60)", "partition-visibility": "shared", "default": 60, "optional": true, "format": "number", "maximum": 255, "minimum": 1, "type": "number"}
:param set_id: {"description": "Set-ID for HA configuration (Set id from 1 to 15)", "partition-visibility": "shared", "optional": true, "format": "number", "maximum": 15, "minimum": 1, "type": "number"}
:param device_id: {"platform-specific-range": 1, "platform-specific-default": 1, "description": "Unique ID for each VRRP-A box (Device-id number)", "partition-visibility": "shared", "optional": true, "format": "number", "type": "number"}
:param arp_retry: {"description": "Number of additional gratuitous ARPs sent out after HA failover (1-255, default is 4)", "partition-visibility": "shared", "default": 4, "optional": true, "format": "number", "maximum": 255, "minimum": 1, "type": "number"}
:param dead_timer: {"description": "VRRP-A dead timer in terms of how many hello messages missed, default is 5 (2-255, default is 5)", "partition-visibility": "shared", "default": 5, "optional": true, "format": "number", "maximum": 255, "minimum": 2, "type": "number"}
:param disable_default_vrid: {"description": "Disable default vrid", "partition-visibility": "shared", "default": 0, "type": "number", "format": "flag", "optional": true}
:param track_event_delay: {"description": "Delay before changing state after up/down event (Units of 100 milliseconds (default 30))", "partition-visibility": "shared", "default": 30, "optional": true, "format": "number", "maximum": 100, "minimum": 1, "type": "number"}
:param action: {"description": "'enable': enable vrrp-a; 'disable': disable vrrp-a; ", "format": "enum", "default": "disable", "type": "string", "enum": ["enable", "disable"], "optional": true}
:param restart_time: {"description": "Time between restarting ports on standby system after transition", "partition-visibility": "shared", "default": 20, "optional": true, "format": "number", "maximum": 100, "minimum": 1, "type": "number"}
:param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`
URL for this object::
`https://<Hostname|Ip address>//axapi/v3/vrrp-a/common`.
"""
def __init__(self, **kwargs):
self.ERROR_MSG = ""
self.required=[]
self.b_key = "common"
self.a10_url="/axapi/v3/vrrp-a/common"
self.DeviceProxy = ""
self.hello_interval = ""
self.uuid = ""
self.preemption_delay = ""
self.set_id = ""
self.device_id = ""
self.arp_retry = ""
self.dead_timer = ""
self.disable_default_vrid = ""
self.track_event_delay = ""
self.action = ""
self.hostid_append_to_vrid = {}
self.restart_time = ""
self.inline_mode_cfg = {}
for keys, value in kwargs.items():
            setattr(self, keys, value)
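# Hypothetical usage sketch: the generated class simply maps keyword
# arguments onto attributes, so configuration looks like the function below.
# Field values are invented, and the DeviceProxy wiring needed for a real
# REST call is omitted.
def _example_usage():
    common = Common(device_id=1, set_id=2, hello_interval=2,
                    dead_timer=5, action='enable')
    # Each kwarg lands on the instance via the setattr loop in __init__.
    assert common.action == 'enable'
    return common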
|
{
"content_hash": "72f1b168c6c9efbbd3378824dd1b2ef8",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 280,
"avg_line_length": 54.794392523364486,
"alnum_prop": 0.6372164420944909,
"repo_name": "amwelch/a10sdk-python",
"id": "55f4103aeb8dfa607c5ced19e6df05af73cab32a",
"size": "5863",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "a10sdk/core/vrrp/vrrp_a_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6956398"
}
],
"symlink_target": ""
}
|
"""API data models - schemas and their properties.
This module handles the objects created for the "schema" section of an API.
"""
__author__ = 'aiuto@google.com (Tony Aiuto)'
import collections
import logging
from googleapis.codegen import data_types
from googleapis.codegen import template_objects
from googleapis.codegen.api_exception import ApiException
_ADDITIONAL_PROPERTIES = 'additionalProperties'
_LOGGER = logging.getLogger('codegen')
class Schema(data_types.ComplexDataType):
"""The definition of a schema."""
def __init__(self, api, default_name, def_dict, parent=None):
"""Construct a Schema object from a discovery dictionary.
Schemas represent data models in the API.
Args:
api: (Api) the Api instance owning the Schema
default_name: (str) the default name of the Schema. If there is an 'id'
member in the definition, that is used for the name instead.
def_dict: (dict) a discovery dictionary
parent: (Schema) The containing schema. To be used to establish unique
names for anonymous sub-schemas.
"""
super(Schema, self).__init__(default_name, def_dict, api, parent=parent)
name = def_dict.get('id', default_name)
_LOGGER.debug('Schema(%s)', name)
# Protect against malicious discovery
template_objects.CodeObject.ValidateName(name)
self.SetTemplateValue('wireName', name)
class_name = api.ToClassName(name, self, element_type='schema')
self.SetTemplateValue('className', class_name)
self.SetTemplateValue('isSchema', True)
self.SetTemplateValue('properties', [])
self._module = (template_objects.Module.ModuleFromDictionary(self.values)
or api.model_module)
@classmethod
def Create(cls, api, default_name, def_dict, wire_name, parent=None):
"""Construct a Schema or DataType from a discovery dictionary.
Schemas contain either object declarations, simple type declarations, or
references to other Schemas. Object declarations conceptually map to real
classes. Simple types will map to a target language built-in type.
References should effectively be replaced by the referenced Schema.
Args:
api: (Api) the Api instance owning the Schema
default_name: (str) the default name of the Schema. If there is an 'id'
member in the definition, that is used for the name instead.
def_dict: (dict) a discovery dictionary
wire_name: The name which will identify objects of this type in data on
the wire. The path of wire_names can trace an item back through
discovery.
parent: (Schema) The containing schema. To be used to establish nesting
for anonymous sub-schemas.
Returns:
A Schema or DataType.
Raises:
ApiException: If the definition dict is not correct.
"""
schema_id = def_dict.get('id')
if schema_id:
name = schema_id
else:
name = default_name
class_name = api.ToClassName(name, None, element_type='schema')
_LOGGER.debug('Create: %s, parent=%s', name,
parent.values.get('wireName', '<anon>') if parent else 'None')
# Schema objects come in several patterns.
#
# 1. Simple objects
# { type: object, properties: { "foo": {schema} ... }}
#
# 2. Maps of objects
# { type: object, additionalProperties: { "foo": {inner_schema} ... }}
#
# What we want is a data type which is Map<string, {inner_schema}>
# The schema we create here is essentially a built in type which we
# don't want to generate a class for.
#
# 3. Arrays of objects
# { type: array, items: { inner_schema }}
#
# Same kind of issue as the map, but with List<{inner_schema}>
#
# 4. Primitive data types, described by type and format.
# { type: string, format: int32 }
# { type: string, enum: ["value", ...], enumDescriptions: ["desc", ...]}
#
# 5. Refs to another schema.
# { $ref: name }
#
# 6. Variant schemas
# { type: object, variant: { discriminant: "prop", map:
# [ { 'type_value': value, '$ref': wireName }, ... ] } }
#
# What we do is map the variant schema to a schema with a single
# property for the discriminant. To that property, we attach
# the variant map which specifies which discriminator values map
# to which schema references. We also collect variant information
# in the api so we can later associate discriminator value and
# base type with the generated variant subtypes.
if 'type' in def_dict:
# The 'type' field of the schema can either be 'array', 'object', or a
# base json type.
json_type = def_dict['type']
if json_type == 'object':
# Look for variants
variant = def_dict.get('variant')
if variant:
return cls._CreateVariantType(variant, api, name,
def_dict, wire_name, parent)
# Look for full object definition. You can have properties or
# additionalProperties, but it does not do anything useful to have
# both.
# Replace properties dict with Property's
props = def_dict.get('properties')
if props:
# This case 1 from above
return cls._CreateObjectWithProperties(props, api, name,
def_dict, wire_name, parent)
# Look for case 2
additional_props = def_dict.get(_ADDITIONAL_PROPERTIES)
if additional_props:
return cls._CreateMapType(additional_props, api, name, wire_name,
class_name, parent)
# no properties
return cls._CreateSchemaWithoutProperties(api, name, def_dict,
wire_name, parent)
elif json_type == 'array':
# Case 3: Look for array definition
return cls._CreateArrayType(api, def_dict, wire_name, class_name,
schema_id, parent)
else:
# Case 4: This must be a basic type. Create a DataType for it.
return data_types.CreatePrimitiveDataType(def_dict, api, wire_name,
parent=parent)
referenced_schema = def_dict.get('$ref')
if referenced_schema:
# Case 5: Reference to another Schema.
#
      # There are 3 ways you can see '$ref' in discovery.
# 1. In a property of a schema or a method request/response, pointing
# back to a previously defined schema
# 2. As above, pointing to something not defined yet.
# 3. In a method request or response or property of a schema pointing to
# something undefined.
#
# For case 1, the schema will be in the API name to schema map.
#
# For case 2, just creating this placeholder here is fine. When the
# actual schema is hit in the loop in _BuildSchemaDefinitions, we will
# replace the entry and DataTypeFromJson will resolve the to the new def.
#
# For case 3, we will end up with a dangling reference and fail later.
schema = api.SchemaByName(referenced_schema)
# The stored "schema" may not be an instance of Schema, but rather a
# data_types.PrimitiveDataType, which has no 'wireName' value.
if schema:
_LOGGER.debug('Schema.Create: %s => %s',
default_name, schema.values.get('wireName', '<unknown>'))
return schema
return data_types.SchemaReference(referenced_schema, api)
raise ApiException('Cannot decode JSON Schema for: %s' % def_dict)
@classmethod
def _CreateObjectWithProperties(cls, props, api, name, def_dict,
wire_name, parent):
properties = []
schema = cls(api, name, def_dict, parent=parent)
if wire_name:
schema.SetTemplateValue('wireName', wire_name)
for prop_name in sorted(props):
prop_dict = props[prop_name]
_LOGGER.debug(' adding prop: %s to %s', prop_name, name)
properties.append(Property(api, schema, prop_name, prop_dict))
# Some APIs express etag directly in the response, others don't.
# Knowing that we have it explicitly makes special case code generation
# easier
if prop_name == 'etag':
schema.SetTemplateValue('hasEtagProperty', True)
schema.SetTemplateValue('properties', properties)
# check for @ clashing. E.g. No 'foo' and '@foo' in the same object.
names = set()
for p in properties:
wire_name = p.GetTemplateValue('wireName')
no_at_sign = wire_name.replace('@', '')
if no_at_sign in names:
raise ApiException(
'Property name clash in schema %s:'
' %s conflicts with another property' % (name, wire_name))
names.add(no_at_sign)
return schema
@classmethod
def _CreateVariantType(cls, variant, api, name, def_dict,
wire_name, parent):
"""Creates a variant type."""
variants = collections.OrderedDict()
schema = cls(api, name, def_dict, parent=parent)
if wire_name:
schema.SetTemplateValue('wireName', wire_name)
discriminant = variant['discriminant']
# Walk over variants building the variant map and register
# variant info on the api.
for variant_entry in variant['map']:
discriminant_value = variant_entry['type_value']
variant_schema = api.DataTypeFromJson(variant_entry, name, parent=parent)
variants[discriminant_value] = variant_schema
# Set variant info. We get the original wire name from the JSON properties
# via '$ref' it is not currently accessible via variant_schema.
api.SetVariantInfo(variant_entry.get('$ref'), discriminant,
discriminant_value, schema)
prop = Property(api, schema, discriminant, {'type': 'string'},
key_for_variants=variants)
schema.SetTemplateValue('is_variant_base', True)
schema.SetTemplateValue('discriminant', prop)
schema.SetTemplateValue('properties', [prop])
return schema
@classmethod
def _CreateMapType(cls, additional_props, api, name, wire_name,
class_name, parent):
_LOGGER.debug('Have only additionalProps for %s, dict=%s',
name, additional_props)
# TODO(user): Remove this hack at the next large breaking change
# The "Items" added to the end is unneeded and ugly. This is for
# temporary backwards compatibility. Same for _CreateArrayType().
if additional_props.get('type') == 'array':
name = '%sItem' % name
subtype_name = additional_props.get('id', name + 'Element')
# Note, since this is an interim, non class just to hold the map
# make the parent schema the parent passed in, not myself.
_LOGGER.debug('name:%s, wire_name:%s, subtype name %s', name, wire_name,
subtype_name)
# When there is a parent, we synthesize a wirename when none exists.
# Purpose is to avoid generating an extremely long class name, since we
# don't do so for other nested classes.
if parent and wire_name:
base_wire_name = wire_name + 'Element'
else:
base_wire_name = None
base_type = api.DataTypeFromJson(
additional_props, subtype_name, parent=parent,
wire_name=base_wire_name)
map_type = data_types.MapDataType(name, base_type, parent=parent,
wire_name=wire_name)
map_type.SetTemplateValue('className', class_name)
_LOGGER.debug(' %s is MapOf<string, %s>',
class_name, base_type.class_name)
return map_type
@classmethod
def _CreateSchemaWithoutProperties(cls, api, name, def_dict, wire_name,
parent):
if parent:
try:
pname = parent['id']
except KeyError:
pname = '<unknown>'
name_to_log = '%s.%s' % (pname, name)
else:
name_to_log = name
# logging.warning('object without properties %s: %s',
# name_to_log, def_dict)
schema = cls(api, name, def_dict, parent=parent)
if wire_name:
schema.SetTemplateValue('wireName', wire_name)
return schema
@classmethod
def _CreateArrayType(cls, api, def_dict, wire_name,
class_name, schema_id, parent):
items = def_dict.get('items')
if not items:
raise ApiException('array without items in: %s' % def_dict)
tentative_class_name = class_name
    # TODO(user): We should not rename things to 'Items'.
# if we have an anonymous type within a map or array, it should be
# called 'Item', and let the namespacing sort it out.
if schema_id:
_LOGGER.debug('Top level schema %s is an array', class_name)
tentative_class_name += 'Items'
base_type = api.DataTypeFromJson(items, tentative_class_name,
parent=parent, wire_name=wire_name)
_LOGGER.debug(' %s is ArrayOf<%s>', class_name, base_type.class_name)
array_type = data_types.ArrayDataType(tentative_class_name, base_type,
wire_name=wire_name,
parent=parent)
if schema_id:
array_type.SetTemplateValue('className', schema_id)
return array_type
@property
def class_name(self):
return self.values['className']
@property
def anonymous(self):
return 'id' not in self.raw
@property
def properties(self):
return self.values['properties']
@property
def isContainerWrapper(self):
"""Is this schema just a simple wrapper around another container.
A schema is just a wrapper for another datatype if it is an object that
contains just a single container datatype and (optionally) a kind and
etag field. This may be used by language generators to create iterators
directly on the schema. E.g. You could have
SeriesList ret = api.GetSomeSeriesMethod(args).Execute();
for (series in ret) { ... }
rather than
for (series in ret->items) { ... }
Returns:
      bool. True if this schema merely wraps a single container datatype.
"""
return self._GetPropertyWhichWeWrap() is not None
@property
def containerProperty(self):
"""If isContainerWrapper, returns the propery which holds the container."""
return self._GetPropertyWhichWeWrap()
def _GetPropertyWhichWeWrap(self):
"""Returns the property which is the type we are wrapping."""
container_property = None
for p in self.values['properties']:
if p.values['wireName'] == 'kind' or p.values['wireName'] == 'etag':
continue
if p.data_type.GetTemplateValue('isContainer'):
if container_property:
return None
container_property = p
else:
return None
return container_property
def __str__(self):
return '<%s Schema {%s}>' % (self.values['wireName'], self.values)
class Property(template_objects.CodeObject):
"""The definition of a schema property.
Example property in the discovery schema:
"id": {"type": "string"}
"""
def __init__(self, api, schema, name, def_dict, key_for_variants=None):
"""Construct a Property.
A Property requires several elements in its template value dictionary which
are set here:
wireName: the string which labels this Property in the JSON serialization.
dataType: the DataType of this property.
Args:
api: (Api) The Api which owns this Property
schema: (Schema) the schema this Property is part of
name: (string) the name for this Property
def_dict: (dict) the JSON schema dictionary
key_for_variants: (dict) if given, maps discriminator values to
variant schemas.
Raises:
ApiException: If we have an array type without object definitions.
"""
super(Property, self).__init__(def_dict, api, wire_name=name)
self.ValidateName(name)
self.schema = schema
self._key_for_variants = key_for_variants
# TODO(user): find a better way to mark a schema as an array type
# so we can display schemas like BlogList in method responses
try:
if self.values['wireName'] == 'items' and self.values['type'] == 'array':
self.schema.values['isList'] = True
except KeyError:
pass
# If the schema value for this property defines a new object directly,
    # rather than referring to another schema, we will have to create a class
# name for it. We create a unique name by prepending the schema we are
# in to the object name.
tentative_class_name = api.NestedClassNameForProperty(name, schema)
self._data_type = api.DataTypeFromJson(def_dict, tentative_class_name,
parent=schema, wire_name=name)
@property
def code_type(self):
if self._language_model:
self._data_type.SetLanguageModel(self._language_model)
return self._data_type.code_type
@property
def safe_code_type(self):
if self._language_model:
self._data_type.SetLanguageModel(self._language_model)
return self._data_type.safe_code_type
@property
def primitive_data_type(self):
if self._language_model:
self._data_type.SetLanguageModel(self._language_model)
return self._data_type.primitive_data_type
@property
def data_type(self):
return self._data_type
@property
def member_name_is_json_name(self):
return self.memberName == self.values['wireName']
@property
def is_variant_key(self):
return self._key_for_variants
@property
def variant_map(self):
return self._key_for_variants
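# For reference, minimal discovery fragments for the six schema patterns the
# long comment in Schema.Create enumerates. Names are invented and the dicts
# are trimmed to the keys the dispatch logic actually inspects.
_EXAMPLE_SCHEMA_PATTERNS = {
    'object_with_properties': {
        'type': 'object', 'properties': {'foo': {'type': 'string'}}},
    'map_of_objects': {
        'type': 'object', 'additionalProperties': {'type': 'string'}},
    'array_of_objects': {'type': 'array', 'items': {'type': 'string'}},
    'primitive': {'type': 'string', 'format': 'int32'},
    'reference': {'$ref': 'OtherSchema'},
    'variant': {'type': 'object',
                'variant': {'discriminant': 'kind',
                            'map': [{'type_value': 'a', '$ref': 'SchemaA'}]}},
}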
|
{
"content_hash": "a3b36a47b4a60c12ad7a2b26b1e947d9",
"timestamp": "",
"source": "github",
"line_count": 460,
"max_line_length": 80,
"avg_line_length": 38.41304347826087,
"alnum_prop": 0.6387662705149971,
"repo_name": "googleapis/google-api-php-client-services",
"id": "f0a7289554991a2cca0b9078372841a16854fe1a",
"size": "18297",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "generator/src/googleapis/codegen/schema.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "55414116"
},
{
"name": "Python",
"bytes": "427325"
},
{
"name": "Shell",
"bytes": "787"
}
],
"symlink_target": ""
}
|
from os.path import dirname, abspath, join as pjoin
import pytest
import numpy as np
from Corrfunc.tests.common import gals_Mr19
from Corrfunc.tests.common import (check_against_reference,
check_vpf_against_reference)
from Corrfunc.tests.common import generate_isa_and_nthreads_combos
@pytest.mark.parametrize('isa,nthreads', generate_isa_and_nthreads_combos())
def test_DD(gals_Mr19, isa, nthreads):
from Corrfunc.theory import DD
boxsize = 420.
binfile = pjoin(dirname(abspath(__file__)),
"../../theory/tests/", "bins")
autocorr = 1
periodic = 1
x,y,z,w = gals_Mr19
results_DD = DD(autocorr, nthreads, binfile, x, y, z,
weights1=w, weight_type='pair_product',
periodic=periodic, boxsize=boxsize,
output_ravg=True, verbose=True,
isa=isa)
file_ref = pjoin(dirname(abspath(__file__)),
"../../theory/tests/", "Mr19_DD_periodic")
check_against_reference(results_DD, file_ref,
ravg_name='ravg', ref_cols=(0, 4, 1))
@pytest.mark.parametrize('isa,nthreads', generate_isa_and_nthreads_combos())
def test_DDrppi(gals_Mr19, isa, nthreads):
from Corrfunc.theory import DDrppi
boxsize = 420.
pimax = 40.0
binfile = pjoin(dirname(abspath(__file__)),
"../../theory/tests/", "bins")
autocorr = 1
periodic = 1
x,y,z,w = gals_Mr19
results_DDrppi = DDrppi(autocorr, nthreads, pimax, binfile, x, y, z,
weights1=w, weight_type='pair_product',
periodic=periodic, boxsize=boxsize,
output_rpavg=True, verbose=True,
isa=isa)
file_ref = pjoin(dirname(abspath(__file__)),
"../../theory/tests/", "Mr19_DDrppi_periodic")
check_against_reference(results_DDrppi, file_ref, ravg_name='rpavg', ref_cols=(0, 4, 1))
@pytest.mark.parametrize('isa,nthreads', generate_isa_and_nthreads_combos())
def test_DDsmu(gals_Mr19, isa, nthreads):
from Corrfunc.theory import DDsmu
boxsize = 420.
binfile = pjoin(dirname(abspath(__file__)),
"../../theory/tests/", "bins")
autocorr = 1
periodic = 1
mu_max = 0.5
nmu_bins = 10
x,y,z,w = gals_Mr19
results_DDsmu = DDsmu(autocorr, nthreads, binfile,
mu_max, nmu_bins,
x, y, z,
weights1=w, weight_type='pair_product',
periodic=periodic, boxsize=boxsize,
output_savg=True, verbose=True,
isa=isa)
file_ref = pjoin(dirname(abspath(__file__)),
"../../theory/tests/", "Mr19_DDsmu_periodic")
check_against_reference(results_DDsmu, file_ref, ravg_name='savg', ref_cols=(0, 4, 1))
@pytest.mark.parametrize('isa,nthreads', generate_isa_and_nthreads_combos(extra_isa=['AVX2']))
def test_wp(gals_Mr19, isa, nthreads):
from Corrfunc.theory import wp
boxsize = 420.
pimax = 40.
binfile = pjoin(dirname(abspath(__file__)),
"../../theory/tests/", "bins")
x,y,z,w = gals_Mr19
results_wp = wp(boxsize, pimax, nthreads, binfile,
x, y, z,
weights=w, weight_type='pair_product',
output_rpavg=True, verbose=True,
isa=isa)
file_ref = pjoin(dirname(abspath(__file__)),
"../../theory/tests/", "Mr19_wp")
check_against_reference(results_wp, file_ref, ravg_name='rpavg', cf_name='wp',
ref_cols=(4, 5, 1, 0))
@pytest.mark.parametrize('isa,nthreads', generate_isa_and_nthreads_combos())
def test_xi(gals_Mr19, isa, nthreads):
from Corrfunc.theory import xi
boxsize = 420.
binfile = pjoin(dirname(abspath(__file__)),
"../../theory/tests/", "bins")
x,y,z,w = gals_Mr19
results_xi = xi(boxsize, nthreads, binfile,
x, y, z,
weights=w, weight_type='pair_product',
output_ravg=True, verbose=True,
isa=isa)
file_ref = pjoin(dirname(abspath(__file__)),
"../../theory/tests/", "Mr19_xi")
check_against_reference(results_xi, file_ref, ravg_name='ravg', cf_name='xi', ref_cols=(4, 5, 1, 0))
@pytest.mark.parametrize('isa,nthreads', generate_isa_and_nthreads_combos())
def test_vpf(gals_Mr19, isa, nthreads):
from Corrfunc.theory import vpf
boxsize = 420.
rmax = 10.0
nbin = 10
nspheres = 10000
num_pN = 6
seed = -1234
periodic = 1
x,y,z,w = gals_Mr19
results_vpf = vpf(rmax, nbin, nspheres, num_pN,
seed, x, y, z, verbose=True, periodic=periodic,
boxsize=boxsize)
#results_vpf = results_vpf.view(dtype=np.float64).reshape(nbin,-1) # flatten to same shape as results
file_ref = pjoin(dirname(abspath(__file__)),
"../../theory/tests/", "Mr19_vpf_periodic")
check_vpf_against_reference(results_vpf, file_ref)
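# All tests above are parametrized through generate_isa_and_nthreads_combos
# from Corrfunc.tests.common. A plausible sketch of what such a helper
# yields (the real ISA list depends on the compiled kernels, so treat this
# purely as an assumption for illustration):
def _isa_nthreads_combo_sketch(extra_isa=None):
    import itertools
    isas = ['fallback', None] + list(extra_isa or [])
    return list(itertools.product(isas, (1, 2)))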
|
{
"content_hash": "be805951b0266e0a51a548750b4e6d3c",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 106,
"avg_line_length": 37.289655172413795,
"alnum_prop": 0.5357869428518587,
"repo_name": "manodeep/Corrfunc",
"id": "4247d5033c2b475fce24f511903c65823c3c2041",
"size": "5430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Corrfunc/tests/test_theory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1729496"
},
{
"name": "Makefile",
"bytes": "92426"
},
{
"name": "Python",
"bytes": "387527"
},
{
"name": "TeX",
"bytes": "11905"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('meas_models', '0001_squashed_0016_auto_20170117_2325'),
]
operations = [
migrations.RemoveField(
model_name='formula',
name='question',
),
]
|
{
"content_hash": "3e6a96b1f25a2160fcb28fd2ca9a95ad",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 65,
"avg_line_length": 20.058823529411764,
"alnum_prop": 0.6011730205278593,
"repo_name": "deka108/mathqa-server",
"id": "05f11c943a16f2ae5692efe26f468fdfdd4b734d",
"size": "414",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "meas_models/migrations/0002_remove_formula_question.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "120893"
},
{
"name": "HTML",
"bytes": "500139"
},
{
"name": "JavaScript",
"bytes": "1112441"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "2233286"
},
{
"name": "Python",
"bytes": "350711"
}
],
"symlink_target": ""
}
|
"""Created with Pycharm IDEA
@Create on 2015/9/12 16:31
@my_story controllers.py
@author : OmegaMiao"""
from app import app
from flask import jsonify, request, render_template, redirect, flash, url_for
from flask_login import login_required, login_user, logout_user, current_user
from models import Story, User
from forms import StoryForm, LoginForm, RegistrationForm
from service import StoryService
@app.route('/')
def index():
form = StoryForm()
return render_template('index.html', form=form)
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('index'))
    flash(u'Incorrect username or password')
return render_template('login.html', form=form)
@app.route('/logout')
@login_required
def logout():
logout_user()
    flash(u'You have been logged out')
return redirect(url_for('index'))
@app.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data, username=form.username.data, password=form.password.data)
StoryService.add_user(user)
flash(u"注册成功,请登录")
return redirect(url_for('login'))
return render_template('register.html', form=form)
@app.route('/story/<int:story_id>')
def get_story(story_id):
return jsonify(StoryService.get_story(story_id))
# @app.route('/storys')
# def get_storys():
# return jsonify(StoryService.get_storys())
@app.route('/show_storys', methods=['GET'])
def show_storys():
page = request.args.get('page', 1, type=int)
pagination = StoryService.get_storys(page)
storys = pagination.items
return render_template('story_detail.html', storys=storys, pagination=pagination)
@app.route('/delete_story/<int:story_id>')
def delete_story(story_id):
return jsonify(StoryService.delete_story(story_id))
@app.route('/add_story', methods=['POST'])
def add_story():
form = StoryForm(request.form)
if request.method == 'POST' and form.validate_on_submit():
story = Story(form.title.data, form.content.data)
try:
StoryService.add_story(_story=story,
_nick_name=current_user.username,
_category=form.category.data,
_author=current_user.username)
flash("add success")
return redirect(url_for('index'))
        except Exception as e:
return jsonify({"errorMessage": e.message})
return render_template('index.html', form=form)
# use a distinct URL rule: a second '/story/<string:...>' route would clash
# with get_story_by_category below
@app.route('/story/author/<string:author_nickname>')
def get_story_by_author(author_nickname):
# TODO: complete this get_story_by_author method
pass
@app.route('/story/category/<string:category>')
def get_story_by_category(category):
# TODO: complete this get_story_by_category method
pass
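# Hypothetical sketch of the service call show_storys relies on: get_storys
# is assumed to return a Flask-SQLAlchemy Pagination object, whose .items
# attribute feeds the template. The real implementation lives in service.py;
# PER_PAGE and the query below are illustrative only.
#
#     class StoryService(object):
#         PER_PAGE = 10
#         @staticmethod
#         def get_storys(page):
#             return (Story.query.order_by(Story.id.desc())
#                     .paginate(page, StoryService.PER_PAGE, error_out=False))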
|
{
"content_hash": "a5f83c1f35a2b3e92225fd17fbf6cd93",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 100,
"avg_line_length": 29.08411214953271,
"alnum_prop": 0.6542416452442159,
"repo_name": "OmegaM/story",
"id": "c9fef566774c86872d9770d0d6cd42c1976355a0",
"size": "3203",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/controller.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "189"
},
{
"name": "HTML",
"bytes": "10775"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "23487"
}
],
"symlink_target": ""
}
|
exercicio4 = open('exercicio4.txt', 'w')
texto = "I don't know what I'm doing"
exercicio4.write(texto)  # the original never wrote the text before closing
print(texto)
exercicio4.close()
lerexercicio4 = open('exercicio4.txt', 'r')
ler = lerexercicio4.readline()
print(ler)
print('now I know what I am doing')
lerexercicio4.close()
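# The same exercise with context managers, which close the file
# automatically even if an error occurs (an idiomatic alternative, not part
# of the original exercise):
with open('exercicio4.txt', 'w') as f:
    f.write(texto)
with open('exercicio4.txt', 'r') as f:
    print(f.readline())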
|
{
"content_hash": "3329e3a8dc2f5bb0b80de393c00d11db",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 43,
"avg_line_length": 19.642857142857142,
"alnum_prop": 0.6763636363636364,
"repo_name": "ribeironaldo/Desenvolvimento-de-Sistemas",
"id": "855afee4c7adcff8e0a3965b68474e9a4e17d57d",
"size": "276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Exercicio4.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4317"
}
],
"symlink_target": ""
}
|
import arff
import argparse
import collections
import matplotlib
import matplotlib.pyplot as plt
import openml
import openmlpimp
import os
import pickle
import random
import sklearn
from statistics import mean
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--virtual_env', type=str, default=os.path.expanduser('~') + '/projects/pythonvirtual/plot2/bin/python')
parser.add_argument('--scripts_dir', type=str, default=os.path.expanduser('~') + '/projects/plotting_scripts/scripts')
parser.add_argument('--result_directory', type=str, default=os.path.expanduser('~') + '/nemo/experiments/priorbased_experiments/')
parser.add_argument('--defaults_directory', type=str, default=os.path.expanduser('~') + '/experiments/defaults/')
parser.add_argument('--output_directory', type=str, default=os.path.expanduser('~') + '/experiments/optimizers/priors/')
parser.add_argument('--measure', type=str, default='predictive_accuracy')
parser.add_argument('--study_id', type=str, default='OpenML100')
parser.add_argument('--task_limit', type=int, default=None)
parser.add_argument('--setup', type=str, default='hyperband_5')
parser.add_argument('--seed', type=int, default=None, help='the seed (efficiency)')
args = parser.parse_args()
return args
def list_classifiers(directory):
result = []
for classifier in os.listdir(directory):
for param_setting in os.listdir(directory + '/' + classifier):
if param_setting == 'vanilla':
name = classifier
else:
name = classifier + ' (' + param_setting.split('_')[1] + ')'
result.append((name, classifier + '/' + param_setting))
return result
def get_score_from_xml(run_xml_location, measure):
with open(run_xml_location, 'r') as fp:
run = openml.runs.functions._create_run_from_xml(fp.read(), from_server=False)
scores = []
for repeat in run.fold_evaluations[measure]:
for fold in run.fold_evaluations[measure][repeat]:
scores.append(float(run.fold_evaluations[measure][repeat][fold]))
return sum(scores) / len(scores)
# def get_score_from_avgcurve(csv_location):
# with open(csv_location, 'r') as fp:
# reader = csv.DictReader(fp)
# for row in reader:
# score = float(row['evaluation'])
# return score
def trace_to_score(trace_file):
with open(trace_file, 'r') as fp:
trace_arff = arff.load(fp)
trace = openml.runs.functions._create_trace_from_arff(trace_arff)
curves_total = collections.defaultdict(float)
curves_count = collections.defaultdict(int)
for itt in trace.trace_iterations:
cur = trace.trace_iterations[itt]
curves_total[itt] += cur.evaluation
curves_count[itt] += 1
if len(set(curves_count.values())) != 1:
# all curves should have same amount of subcurves (folds * repeats)
raise ValueError()
curves_avg = dict()
for idx in curves_total:
curves_avg[idx] = curves_total[idx] / curves_count[idx]
# now return the max value:
return max(curves_avg.values())
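# trace_to_score reduces an optimization trace to a single number: average
# each iteration's evaluation over the folds/repeats, then take the best
# iteration. The same reduction on made-up numbers, as a sanity check:
#
#     records = [(0, 0.70), (1, 0.80), (0, 0.72), (1, 0.76)]
#     totals = collections.defaultdict(float)
#     counts = collections.defaultdict(int)
#     for itt, evaluation in records:
#         totals[itt] += evaluation
#         counts[itt] += 1
#     max(totals[i] / counts[i] for i in totals)  # 0.78, from iteration 1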
def plot_scatter(defaults_results, priors_results):
colors = ['r', 'b', 'g', 'c']
# print("task_id default uniform kde")
plt.figure()
plt.plot([-1, 1], [-1, 1], 'k-', linestyle='-', lw=1)
for idx, classifier in enumerate(priors_results.keys()):
plot_data_x = []
plot_data_y = []
for task_id in priors_results[classifier]:
if len(priors_results[classifier][task_id]) == 2 and 'uniform' in priors_results[classifier][task_id] and 'kde' in priors_results[classifier][task_id] and task_id in defaults_results[classifier]:
# print("%7d %7f %7f %7f" %(task_id, defaults_results[classifier][task_id], priors_results[classifier][task_id]['uniform'], priors_results[classifier][task_id]['kde']))
plot_data_x.append(mean(priors_results[classifier][task_id]['uniform'].values()) - defaults_results[classifier][task_id])
plot_data_y.append(mean(priors_results[classifier][task_id]['kde'].values()) - defaults_results[classifier][task_id])
if len(plot_data_x) > 0:
print(plot_data_x, plot_data_y)
plt.scatter(plot_data_x, plot_data_y, color=colors[idx], label=classifier)
# print("(%5d) %7f %7f %7f" %(len(plot_data_x), 0, sum(plot_data_x) / len(plot_data_x), sum(plot_data_y) / len(plot_data_y)))
plt.legend(loc='upper left')
plt.xlim((-0.25, 0.8))
plt.ylim((-0.25, 0.8))
plt.savefig('/home/vanrijn/experiments/priors_scatter.pdf')
plt.close()
def plot_boxplot(priors_results, mode):
output_directory = os.path.join(args.output_directory, args.setup)
all = []
keys = []
for idx, classifier in enumerate(priors_results.keys()):
data = []
kde_wins = 0
uni_wins = 0
draws = 0
for task_id in priors_results[classifier]:
if len(priors_results[classifier][task_id]) == 2 and 'uniform' in priors_results[classifier][task_id] and 'kde' in priors_results[classifier][task_id]:
scores_kde = priors_results[classifier][task_id]['kde']
scores_uniform = priors_results[classifier][task_id]['uniform']
current = sum(scores_kde.values()) / len(scores_kde) - sum(scores_uniform.values()) / len(scores_uniform)
data.append(current)
                if current > 0:
                    kde_wins += 1
                elif current < 0:
                    uni_wins += 1
                else:
                    draws += 1
n = kde_wins + uni_wins + draws
if n > 5:
all.append(data)
keys.append(classifier)
rank_kde = 2 - (kde_wins + (draws / 2)) / float(n)
rank_uni = 2 - (uni_wins + (draws / 2)) / float(n)
print(openmlpimp.utils.get_time(), mode, "%s kde %d vs %d uniform (and %d draws). N = %d, Ranks: kde %f - %f uniform" %(classifier, kde_wins, uni_wins, draws, n, rank_kde, rank_uni))
plt.figure(figsize=(4, 12))
plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off')
plt.plot([0.5, 1.5], [0, 0], 'k-', linestyle='--', lw=1)
plt.violinplot(data)
plt.savefig(output_directory + '/priors_%s_%s.pdf' %(classifier, mode), bbox_inches='tight')
plt.close()
plt.figure()
plt.violinplot(all, list(range(len(all))))
plt.plot([-0.5, len(all)-0.5], [0, 0], 'k-', linestyle='--', lw=1)
plt.xticks(list(range(len(keys))), keys, rotation=45, ha='right')
output_file = output_directory + '/priors_violin_%s.pdf' %mode
plt.savefig(output_file, bbox_inches='tight')
plt.close()
if __name__ == '__main__':
matplotlib.rcParams['ps.useafm'] = True
matplotlib.rcParams['pdf.use14corefonts'] = True
matplotlib.rcParams['text.usetex'] = True
args = parse_args()
study = openml.study.get_study(args.study_id, 'tasks')
all_tasks = study.tasks
if args.task_limit:
all_tasks = random.sample(all_tasks, args.task_limit)
all_classifiers = list_classifiers(args.result_directory + '/' + args.setup)
print(openmlpimp.utils.get_time(), all_classifiers)
priors_results_test = collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(dict)))
priors_results_valid = collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(dict)))
defaults_results = collections.defaultdict(dict)
for classifier, directory_suffix in all_classifiers:
classifier_dir = os.path.join(args.result_directory, args.setup, directory_suffix)
cache_dir = os.path.join(args.output_directory, args.setup, directory_suffix)
try:
os.makedirs(cache_dir)
except FileExistsError:
pass
if os.path.isfile(cache_dir + '/cache_test.pkl') and os.path.isfile(cache_dir + '/cache_valid.pkl'):
cache_results_test = pickle.load(open(cache_dir + '/cache_test.pkl', 'rb'))
cache_results_valid = pickle.load(open(cache_dir + '/cache_valid.pkl', 'rb'))
else:
cache_results_test = dict()
cache_results_valid = dict()
for task_id in all_tasks:
if task_id not in cache_results_test: cache_results_test[task_id] = dict()
if task_id not in cache_results_valid: cache_results_valid[task_id] = dict()
default_run_directory = args.defaults_directory + directory_suffix + '/' + str(task_id)
default_run_xml = default_run_directory + '/run.xml'
if os.path.isfile(default_run_xml):
default_score = get_score_from_xml(default_run_xml, args.measure)
defaults_results[classifier][task_id] = default_score
for strategy in os.listdir(classifier_dir):
strategy_name = strategy.split('__')[0]
if strategy_name not in cache_results_test[task_id]: cache_results_test[task_id][strategy_name] = dict()
if strategy_name not in cache_results_valid[task_id]: cache_results_valid[task_id][strategy_name] = dict()
task_dir = os.path.join(classifier_dir, strategy, str(task_id))
if os.path.isdir(task_dir):
for seed in os.listdir(task_dir):
seed = int(seed)
                        if args.seed is not None and args.seed != seed:
continue
strategy_trace = classifier_dir + '/' + strategy + '/' + str(task_id) + '/' + str(seed) + '/trace.arff'
strategy_predictions = classifier_dir + '/' + strategy + '/' + str(task_id) + '/' + str(seed) + '/predictions.arff'
if seed not in cache_results_test[task_id][strategy_name]:
# data not in cache yet
if not os.path.isfile(strategy_predictions) or not os.path.isfile(strategy_trace):
print(openmlpimp.utils.get_time(), '%s: Task %d not finished for strategy %s seed %s' %(classifier, task_id, strategy, seed))
continue
with open(strategy_predictions, 'r') as fp:
predictions_arff = arff.load(fp)
run = openml.runs.OpenMLRun(flow_id=-1, dataset_id=-1, task_id=task_id)
run.data_content = predictions_arff['data']
score = run.get_metric_fn(sklearn.metrics.accuracy_score)
cache_results_test[task_id][strategy_name][seed] = score.mean()
cache_results_valid[task_id][strategy_name][seed] = trace_to_score(strategy_trace)
# data is in cache
priors_results_test[classifier][task_id][strategy_name][seed] = cache_results_test[task_id][strategy_name][seed]
priors_results_valid[classifier][task_id][strategy_name][seed] = cache_results_valid[task_id][strategy_name][seed]
pickle.dump(cache_results_test, open(cache_dir + '/cache_test.pkl', 'wb'))
pickle.dump(cache_results_valid, open(cache_dir + '/cache_valid.pkl', 'wb'))
plot_scatter(defaults_results, priors_results_test)
plot_boxplot(priors_results_test, 'test')
plot_boxplot(priors_results_valid, 'validation')
|
{
"content_hash": "7133891a772c85465a937eedb45d55aa",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 207,
"avg_line_length": 47.644,
"alnum_prop": 0.6048190748048022,
"repo_name": "janvanrijn/openml-pimp",
"id": "3f77652f9c314e8f4ec8da0217b343e4c830ee9f",
"size": "11911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/plot/priors_vs_uniform_violin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "33"
},
{
"name": "Jupyter Notebook",
"bytes": "15643"
},
{
"name": "Python",
"bytes": "84680"
}
],
"symlink_target": ""
}
|
from datetime import timedelta
import httplib as http
from django.utils import timezone
from nose.tools import * # noqa (PEP8 asserts)
from framework.auth import campaigns, views as auth_views, cas
from website.util import web_url_for
from osf_tests import factories
from tests.base import OsfTestCase
from tests.utils import mock_auth
def set_preprint_providers():
"""Populate `PreprintProvider` to test database for testing."""
providers = {
'osf': 'Open Science Framework',
'socarxiv': 'SocArXiv',
'engrxiv': 'EngrXiv',
'psyarxiv': 'PsyArXiv',
}
for key, value in providers.items():
provider = factories.PreprintProviderFactory()
provider._id = key
provider.name = value
provider.save()
# tests for campaign initialization and update
class TestCampaignInitialization(OsfTestCase):
def setUp(self):
super(TestCampaignInitialization, self).setUp()
set_preprint_providers()
self.campaign_lists = [
'prereg',
'erpc',
'institution',
'osf-preprints',
'socarxiv-preprints',
'engrxiv-preprints',
'psyarxiv-preprints',
'osf-registries',
]
self.refresh = timezone.now()
campaigns.CAMPAIGNS = None # force campaign refresh now that preprint providers are populated
campaigns.CAMPAIGNS_LAST_REFRESHED = self.refresh
def test_get_campaigns_init(self):
campaign_dict = campaigns.get_campaigns()
assert_equal(len(campaign_dict), len(self.campaign_lists))
for campaign in campaign_dict:
assert_in(campaign, self.campaign_lists)
assert_not_equal(self.refresh, campaigns.CAMPAIGNS_LAST_REFRESHED)
def test_get_campaigns_update_not_expired(self):
campaigns.get_campaigns()
self.refresh = campaigns.CAMPAIGNS_LAST_REFRESHED
campaigns.get_campaigns()
assert_equal(self.refresh, campaigns.CAMPAIGNS_LAST_REFRESHED)
def test_get_campaigns_update_expired(self):
campaigns.get_campaigns()
self.refresh = timezone.now() - timedelta(minutes=5)
campaigns.CAMPAIGNS_LAST_REFRESHED = self.refresh
campaigns.get_campaigns()
assert_not_equal(self.refresh, campaigns.CAMPAIGNS_LAST_REFRESHED)
# tests for campaign helper methods
class TestCampaignMethods(OsfTestCase):
def setUp(self):
super(TestCampaignMethods, self).setUp()
set_preprint_providers()
self.campaign_lists = [
'prereg',
'erpc',
'institution',
'osf-preprints',
'socarxiv-preprints',
'engrxiv-preprints',
'psyarxiv-preprints',
]
self.invalid_campaign = 'invalid_campaign'
campaigns.CAMPAIGNS = None # force campaign refresh now that preprint providers are populated
def test_is_institution_login(self):
for campaign in self.campaign_lists:
institution = campaigns.is_institution_login(campaign)
if campaign == 'institution':
assert_true(institution)
else:
assert_false(institution)
institution = campaigns.is_institution_login(self.invalid_campaign)
assert_true(institution is None)
def test_is_native_login(self):
for campaign in self.campaign_lists:
native = campaigns.is_native_login(campaign)
if campaign == 'prereg' or campaign == 'erpc':
assert_true(native)
else:
assert_false(native)
        native = campaigns.is_native_login(self.invalid_campaign)
assert_true(native is None)
def test_is_proxy_login(self):
for campaign in self.campaign_lists:
proxy = campaigns.is_proxy_login(campaign)
if campaign.endswith('-preprints'):
assert_true(proxy)
else:
assert_false(proxy)
proxy = campaigns.is_proxy_login(self.invalid_campaign)
assert_true(proxy is None)
def test_system_tag_for_campaign(self):
for campaign in self.campaign_lists:
tag = campaigns.system_tag_for_campaign(campaign)
assert_true(tag is not None)
tag = campaigns.system_tag_for_campaign(self.invalid_campaign)
assert_true(tag is None)
def test_email_template_for_campaign(self):
for campaign in self.campaign_lists:
template = campaigns.email_template_for_campaign(campaign)
if campaigns.is_institution_login(campaign):
assert_true(template is None)
else:
assert_true(template is not None)
template = campaigns.email_template_for_campaign(self.invalid_campaign)
assert_true(template is None)
def test_campaign_url_for(self):
for campaign in self.campaign_lists:
url = campaigns.campaign_url_for(campaign)
assert_true(url is not None)
url = campaigns.campaign_url_for(self.invalid_campaign)
assert_true(url is None)
def test_get_service_provider(self):
for campaign in self.campaign_lists:
provider = campaigns.get_service_provider(campaign)
if campaigns.is_proxy_login(campaign):
assert_true(provider is not None)
else:
assert_true(provider is None)
provider = campaigns.get_service_provider(self.invalid_campaign)
assert_true(provider is None)
def test_campaign_for_user(self):
user = factories.UserFactory()
user.add_system_tag('osf_preprints')
user.save()
campaign = campaigns.campaign_for_user(user)
assert_equal(campaign, 'osf-preprints')
# tests for prereg, erpc, which follow similar auth login/register logic
class TestCampaignsAuthViews(OsfTestCase):
def setUp(self):
super(TestCampaignsAuthViews, self).setUp()
self.campaigns = {
'prereg': {
'title_register': 'Preregistration Challenge',
'title_landing': 'Welcome to the Prereg Challenge!'
},
'erpc': {
'title_register': 'Election Research Preacceptance Competition',
'title_landing': 'The Election Research Preacceptance Competition is Now Closed'
},
}
for key, value in self.campaigns.items():
value.update({'url_login': web_url_for('auth_login', campaign=key)})
value.update({'url_register': web_url_for('auth_register', campaign=key)})
value.update({'url_landing': campaigns.campaign_url_for(key)})
self.user = factories.AuthUserFactory()
def test_campaign_register_view_logged_in(self):
for key, value in self.campaigns.items():
resp = self.app.get(value['url_register'], auth=self.user.auth)
assert_equal(resp.status_code, http.FOUND)
assert_equal(value['url_landing'], resp.headers['Location'])
def test_campaign_register_view_logged_out(self):
for key, value in self.campaigns.items():
resp = self.app.get(value['url_register'])
assert_equal(resp.status_code, http.OK)
assert_in(value['title_register'], resp)
def test_campaign_login_logged_in(self):
for key, value in self.campaigns.items():
resp = self.app.get(value['url_login'], auth=self.user.auth)
assert_equal(resp.status_code, http.FOUND)
assert_in(value['url_landing'], resp.headers['Location'])
def test_campaign_login_logged_out(self):
for key, value in self.campaigns.items():
resp = self.app.get(value['url_login'])
assert_equal(resp.status_code, http.FOUND)
assert_in(value['url_register'], resp.headers['Location'])
def test_campaign_landing_logged_in(self):
for key, value in self.campaigns.items():
resp = self.app.get(value['url_landing'], auth=self.user.auth)
assert_equal(resp.status_code, http.OK)
def test_auth_prereg_landing_page_logged_out(self):
for key, value in self.campaigns.items():
resp = self.app.get(value['url_landing'])
assert_equal(resp.status_code, http.OK)
# tests for registration through campaigns
class TestRegistrationThroughCampaigns(OsfTestCase):
def setUp(self):
super(TestRegistrationThroughCampaigns, self).setUp()
def test_confirm_email_get_with_campaign(self):
for key, value in campaigns.CAMPAIGNS.items():
user = factories.UnconfirmedUserFactory()
user.add_system_tag(value.get('system_tag'))
user.save()
token = user.get_confirmation_token(user.username)
kwargs = {
'uid': user._id,
}
with self.app.app.test_request_context(), mock_auth(user):
res = auth_views.confirm_email_get(token, **kwargs)
assert_equal(res.status_code, http.FOUND)
assert_equal(res.location, campaigns.campaign_url_for(key))
# tests for institution
class TestCampaignsCASInstitutionLogin(OsfTestCase):
def setUp(self):
super(TestCampaignsCASInstitutionLogin, self).setUp()
self.url_login = web_url_for('auth_login', campaign='institution')
self.url_register = web_url_for('auth_register', campaign='institution')
self.service_url = web_url_for('dashboard', _absolute=True)
# go to CAS institution login page if not logged in
def test_institution_not_logged_in(self):
resp = self.app.get(self.url_login)
assert_equal(resp.status_code, http.FOUND)
assert_in(cas.get_login_url(self.service_url, campaign='institution'), resp.headers['Location'])
        # register behaves the same as login
resp2 = self.app.get(self.url_register)
assert_equal(resp.headers['Location'], resp2.headers['Location'])
    # go to target page (service url) if logged in
def test_institution_logged_in(self):
resp = self.app.get(self.url_login)
assert_equal(resp.status_code, http.FOUND)
assert_in(self.service_url, resp.headers['Location'])
        # register behaves the same as login
resp2 = self.app.get(self.url_register)
assert_equal(resp.headers['Location'], resp2.headers['Location'])
|
{
"content_hash": "763155c1802d538090021070fedb4352",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 104,
"avg_line_length": 39.51136363636363,
"alnum_prop": 0.6320582877959927,
"repo_name": "leb2dg/osf.io",
"id": "1306ec2ce8b80ff569264864f931a9d34a9cc0aa",
"size": "10431",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "tests/test_campaigns.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "158060"
},
{
"name": "HTML",
"bytes": "110361"
},
{
"name": "JavaScript",
"bytes": "1621074"
},
{
"name": "Mako",
"bytes": "669660"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "5400700"
}
],
"symlink_target": ""
}
|
"""Class DistributionStrategy, ReplicaContext, and supporting APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context as eager_context
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops.losses import losses_impl
from tensorflow.python.platform import tf_logging
from tensorflow.python.training import device_util
from tensorflow.python.training import distribution_strategy_context
from tensorflow.python.util import nest
# ------------------------------------------------------------------------------
# Context tracking whether in a distribution.update() or .update_non_slot()
# call.
_update_device = threading.local()
def get_update_device():
"""Get the current device if in a `DistributionStrategy.update()` call."""
try:
return _update_device.current
except AttributeError:
return None
class UpdateContext(object):
"""Context manager when you are in `update()` or `update_non_slot()`."""
def __init__(self, device):
self._device = device
self._old_device = None
def __enter__(self):
self._old_device = get_update_device()
_update_device.current = self._device
def __exit__(self, exception_type, exception_value, traceback):
del exception_type, exception_value, traceback
_update_device.current = self._old_device
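# A minimal usage sketch (the device string is illustrative):
#
#   with UpdateContext("/device:GPU:0"):
#     assert get_update_device() == "/device:GPU:0"
#   assert get_update_device() is None  # previous value restored on exit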
# ------------------------------------------------------------------------------
# Public utility functions.
def get_loss_reduction():
"""Reduce `aggregation` corresponding to the last loss reduction."""
loss_reduction = ops.get_default_graph()._last_loss_reduction # pylint: disable=protected-access
if loss_reduction == losses_impl.Reduction.SUM:
return variable_scope.VariableAggregation.SUM
return variable_scope.VariableAggregation.MEAN
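# A schematic example (assuming a loss built via `tf.losses`, which records
# its reduction on the default graph; `labels` and `predictions` are assumed):
#
#   loss = tf.losses.mean_squared_error(labels, predictions,
#                                       reduction=tf.losses.Reduction.SUM)
#   get_loss_reduction()  # -> VariableAggregation.SUM; otherwise MEAN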
# ------------------------------------------------------------------------------
# Internal API for validating the current thread mode
def _require_cross_replica_context(distribution_strategy):
"""Verify in cross-replica context for `distribution_strategy`."""
context = _get_per_thread_mode()
if context.cross_replica_context is distribution_strategy: return
# We have an error to report, figure out the right message.
if context.distribution_strategy is not distribution_strategy:
if (context.distribution_strategy is
distribution_strategy_context._get_default_distribution_strategy()): # pylint: disable=protected-access
raise RuntimeError(
'Need to be inside "with distribution_strategy.scope()" for %s' %
(distribution_strategy,))
else:
raise RuntimeError(
"Mixing different DistributionStrategy objects: %s is not %s" %
(context.distribution_strategy, distribution_strategy))
assert context.cross_replica_context is None
raise RuntimeError("Method requires being in cross-replica context, use "
"get_replica_context().merge_call()")
def require_replica_context(replica_ctx):
"""Verify in `replica_ctx` replica context."""
context = _get_per_thread_mode()
if context.replica_context is replica_ctx: return
# We have an error to report, figure out the right message.
if context.replica_context is None:
raise RuntimeError("Need to be inside `call_for_each_replica()`")
if context.distribution_strategy is replica_ctx.distribution_strategy:
# Two different ReplicaContexts with the same DistributionStrategy.
raise RuntimeError("Mismatching replica context.")
raise RuntimeError(
"Mismatching DistributionStrategy objects: %s is not %s." %
(context.distribution_strategy, replica_ctx.distribution_strategy))
def _require_distribution_strategy_scope(distribution_strategy):
"""Verify in a `distribution_strategy.scope()` in this thread."""
context = _get_per_thread_mode()
if context.distribution_strategy is distribution_strategy: return
# We have an error to report, figure out the right message.
if (context.distribution_strategy is
distribution_strategy_context._get_default_distribution_strategy()): # pylint: disable=protected-access
raise RuntimeError(
'Need to be inside "with distribution_strategy.scope()" for %s' %
(distribution_strategy,))
else:
raise RuntimeError(
"Mixing different DistributionStrategy objects: %s is not %s" %
(context.distribution_strategy, distribution_strategy))
# ------------------------------------------------------------------------------
# Internal context managers used to implement the DistributionStrategy
# base class
class _CurrentDistributionContext(object):
"""Context manager for setting the `DistributionStrategy` and var creator."""
def __init__(self,
distribution_strategy,
var_creator_scope,
var_scope=None,
default_device=None):
self._context = distribution_strategy_context._CrossReplicaThreadMode( # pylint: disable=protected-access
distribution_strategy)
self._var_creator_scope = var_creator_scope
self._var_scope = var_scope
if default_device:
self._device_scope = ops.device(default_device)
else:
self._device_scope = None
def __enter__(self):
_push_per_thread_mode(self._context)
if self._var_scope:
self._var_scope.__enter__()
self._var_creator_scope.__enter__()
if self._device_scope:
self._device_scope.__enter__()
return self._context.distribution_strategy
def __exit__(self, exception_type, exception_value, traceback):
if self._device_scope:
self._device_scope.__exit__(exception_type, exception_value, traceback)
self._var_creator_scope.__exit__(exception_type, exception_value, traceback)
if self._var_scope:
self._var_scope.__exit__(exception_type, exception_value, traceback)
_pop_per_thread_mode()
class _SameScopeAgainContext(object):
"""Trivial context manager when you are already in `scope()`."""
def __init__(self, distribution_strategy):
self._distribution_strategy = distribution_strategy
def __enter__(self):
return self._distribution_strategy
def __exit__(self, exception_type, exception_value, traceback):
del exception_type, exception_value, traceback
# ------------------------------------------------------------------------------
# Base classes for all distribution strategies.
class DistributionStrategy(object):
"""A list of devices with a state & compute distribution policy.
See [tensorflow/contrib/distribute/README.md](
https://www.tensorflow.org/code/tensorflow/contrib/distribute/README.md)
for overview and examples.
The intent is that you can write an algorithm in a stylized way and
it will be usable with a variety of different `DistributionStrategy`
implementations. Each descendant will implement a different strategy
for distributing the algorithm across multiple devices/machines.
Furthermore, these changes can be hidden inside the specific layers
and other library classes that need special treatment to run in a
distributed setting, so that most users' model definition code can
run unchanged. The `DistributionStrategy` API works the same way
with eager and graph execution.
First let's introduce a few high-level concepts:
* _Data parallelism_ is where we run multiple copies of the model
on different slices of the input data. This is in contrast to
_model parallelism_ where we divide up a single copy of a model
across multiple devices.
Note: we only support data parallelism for now, but
hope to add support for model parallelism in the future.
* A _replica_ is one copy of the model, running on one slice of the
input data.
* _Synchronous_, or more commonly _sync_, training is where the
updates from each replica are aggregated together before updating
the model variables. This is in contrast to _asynchronous_, or
_async_ training, where each replica updates the model variables
independently.
* Furthermore you might run your computation on multiple devices
on one machine (or "host"), or on multiple machines/hosts.
If you are running on multiple machines, you might have a
single master host that drives computation across all of them,
or you might have multiple clients driving the computation
asynchronously.
To distribute an algorithm, we might use some of these ingredients:
* Parameter servers: These are hosts that hold a single copy of
parameters/variables. All replicas that want to operate on a variable
retrieve it at the beginning of a step and send an update to be
applied at the end of the step. Can support either sync or async
training.
* Mirrored variables: These are variables that are copied to multiple
devices, where we keep the copies in sync by applying the same
updates to every copy. Normally would only be used with sync training.
* Reductions and Allreduce: A _reduction_ is some method of
aggregating multiple values into one value, like "sum" or
"mean". If doing sync training, we will perform a reduction on the
gradients to a parameter from all replicas before applying the
update. Allreduce is an algorithm for performing a reduction on
values from multiple devices and making the result available on
all of those devices.
* In the future we will have support for TensorFlow's partitioned
variables, where a single variable is split across multiple
devices.
We have then a few approaches we want to support:
* Code written (as if) with no knowledge of class `DistributionStrategy`.
This code should work as before, even if some of the layers, etc.
used by that code are written to be distribution-aware. This is done
by having a default `DistributionStrategy` that gives ordinary behavior,
and by default being in a single replica context.
* Ordinary model code that you want to run using a specific
`DistributionStrategy`. This can be as simple as:
```
with my_distribution.scope():
iterator = my_distribution.distribute_dataset(
dataset).make_one_shot_iterator()
replica_train_ops = my_distribution.call_for_each_replica(
replica_fn, iterator.get_next())
train_op = tf.group(my_distribution.unwrap(replica_train_ops))
```
This takes an ordinary `dataset` and `replica_fn` and runs it
distributed using a particular `DistributionStrategy` in
`my_distribution`. Any variables created in `replica_fn` are created
using `my_distribution`'s policy, and library functions called by
`replica_fn` can use the `get_replica_context()` API to get enhanced
behavior in this case.
You can also create an initializable iterator instead of a one-shot
iterator. In that case, you will need to ensure that you initialize the
iterator before calling get_next.
```
iterator = my_distribution.distribute_dataset(
      dataset).make_initializable_iterator()
session.run(iterator.initializer)
```
* If you want to write a distributed algorithm, you may use any of
the `DistributionStrategy` APIs inside a
`with my_distribution.scope():` block of code.
Lower-level concepts:
* Wrapped values: In order to represent values parallel across devices
(either replicas or the devices associated with a particular value), we
wrap them in a "PerDevice" or "Mirrored" object that contains a map
from device to values. "PerDevice" is used when the value may be
    different across devices, and "Mirrored" when the values are the same.
* Unwrapping and merging: Consider calling a function `fn` on
multiple devices, like `call_for_each_replica(fn, w)` with an
argument `w` that is a wrapped value. This means `w` will have a
map taking replica device `d0` to `w0`, replica device `d1` to `w1`,
etc. `call_for_each_replica()` unwraps `w` before calling `fn`, so
it calls `fn(w0)` on `d0`, `fn(w1)` on `d1`, etc. It then merges
the return values from `fn()`, which can possibly result in
wrapped values. For example, let's say `fn()` returns a tuple with
    three components: `(x, a, v0)` on replica 0, `(x, b, v1)` on replica 1,
etc. If the first component is the same object `x` from every
replica, then the first component of the merged result will also be
`x`. If the second component is different (`a`, `b`, ...) from
each replica, then the merged value will have a wrapped map from
replica device to the different values. If the third component is
the members of a mirrored variable (`v` maps `d0` to `v0`, `d1` to
`v1`, etc.), then the merged result will be that mirrored variable
(`v`).
* Replica context vs. Cross-replica context: _replica context_ is when we
are in some function that is being called once for each replica.
Otherwise we are in cross-replica context, which is useful for
calling `DistributionStrategy` methods which operate across the
replicas (like `reduce()`). By default you start in a replica context
(the default "single replica context") and then some methods can
switch you back and forth, as described below.
* Worker devices vs. parameter devices: Most replica computations will
happen on worker devices. Since we don't yet support model
parallelism, there will be one worker device per replica. When using
parameter servers (see above), the set of devices holding
variables may be different, otherwise the parameter devices might
match the worker devices.
* Non-slot devices are some subset of the parameter devices where we
put all the non-slot variables. We need to ensure that all
non-slot variables are allocated on the same device, or mirrored
across the same set of devices. If you have some variable you want
to colocate all the non-slot variables with, you can use
`colocate_vars_with()` to get the remaining non-slot variables on
the same device. Otherwise you can use `non_slot_devices()` to
pick a consistent set of devices to pass to both
`colocate_vars_with()` and `update_non_slot()`.
When using a `DistributionStrategy`, we have a new type dimension
called _locality_ that says what values are compatible with which
APIs:
* T: different value for each replica (e.g. a PerDevice-wrapped value).
* M: value is "mirrored" across replicas, i.e. there are copies with the
same value on each replica (e.g. a Mirrored-wrapped value).
* V(`v`): value is "mirrored" across all the devices which have a
copy of variable `v` (also a Mirrored-wrapped value, but over
parameter devices instead of worker devices).
* N: value is "mirrored" across all the "non-slot" devices
Rules for methods with respect to locality and single-replica vs.
cross-replica context:
* `with d.scope()`: default single-replica context -> cross-replica context
for `d`
* `with d.colocate_vars_with(v)`: in replica/cross-replica context, variables
will be created with locality V(`v`). That is, if we write
`with d.colocate_vars_with(v1): v2 = tf.get_variable(...)`, then
`v2` will have locality V(`v1`), i.e. locality V(`v2`) will equal
V(`v1`).
* `with d.colocate_vars_with(d.non_slot_devices(...))`: in
replica/cross-replica context, variables will be created with locality N
* `v = tf.get_variable(...)`: in replica/cross-replica context, creates
a variable (which by definition will have locality V(`v`), though
will match another locality if inside a `colocate_vars_with`
scope).
* `d.distribute_dataset(dataset).make_one_shot_iterator()`: in cross-replica
context, produces an iterator with locality T
* `d.broadcast(t)`: in cross-replica context, produces a value with locality M
* `d.broadcast(t, v)`: in cross-replica context, produces a value with
locality V(`v`)
* `d.call_for_each_replica(fn, ...)`: in cross-replica context, runs
`fn()` in a replica context (and so may call `get_replica_context()` and
use its API, including `merge_call()` to get back to cross-replica
context), once for each replica. May use values with locality T or
M, and any variable.
* `d.reduce(m, t, t)`: in cross-replica context, accepts t with locality T
and produces a value with locality M.
* `d.reduce(m, t, v)`: in cross-replica context, accepts t with
locality T and produces a value with locality V(`v`).
  * `d.batch_reduce(m, [(t, v)])`: see `d.reduce()`
* `d.update(v, fn, ...)`: in cross-replica context, runs `fn()` once
for each device `v` is copied to, all inputs should have locality
V(`v`), output will have locality V(`v`) as well.
* `d.update_non_slot(d.non_slot_devices(), fn)`: in cross-replica
context, like `d.update()` except with locality N.
* `d.read_var(v)`: Gets the (read-only) value of the variable `v` (on
the device determined by the current device scope), aggregating
across replicas for replica-local variables. Frequently, this will be
done automatically when using `v` in an expression or fetching it in
a cross-replica context, but this function can be used to force that
conversion happens at a particular point in time (for example, to
add the result of the conversion to a graph collection).
The standard pattern for updating variables is to:
1. Wrap your input dataset in `d.distribute_dataset()` and create an iterator.
  2. Define each replica's computation with `d.call_for_each_replica()`, up
     to the point of getting a list of (gradient, variable) pairs.
3. Call `d.reduce(VariableAggregation.SUM, t, v)` or `d.batch_reduce()` to sum
the gradients (with locality T) into values with locality V(`v`).
4. Call `d.update(v)` for each variable to update its value.
Steps 3 and 4 are done automatically by class `Optimizer` if you call
its `apply_gradients` method in a replica context. Otherwise you can
manually call its `_distributed_apply` method in a cross-replica context.
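  A compact sketch of steps 1-4 (schematic only; `d`, `dataset`, and a
  `replica_fn` returning a single (gradient, variable) pair are assumed):
  ```
  with d.scope():
    iterator = d.distribute_dataset(lambda: dataset).make_one_shot_iterator()
    grad, var = d.call_for_each_replica(replica_fn, iterator.get_next())
    # Locality T -> locality V(var); then update every copy of `var`.
    summed = d.reduce(tf.VariableAggregation.SUM, grad, destinations=var)
    train_op = d.update(var, lambda v, g: v.assign_sub(g), summed)
  ```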
Another thing you might want to do in the middle of your replica function
is an all-reduce of some intermediate value, using `d.reduce()` or
`d.batch_reduce()`. You simply provide the same tensor as the input and
destination.
Layers should expect to be called in a replica context, and can use
the `get_replica_context()` function to get a `ReplicaContext` object. The
`ReplicaContext` object has a `merge_call()` method for entering
cross-replica context where you can use `reduce()` (or
`batch_reduce()`) and then optionally `update()` to update state.
You may use this API whether or not a `DistributionStrategy` is
being used, since there is a default implementation of
`ReplicaContext` and `DistributionStrategy`. Or you can use the
`get_replica_context().is_single_replica` property to run different code
in the distributed vs. single replica cases.
"""
# TODO(josh11b): Raise an exception if variable partitioning requested before
# we add support.
# TODO(josh11b): Also `parameter_device_index` property?
# TODO(josh11b): `map()`
# TODO(josh11b): ClusterSpec/ClusterResolver
# TODO(josh11b): Partitioned computations, state; sharding
# TODO(josh11b): Model parallelism: "replicas" with multiple devices; shuffling
# TODO(josh11b): List of replicas with their worker and parameter devices
# (where the parameter devices may overlap in the ps case).
def __init__(self):
self._default_device = None
# This property is used to determine if we should set drop_remainder=True
# when creating Datasets from numpy array inputs.
self._require_static_shapes = False
def scope(self):
"""Returns a context manager selecting this DistributionStrategy as current.
Inside a `with distribution_strategy.scope():` code block, this thread
will use a variable creator set by `distribution_strategy`, and will
enter its "cross-replica context".
Returns:
A context manager.
"""
if distribution_strategy_context.has_distribution_strategy():
_require_cross_replica_context(self)
return _SameScopeAgainContext(self)
def creator_with_resource_vars(*args, **kwargs):
_require_distribution_strategy_scope(self)
kwargs["use_resource"] = True
return self._create_variable(*args, **kwargs)
def disable_partitioned_variables(getter, *args, **kwargs):
if kwargs.pop("partitioner", None) is not None:
tf_logging.log_first_n(
tf_logging.WARN, "Partitioned variables are disabled when using "
"DistributionStrategy.", 1)
return getter(*args, **kwargs)
return _CurrentDistributionContext(
self, variable_scope.variable_creator_scope(creator_with_resource_vars),
variable_scope.variable_scope(
variable_scope.get_variable_scope(),
custom_getter=disable_partitioned_variables),
self._default_device)
def _create_variable(self, next_creator, *args, **kwargs):
# Note: should support "colocate_with" argument.
raise NotImplementedError("must be implemented in descendants")
def read_var(self, v):
"""Reads the value of a variable.
Returns the aggregate value of a replica-local variable, or the
(read-only) value of any other variable.
Args:
v: A variable allocated within the scope of this `DistributionStrategy`.
Returns:
A tensor representing the value of `v`, aggregated across replicas if
necessary.
"""
raise NotImplementedError("must be implemented in descendants")
def colocate_vars_with(self, colocate_with_variable):
"""Scope that controls which devices variables will be created on.
No operations should be added to the graph inside this scope, it
should only be used when creating variables (some implementations
work by changing variable creation, others work by using a
tf.colocate_with() scope).
This may only be used inside `self.scope()`.
Example usage:
```
with distribution_strategy.scope():
var1 = tf.get_variable(...)
with distribution_strategy.colocate_vars_with(v1):
# var2 and var3 will be created on the same device(s) as var1
var2 = tf.get_variable(...)
var3 = tf.get_variable(...)
def fn(v1, v2, v3):
# operates on v1 from var1, v2 from var2, and v3 from var3
# `fn` runs on every device `v1` is on, `v2` and `v3` will be there too.
distribution_strategy.update(v1, fn, v2, v3)
```
Args:
colocate_with_variable: A created in `self.scope()`. Variables created
while in the returned context manager will be on the same set of
devices as `colocate_with_variable`.
Returns:
A context manager.
"""
def create_colocated_variable(next_creator, *args, **kwargs):
_require_distribution_strategy_scope(self)
kwargs["use_resource"] = True
kwargs["colocate_with"] = colocate_with_variable
return next_creator(*args, **kwargs)
_require_distribution_strategy_scope(self)
return variable_scope.variable_creator_scope(create_colocated_variable)
def _call_dataset_fn(self, dataset_fn):
result = dataset_fn()
if not isinstance(result, dataset_ops.Dataset):
raise ValueError(
"dataset_fn() must return a tf.data.Dataset when using a "
"DistributionStrategy.")
return result
# TODO(josh11b): `PerDeviceDataset` currently only implements a few methods of
# Dataset API such as make_one_shot_iterator and make_initializable_iterator.
# Extend to implement more functionality of datasets.
def distribute_dataset(self, dataset_fn):
"""Return a `dataset` split across all replicas.
    Suitable for providing input to `call_for_each_replica()` by creating an
iterator:
```
def dataset_fn():
return tf.data.Dataset.from_tensors([[1.]]).repeat()
with distribution_strategy.scope():
distributed_dataset = distribution_strategy.distribute_dataset(dataset_fn)
iterator = distributed_dataset.make_one_shot_iterator()
replica_results = distribution_strategy.call_for_each_replica(
replica_fn, iterator.get_next())
```
Args:
dataset_fn: A function that returns a `tf.data.Dataset`.
Returns:
A `PerDeviceDataset` that will produce data for each replica.
"""
raise NotImplementedError("must be implemented in descendants")
def broadcast(self, tensor, destinations=None):
"""Mirror a tensor on one device to all worker devices.
Args:
tensor: A Tensor value to broadcast.
destinations: An optional mirrored variable, device string, or
list of device strings, specifying the destination devices
to copy `tensor` to. Defaults to `self.worker_devices`.
Returns:
A value mirrored to `destinations` devices.
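    A schematic example (`d` is assumed to be a concrete strategy used
    inside its `scope()`):
    ```
    step = tf.constant(0, dtype=tf.int64)
    mirrored_step = d.broadcast(step)  # one copy on every worker device
    ```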
"""
# TODO(josh11b): More docstring
_require_cross_replica_context(self)
return self._broadcast(tensor, destinations)
def _broadcast(self, tensor, destinations):
raise NotImplementedError("must be implemented in descendants")
def initialize(self):
"""Any initialization to be done before running any computations.
In eager mode, it executes any initialization as a side effect.
In graph mode, it creates the initialization ops and returns them.
For example, TPU initialize_system ops.
Returns:
In eager mode, returns `None`.
In graph mode, a list of ops to execute. Empty list if nothing to be done.
"""
if eager_context.executing_eagerly():
return
else:
return []
def finalize(self):
"""Any final actions to be done at the end of all computations.
In eager mode, it executes any finalize actions as a side effect.
In graph mode, it creates the finalize ops and returns them.
For example, TPU shutdown ops.
Returns:
In eager mode, returns `None`.
In graph mode, a list of ops to execute. Empty list if nothing to be done.
"""
if eager_context.executing_eagerly():
return
else:
return []
def run_steps_on_dataset(self, fn, iterator, iterations=1,
initial_loop_values=None):
"""Run `fn` with input from `iterator` for `iterations` times.
This method can be used to run a step function for training a number of
times using input from a dataset.
Args:
fn: function to run using this distribution strategy. The function must
have the following signature: `def fn(context, *inputs)`.
`context` is an instance of `MultiStepContext` that will be passed when
`fn` is run. `context` can be used to specify the outputs to be returned
from `fn` by calling `context.set_last_step_output`. It can also be used
to capture non tensor outputs by `context.set_non_tensor_output`.
See `MultiStepContext` documentation for more information.
        `inputs` will have the same type/structure as `iterator.get_next()`.
        If `iterator.get_next()` returns a tuple, say `return x, y`, it will
        be unpacked and passed to `fn`, so the signature would look like
        `def fn(context, x, y)`. If the iterator returns a single value, say
        `return x`, the value is passed as is and the signature would look
        like `def fn(context, x)`.
Typically, `fn` will use `call_for_each_replica` method of the strategy
to distribute the computation over multiple replicas.
iterator: Iterator of a dataset that represents the input for `fn`. The
caller is responsible for initializing the iterator as needed.
iterations: (Optional) Number of iterations that `fn` should be run.
Defaults to 1.
initial_loop_values: (Optional) Initial values to be passed into the
loop that runs `fn`. Defaults to `None`. # TODO(priyag): Remove
initial_loop_values argument when we have a mechanism to infer the
outputs of `fn`.
Returns:
Returns the `MultiStepContext` object which has the following properties,
among other things:
- run_op: An op that runs `fn` `iterations` times.
- last_step_outputs: A dictionary containing tensors set using
`context.set_last_step_output`. Evaluating this returns the value of
the tensors after the last iteration.
      - non_tensor_outputs: A dictionary containing anything that was set by
`fn` by calling `context.set_non_tensor_output`.
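    A schematic example (a per-replica `loss_fn` and a strategy `d` are
    assumed):
    ```
    def step_fn(context, inputs):
      loss = d.call_for_each_replica(loss_fn, inputs)
      context.set_last_step_output("loss", loss)
    ctx = d.run_steps_on_dataset(step_fn, iterator, iterations=10)
    # In graph mode, run ctx.run_op, then read ctx.last_step_outputs["loss"].
    ```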
"""
_require_cross_replica_context(self)
return self._run_steps_on_dataset(fn, iterator, iterations,
initial_loop_values)
def _run_steps_on_dataset(self, fn, iterator, iterations,
initial_loop_values):
raise NotImplementedError("must be implemented in descendants")
def call_for_each_replica(self, fn, *args, **kwargs):
"""Run `fn` once per replica.
`fn` may call `tf.get_replica_context()` to access methods such as
`replica_id()` and `merge_call()`.
`merge_call()` is used to communicate between the replicas and
re-enter the cross-replica context. All replicas pause their execution
having encountered a `merge_call()` call. After that the
`merge_fn`-function is executed. Its results are then unwrapped and
given back to each replica call. After that execution resumes until
`fn` is complete or encounters another `merge_call()`. Example:
```python
# Called once in "cross-replica" context.
def merge_fn(distribution, three_plus_replica_id):
# sum the values across replicas
return sum(distribution.unwrap(three_plus_replica_id))
# Called once per replica in `distribution`, in a "replica" context.
def fn(three):
replica_ctx = tf.get_replica_context()
v = three + replica_ctx.replica_id
# Computes the sum of the `v` values across all replicas.
s = replica_ctx.merge_call(merge_fn, v)
return s + v
with distribution.scope():
# in "cross-replica" context
...
merged_results = distribution.call_for_each_replica(fn, 3)
# merged_results has the values from every replica execution of `fn`.
print(distribution.unwrap(merged_results)) # Prints a list
```
Args:
fn: function to run (will be run once per replica).
*args: positional arguments for `fn`
**kwargs: keyword arguments for `fn`.
`"run_concurrently"`: Boolean indicating whether executions of `fn`
can be run concurrently (under eager execution only), defaults to
`True`.
Returns:
Merged return value of `fn` across all replicas.
"""
_require_cross_replica_context(self)
return self._call_for_each_tower(fn, *args, **kwargs)
def call_for_each_tower(self, fn, *args, **kwargs):
"""Run `fn` once per replica. DEPRECATED.
DEPRECATED: Use `call_for_each_replica` instead.
`fn` may call `tf.get_replica_context()` to access methods such as
`replica_id()` and `merge_call()`.
`merge_call()` is used to communicate between the replicas and
re-enter the cross-replica context. All replicas pause their execution
having encountered a `merge_call()` call. After that the
`merge_fn`-function is executed. Its results are then unwrapped and
given back to each replica call. After that execution resumes until
`fn` is complete or encounters another `merge_call()`. Example:
```python
# Called once in "cross-replica" context.
def merge_fn(distribution, three_plus_replica_id):
# sum the values across replicas
return sum(distribution.unwrap(three_plus_replica_id))
# Called once per replica in `distribution`, in a "replica" context.
def fn(three):
replica_ctx = tf.get_replica_context()
v = three + replica_ctx.replica_id
# Computes the sum of the `v` values across all replicas.
s = replica_ctx.merge_call(merge_fn, v)
return s + v
with distribution.scope():
# in "cross-replica" context
...
merged_results = distribution.call_for_each_replica(fn, 3)
# merged_results has the values from every replica execution of `fn`.
print(distribution.unwrap(merged_results)) # Prints a list
```
Args:
fn: function to run (will be run once per replica).
*args: positional arguments for `fn`
**kwargs: keyword arguments for `fn`.
`"run_concurrently"`: Boolean indicating whether executions of `fn`
can be run concurrently (under eager execution only), defaults to
`True`.
Returns:
Merged return value of `fn` across all replicas.
"""
_require_cross_replica_context(self)
return self._call_for_each_tower(fn, *args, **kwargs)
def _call_for_each_tower(self, fn, *args, **kwargs):
raise NotImplementedError("must be implemented in descendants")
def reduce(self, aggregation, value, destinations):
"""Combine (via e.g. sum or mean) values across replicas.
Args:
aggregation: Indicates how a variable will be aggregated. Accepted values
are `tf.VariableAggregation.SUM`, `tf.VariableAggregation.MEAN`,
`tf.VariableAggregation.ONLY_FIRST_REPLICA`.
value: A per-device value with one value per replica.
destinations: A mirrored variable, a per-device tensor, a device string,
or list of device strings. The return value will be copied to all
destination devices (or all the devices where the `destinations` value
resides). To perform an all-reduction, pass `value` to `destinations`.
Returns:
A value mirrored to `destinations`.
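    A schematic example (`d` and a per-replica value `t` are assumed):
    ```
    # All-reduce: sum `t` across replicas and mirror the result back to
    # the devices `t` lives on.
    summed = d.reduce(tf.VariableAggregation.SUM, t, destinations=t)
    ```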
"""
# TODO(josh11b): More docstring
# TODO(josh11b): Return an unwrapped value if colocate_with is a
# single device.
_require_cross_replica_context(self)
assert aggregation in [
variable_scope.VariableAggregation.SUM,
variable_scope.VariableAggregation.MEAN,
variable_scope.VariableAggregation.ONLY_FIRST_REPLICA
]
return self._reduce(aggregation, value, destinations)
def _reduce(self, aggregation, value, destinations):
raise NotImplementedError("must be implemented in descendants")
def batch_reduce(self, aggregation, value_destination_pairs):
"""Combine multiple `reduce` calls into one for faster execution.
Args:
aggregation: Indicates how a variable will be aggregated. Accepted values
are `tf.VariableAggregation.SUM`, `tf.VariableAggregation.MEAN`,
`tf.VariableAggregation.ONLY_FIRST_REPLICA`.
value_destination_pairs: A sequence of (value, destinations)
pairs. See `reduce()` for a description.
Returns:
A list of mirrored values, one per pair in `value_destination_pairs`.
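    A schematic example (gradients `g1`, `g2` and variables `v1`, `v2` are
    assumed):
    ```
    g1_sum, g2_sum = d.batch_reduce(
        tf.VariableAggregation.SUM, [(g1, v1), (g2, v2)])
    ```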
"""
# TODO(josh11b): More docstring
_require_cross_replica_context(self)
assert aggregation in [
variable_scope.VariableAggregation.SUM,
variable_scope.VariableAggregation.MEAN,
variable_scope.VariableAggregation.ONLY_FIRST_REPLICA
]
return self._batch_reduce(aggregation, value_destination_pairs)
def _batch_reduce(self, aggregation, value_destination_pairs):
return [
self.reduce(aggregation, t, destinations=v)
for t, v in value_destination_pairs
]
def update(self, var, fn, *args, **kwargs):
"""Run `fn` to update `var` using inputs mirrored to the same devices.
If `var` is mirrored across multiple devices, then this implements
logic like:
```
results = {}
for device, v in var:
with tf.device(device):
# *args and **kwargs will be unwrapped if they are mirrored.
results[device] = fn(v, *args, **kwargs)
return merged(results)
```
Otherwise this returns `fn(var, *args, **kwargs)` colocated with `var`.
Neither `*args` nor `**kwargs` may contain per-device values.
If they contain mirrored values, they will be unwrapped before
calling `fn`.
Args:
var: Variable, possibly mirrored to multiple devices, to operate on.
fn: Function to call. Should take the variable as the first argument.
*args: Additional positional arguments to pass to `fn()`.
**kwargs: Keyword arguments to pass to `fn()`. If "grouped=False" is
specified, the return value will be unwrapped.
Returns:
By default, the merged return value of `fn` across all replicas. The
merged result has dependencies to make sure that if it is evaluated at
all, the side effects (updates) will happen on every replica. If instead
"grouped=False" is specified, this function will return a nest of lists
where each list has an element per replica, and the caller is responsible
for ensuring all elements are executed.
"""
_require_cross_replica_context(self)
options = {"grouped": kwargs.pop("grouped", True)}
return self._update(var, options, fn, *args, **kwargs)
def _update(self, var, options, fn, *args, **kwargs):
raise NotImplementedError("must be implemented in descendants")
def update_non_slot(self, colocate_with, fn, *args, **kwargs):
"""Runs `fn(*args, **kwargs)` on `colocate_with` devices.
Args:
colocate_with: The return value of `non_slot_devices()`.
fn: Function to execute.
*args: Positional arguments to pass to `fn()`.
**kwargs: Keyword arguments to pass to `fn()`. If "grouped=False" is
specified, the return value will be unwrapped and the caller is
responsible for ensuring all elements are executed.
Returns:
Return value of `fn`, possibly merged across devices.
"""
_require_cross_replica_context(self)
options = {"grouped": kwargs.pop("grouped", True)}
return self._update_non_slot(colocate_with, options, fn, *args, **kwargs)
def _update_non_slot(self, colocate_with, options, fn, *args, **kwargs):
raise NotImplementedError("must be implemented in descendants")
def unwrap(self, value):
"""Returns the list of all per-device values contained in `value`.
Args:
value: A value returned by `call_for_each_replica()` or a variable
created in `scope()`.
Returns:
A list of values contained in `value`. If `value` represents a single
      value, this returns `[value]`.
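    A schematic example (mirroring the pattern in the class docstring):
    ```
    replica_ops = d.call_for_each_replica(replica_fn, iterator.get_next())
    train_op = tf.group(d.unwrap(replica_ops))
    ```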
"""
return self._unwrap(value)
def value_container(self, value):
"""Returns the container that this per-device `value` belongs to.
Args:
value: A value returned by `call_for_each_replica()` or a variable
created in `scope()`.
Returns:
A container that `value` belongs to.
      If value does not belong to any container (including the case where
      the container has been destroyed), returns the value itself.
`value in unwrap(value_container(value))` will always be true.
"""
raise NotImplementedError("must be implemented in descendants")
def _unwrap(self, distributed_value):
raise NotImplementedError("must be implemented in descendants")
def group(self, value, name=None):
"""Shortcut for `tf.group(distribution.unwrap(value))`."""
value = nest.flatten(self.unwrap(value))
if len(value) != 1 or name is not None:
return control_flow_ops.group(value, name=name)
# Special handling for the common case of one op.
v, = value
if hasattr(v, "op"):
v = v.op
return v
@property
def is_single_replica(self):
"""Returns whether there is a single replica or multiple.
Returns:
A boolean. If `True`, `call_for_each_replica(fn)` will only call
`fn` once. If `False`, `call_for_each_replica(fn)` may call
`fn` multiple times.
"""
return self.is_single_tower
@property
def is_single_tower(self):
"""DEPRECATED: Use `is_single_replica` instead."""
raise NotImplementedError("must be implemented in descendants")
@property
def require_static_shapes(self):
return self._require_static_shapes
@property
def num_replicas(self):
"""Returns number of replicas, for purposes of averaging across replicas."""
return self.num_towers
@property
def num_towers(self):
"""Returns number of replicas, for purposes of averaging across replicas.
DEPRECATED: use `num_replicas` instead.
"""
raise NotImplementedError("must be implemented in descendants")
@property
def num_replicas_in_sync(self):
"""Returns number of replicas over which gradients are aggregated."""
raise NotImplementedError("must be implemented in descendants")
@property
def worker_devices(self):
"""Returns the list of devices used to run `call_for_each_replica()` calls.
"""
# TODO(josh11b): More docstring
raise NotImplementedError("must be implemented in descendants")
@property
def parameter_devices(self):
"""Returns the list of devices used for variable and `update` placement."""
# TODO(josh11b): More docstring
raise NotImplementedError("must be implemented in descendants")
def non_slot_devices(self, var_list):
"""Device(s) for non-slot variables.
Create variables on these devices in a
`with colocate_vars_with(non_slot_devices(...)):` block.
Update those using `update_non_slot()`.
Args:
var_list: The list of variables being optimized, needed with the
default `DistributionStrategy`.
"""
raise NotImplementedError("must be implemented in descendants")
@property
def worker_device_index(self):
"""An object mapping worker device to an id.
This might be passed as an argument to `call_for_each_replica()`, as in:
```
with distribution_strategy.scope():
def fn(device_id):
# device_id is an integer. `fn` is being executed on device:
# distribution_strategy.worker_devices[device_id].
distribution_strategy.call_for_each_replica(
fn, distribution_strategy.worker_device_index)
```
Returns:
An index object, or the integer 0 if there is only a single replica.
"""
_require_cross_replica_context(self)
return self._worker_device_index()
def _worker_device_index(self):
raise NotImplementedError("must be implemented in descendants")
@property
def between_graph(self):
"""Whether the strategy uses between-graph replication or not.
This is expected to return a constant value that will not be changed
throughout its life cycle.
"""
raise NotImplementedError("must be implemented in descendants")
def configure(self,
session_config=None,
cluster_spec=None,
task_type=None,
task_id=None):
"""Configures the strategy class."""
del session_config, cluster_spec, task_type, task_id
@property
def should_init(self):
"""Whether initialization is needed."""
raise NotImplementedError("must be implemented in descendants")
@property
def should_checkpoint(self):
"""Whether checkpointing is needed."""
raise NotImplementedError("must be implemented in descendants")
@property
def should_save_summary(self):
"""Whether saving summaries is needed."""
raise NotImplementedError("must be implemented in descendants")
# A note about the difference between the context managers
# `ReplicaContext` (defined here) and `_CurrentDistributionContext`
# (defined above) used by `DistributionStrategy.scope()`:
#
# * a ReplicaContext is only present during a `call_for_each_replica()`
# call (except during a `merge_call()` call) and in such a scope it
# will be returned by calls to `get_replica_context()`. Implementers of new
# DistributionStrategy descendants will frequently also need to
# define a descendant of ReplicaContext, and are responsible for
# entering and exiting this context.
#
# * DistributionStrategy.scope() sets up a variable_creator scope that
# changes variable creation calls (e.g. to make mirrored
# variables). This is intended as an outer scope that users enter once
# around their model creation and graph definition. There is no
# anticipated need to define descendants of _CurrentDistributionContext.
# It sets the current DistributionStrategy for purposes of
# `get_distribution_strategy()` and `has_distribution_strategy()`
# and switches the thread mode to a "cross-replica context".
class ReplicaContext(object):
"""DistributionStrategy API inside a `call_for_each_replica()` call."""
def __init__(self, distribution_strategy, replica_id=None, tower_id=None):
"""`tower_id` is deprecated, use `replica_id` instead."""
if tower_id is not None:
replica_id = tower_id
assert replica_id is not None
self._distribution_strategy = distribution_strategy
self._thread_context = distribution_strategy_context._InReplicaThreadMode( # pylint: disable=protected-access
self)
self._replica_id = replica_id
# We keep a copy in _tower_id to ease the replica->tower transition.
self._tower_id = replica_id # DEPRECATED
def __enter__(self):
_push_per_thread_mode(self._thread_context)
def __exit__(self, exception_type, exception_value, traceback):
_pop_per_thread_mode()
def merge_call(self, merge_fn, *args, **kwargs):
"""Merge args across replicas and run `merge_fn` in a cross-replica context.
This allows communication and coordination when there are multiple calls
to a model function triggered by a call to
`distribution.call_for_each_replica(model_fn, ...)`.
    See `MirroredStrategy.call_for_each_replica()` for an explanation.
Otherwise, this is equivalent to:
```
distribution = get_distribution_strategy()
with cross-replica-context(distribution):
return merge_fn(distribution, *args, **kwargs)
```
Args:
merge_fn: function that joins arguments from threads that are given as
PerDevice. It accepts `DistributionStrategy` object as the first
argument.
*args: positional per-thread arguments for `merge_fn`
**kwargs: keyword per-thread arguments for `merge_fn`.
Returns:
The return value of `merge_fn`, except for `PerDevice` values which are
unpacked.
"""
require_replica_context(self)
return self._merge_call(merge_fn, *args, **kwargs)
def _merge_call(self, merge_fn, *args, **kwargs):
"""Default implementation for single replica."""
_push_per_thread_mode( # thread-local, so not needed with multiple threads
distribution_strategy_context._CrossReplicaThreadMode( # pylint: disable=protected-access
self._distribution_strategy))
try:
return merge_fn(self._distribution_strategy, *args, **kwargs)
finally:
_pop_per_thread_mode()
@property
def is_single_replica(self):
"""Returns whether there is a single replica or multiple."""
require_replica_context(self)
return self._distribution_strategy.is_single_replica
@property
def num_towers(self):
"""Returns number of replicas, for purposes of averaging across replicas.
DEPRECATED: use `num_replicas` instead.
"""
return self._distribution_strategy.num_replicas
@property
def num_replicas(self):
"""Returns number of replicas, for purposes of averaging across replicas."""
return self._distribution_strategy.num_replicas
@property
def replica_id(self):
"""Which replica is being defined, a number from 0 to `num_replicas - 1`."""
require_replica_context(self)
return self._replica_id
@property
def tower_id(self):
"""DEPRECATED: Use `replica_id` instead."""
require_replica_context(self)
return self._replica_id
@property
def distribution_strategy(self):
"""The current `DistributionStrategy` object."""
return self._distribution_strategy
@property
def device(self):
"""The device this replica is to be executed on, as a string."""
require_replica_context(self)
return device_util.current()
# TODO(josh11b): Implement `start_all_reduce(method, t)` for efficient
# all-reduce. It would return a function returning the result of reducing `t`
# across all replicas. The caller would wait to call this function until they
# needed the reduce result, allowing an efficient implementation:
# * With eager execution, the reduction could be performed asynchronously
# in the background, not blocking until the result was needed.
# * When constructing a graph, it could batch up all reduction requests up
# to that point that the first result is needed. Most likely this can be
# implemented in terms of `merge_call()` and `batch_reduce()`.
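# A minimal sketch of that lazy pattern, assuming the strategy's
# `reduce(aggregation, value, destinations)` API; the helper name and the
# `aggregation` argument here are illustrative, not part of this module.
def _start_all_reduce_sketch(replica_ctx, aggregation, t):
  """Returns a thunk that reduces `t` across replicas on first use."""
  cache = []
  def _merge_fn(strategy, v):
    # Runs in a cross-replica context; `v` arrives merged as a PerDevice
    # value, so it can be handed straight to the reduction.
    cache.append(strategy.reduce(aggregation, v, destinations=None))
  def _result():
    if not cache:
      replica_ctx.merge_call(_merge_fn, t)
    return cache[0]
  return _result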
# ------------------------------------------------------------------------------
class _DefaultDistributionStrategy(DistributionStrategy):
"""Default `DistributionStrategy` if none is explicitly selected."""
def scope(self):
"""Context manager setting a variable creator and `self` as current."""
if distribution_strategy_context.has_distribution_strategy():
raise RuntimeError("Must not nest DistributionStrategy scopes.")
def creator(next_creator, *args, **kwargs):
_require_distribution_strategy_scope(self)
return next_creator(*args, **kwargs)
return _CurrentDistributionContext(
self, variable_scope.variable_creator_scope(creator))
def colocate_vars_with(self, colocate_with_variable):
"""Does not require `self.scope`."""
_require_distribution_strategy_scope(self)
return ops.colocate_with(colocate_with_variable)
def distribute_dataset(self, dataset_fn):
return self._call_dataset_fn(dataset_fn)
def _broadcast(self, tensor, destinations):
if destinations is None:
return tensor
else:
raise NotImplementedError("TODO")
def _call_for_each_tower(self, fn, *args, **kwargs):
# We don't run `fn` in multiple threads in _DefaultDistributionStrategy.
kwargs.pop("run_concurrently", None)
with ReplicaContext(self, replica_id=0):
return fn(*args, **kwargs)
def _reduce(self, aggregation, value, destinations):
# TODO(josh11b): Use destinations?
del aggregation, destinations
return value
def _update(self, var, options, fn, *args, **kwargs):
# The implementations of _update() and _update_non_slot() are identical
# except _update() passes `var` as the first argument to `fn()`.
return self._update_non_slot(var, options, fn, var, *args, **kwargs)
def _update_non_slot(self, colocate_with, options, fn, *args, **kwargs):
should_group = options.pop("grouped")
assert not options # Validate that we are processing all of the options.
# TODO(josh11b): Figure out what we should be passing to UpdateContext()
# once that value is used for something.
with ops.colocate_with(colocate_with), UpdateContext(colocate_with):
result = fn(*args, **kwargs)
if should_group:
return result
else:
return nest.map_structure(self._unwrap, result)
def read_var(self, replica_local_var):
return array_ops.identity(replica_local_var)
def _unwrap(self, distributed_value):
return [distributed_value]
def value_container(self, value):
return value
@property
def is_single_tower(self):
return True
@property
def num_towers(self):
return 1
@property
def worker_devices(self):
raise RuntimeError(
"worker_devices() method unsupported by _DefaultDistributionStrategy.")
@property
def parameter_devices(self):
raise RuntimeError("parameter_devices() method unsupported by "
"_DefaultDistributionStrategy.")
def non_slot_devices(self, var_list):
return min(var_list, key=lambda x: x.name)
def _worker_device_index(self):
raise RuntimeError("worker_device_index() method unsupported by "
"_DefaultDistributionStrategy.")
# ------------------------------------------------------------------------------
# We haven't yet implemented deserialization for DistributedVariables.
# So here we catch any attempts to deserialize variables
# when using distribution strategies.
# pylint: disable=protected-access
_original_from_proto = resource_variable_ops._from_proto_fn
def _from_proto_fn(v, import_scope=None):
if distribution_strategy_context.has_distribution_strategy():
raise NotImplementedError(
"Deserialization of variables is not yet supported when using"
"distributed strategies.")
else:
return _original_from_proto(v, import_scope=import_scope)
resource_variable_ops._from_proto_fn = _from_proto_fn
# pylint: enable=protected-access
#-------------------------------------------------------------------------------
# Shorthand for some methods from distribution_strategy_context.
_push_per_thread_mode = distribution_strategy_context._push_per_thread_mode # pylint: disable=protected-access
_get_per_thread_mode = distribution_strategy_context._get_per_thread_mode # pylint: disable=protected-access
_pop_per_thread_mode = distribution_strategy_context._pop_per_thread_mode # pylint: disable=protected-access
#-------------------------------------------------------------------------------
# For compatibility during the tower -> replica transition.
_require_cross_tower_context = _require_cross_replica_context
require_tower_context = require_replica_context
TowerContext = ReplicaContext
|
{
"content_hash": "6226474ff5c8b6004790677f68ff7889",
"timestamp": "",
"source": "github",
"line_count": 1312,
"max_line_length": 114,
"avg_line_length": 40.948170731707314,
"alnum_prop": 0.6915531233713051,
"repo_name": "alshedivat/tensorflow",
"id": "051f965f8b4d128c44f8ff4a4cfd14ea0953538f",
"size": "54413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/distribute.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3325"
},
{
"name": "Batchfile",
"bytes": "10132"
},
{
"name": "C",
"bytes": "439824"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "50398044"
},
{
"name": "CMake",
"bytes": "199209"
},
{
"name": "Dockerfile",
"bytes": "36386"
},
{
"name": "Go",
"bytes": "1276639"
},
{
"name": "HTML",
"bytes": "4681865"
},
{
"name": "Java",
"bytes": "871083"
},
{
"name": "Jupyter Notebook",
"bytes": "2604347"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "61311"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "40038696"
},
{
"name": "RobotFramework",
"bytes": "890"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "486609"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
}
|
import click
from alertaclient.utils import action_progressbar, build_query
@click.command('close', short_help='Close alerts')
@click.option('--ids', '-i', metavar='ID', multiple=True, help='List of alert IDs (can use short 8-char id)')
@click.option('--query', '-q', 'query', metavar='QUERY', help='severity:"warning" AND resource:web')
@click.option('--filter', '-f', 'filters', metavar='FILTER', multiple=True, help='KEY=VALUE e.g. severity=warning resource=web')
@click.option('--text', help='Message associated with status change')
@click.pass_obj
def cli(obj, ids, query, filters, text):
"""Set alert status to 'closed'."""
client = obj['client']
if ids:
total = len(ids)
else:
if query:
query = [('q', query)]
else:
query = build_query(filters)
total, _, _ = client.get_count(query)
ids = [a.id for a in client.get_alerts(query)]
action_progressbar(client, action='close', ids=ids, label=f'Closing {total} alerts', text=text)
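# Example invocations (alert IDs hypothetical), from a shell:
#   alerta close --ids 1a2b3c4d --text 'resolved after deploy'
#   alerta close --filter severity=warning --filter resource=web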
|
{
"content_hash": "748169ce5621158973f632f6807f3f93",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 128,
"avg_line_length": 40.76,
"alnum_prop": 0.6388616290480864,
"repo_name": "alerta/python-alerta-client",
"id": "8b7ac781807c1c0de37d8431aa3cd3577db06cfd",
"size": "1019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alertaclient/commands/cmd_close.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "253"
},
{
"name": "Makefile",
"bytes": "2622"
},
{
"name": "Python",
"bytes": "181621"
},
{
"name": "Shell",
"bytes": "5273"
}
],
"symlink_target": ""
}
|
"""
Tests for the twistedcaldav.datafilters module.
"""
|
{
"content_hash": "8d937f19eeeed0ba5c530a115b745247",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 47,
"avg_line_length": 18.666666666666668,
"alnum_prop": 0.7321428571428571,
"repo_name": "macosforge/ccs-calendarserver",
"id": "bcb5f024b17c1104c3e32691a4afc8b91e0a9e6b",
"size": "663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twistedcaldav/datafilters/test/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
ext_module = Extension(
'cchol',
['cchol.pyx'],
    include_dirs=[np.get_include()])
# Uncomment (and move inside the Extension call above) to build with OpenMP:
#     extra_compile_args=['-fopenmp'],
#     extra_link_args=['-fopenmp'])
setup(cmdclass = {'build_ext' : build_ext}, ext_modules = [ext_module])
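# Typical build command for using the extension in place:
#   python setup.py build_ext --inplace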
|
{
"content_hash": "cf15ec4784dd71498b04d5d6b82e8f3b",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 71,
"avg_line_length": 27.625,
"alnum_prop": 0.583710407239819,
"repo_name": "pysal/pPysal",
"id": "80440e2e5b741ba9f14ca08c0f35d340e627509f",
"size": "442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chol/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2559"
},
{
"name": "FORTRAN",
"bytes": "241"
},
{
"name": "HTML",
"bytes": "42412"
},
{
"name": "Python",
"bytes": "429423"
},
{
"name": "Shell",
"bytes": "2658"
}
],
"symlink_target": ""
}
|
"""
Functions for plotting nodes.
Functions
=========
.. currentmodule:: bayespy.plot
.. autosummary::
:toctree: generated/
pdf
contour
plot
hinton
gaussian_mixture_2d
Plotters
========
.. autosummary::
:toctree: generated/
Plotter
PDFPlotter
ContourPlotter
HintonPlotter
FunctionPlotter
GaussianTimeseriesPlotter
CategoricalMarkovChainPlotter
"""
import os, sys
############################################################################
# A STUPID WORKAROUND FOR A MATPLOTLIB 1.4.0 BUG RELATED TO INTERACTIVE MODE
# See: https://github.com/matplotlib/matplotlib/issues/3505
import __main__
if hasattr(__main__, '__file__'):
sys.ps1 = ('WORKAROUND FOR A BUG #3505 IN MATPLOTLIB.\n'
'IF YOU SEE THIS MESSAGE, TRY MATPLOTLIB!=1.4.0.')
# This workaround does not work on Python shell, only on stand-alone scripts
# and IPython. A better solution: require MPL!=1.4.0.
#############################################################################
import numpy as np
import scipy.sparse as sp
import scipy
from scipy import special
import matplotlib.pyplot as plt
from matplotlib import animation
#from matplotlib.pyplot import *
from bayespy.inference.vmp.nodes.categorical import CategoricalMoments
from bayespy.inference.vmp.nodes.gaussian import (GaussianMoments,
GaussianWishartMoments)
from bayespy.inference.vmp.nodes.beta import BetaMoments
from bayespy.inference.vmp.nodes.beta import DirichletMoments
from bayespy.inference.vmp.nodes.bernoulli import BernoulliMoments
from bayespy.inference.vmp.nodes.categorical import CategoricalMoments
from bayespy.inference.vmp.nodes.gamma import GammaMoments
from bayespy.inference.vmp.nodes.node import Node, Moments
from bayespy.utils import (misc,
random,
linalg)
# Users can use pyplot via this module
import matplotlib
mpl = matplotlib
pyplot = plt
def interactive(function):
"""A decorator for forcing functions to use the interactive mode.
Parameters
----------
function : callable
The function to be decorated
"""
def new_function(*args, **kwargs):
if mpl.is_interactive():
was_interactive = True
else:
was_interactive = False
mpl.interactive(True)
retval = function(*args, **kwargs)
if not was_interactive:
mpl.interactive(False)
return retval
return new_function
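# A small usage sketch for the decorator above; `_example_monitor` is a
# hypothetical helper, not part of the public API.
@interactive
def _example_monitor(node):
    # Drawn immediately because interactive mode is forced for this call.
    hinton(node)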
def _subplots(plotfunc, *args, fig=None, kwargs=None):
"""Create a collection of subplots
Each subplot is created with the same plotting function.
Inputs are given as pairs:
(x, 3), (y, 2), ...
where x,y,... are the input arrays and 3,2,... are the ndim
parameters. The last ndim axes of each array are interpreted as a
single element to the plotting function.
All high-level plotting functions should wrap low-level plotting
functions with this function in order to generate subplots for
plates.
"""
if kwargs is None:
kwargs = {}
if fig is None:
fig = plt.gcf()
# Parse shape and plates of each input array
shapes = [np.shape(x)[-n:] if n > 0 else ()
for (x,n) in args]
plates = [np.shape(x)[:-n] if n > 0 else np.shape(x)
for (x,n) in args]
# Get the full grid shape of the subplots
broadcasted_plates = misc.broadcasted_shape(*plates)
# Subplot indexing layout
M = np.prod(broadcasted_plates[-2::-2])
N = np.prod(broadcasted_plates[-1::-2])
strides_subplot = [np.prod(broadcasted_plates[(j+2)::2]) * N
if ((len(broadcasted_plates)-j) % 2) == 0 else
np.prod(broadcasted_plates[(j+2)::2])
for j in range(len(broadcasted_plates))]
# Plot each subplot
for ind in misc.nested_iterator(broadcasted_plates):
# Get the list of inputs for this subplot
broadcasted_args = []
for n in range(len(args)):
i = misc.safe_indices(ind, plates[n])
broadcasted_args.append(args[n][0][i])
# Plot the subplot using the given function
ind_subplot = np.einsum('i,i', ind, strides_subplot)
axes = fig.add_subplot(M, N, ind_subplot+1)
plotfunc(*broadcasted_args, axes=axes, **kwargs)
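# Usage sketch: treat the trailing two axes of a (3, 4, 5, 5) array as one
# Hinton diagram each, giving a 3x4 grid of subplots (`W` is hypothetical):
#   W = np.random.randn(3, 4, 5, 5)
#   _subplots(_hinton, (W, 2), kwargs=dict(vmax=1.0))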
def pdf(Z, x, *args, name=None, axes=None, fig=None, **kwargs):
"""
Plot probability density function of a scalar variable.
Parameters
----------
Z : node or function
Stochastic node or log pdf function
x : array
Grid points
"""
# TODO: Make it possible to plot a plated variable using _subplots function.
if axes is None and fig is None:
axes = plt.gca()
else:
if fig is None:
fig = plt.gcf()
axes = fig.add_subplot(111)
try:
lpdf = Z.logpdf(x)
except AttributeError:
lpdf = Z(x)
p = np.exp(lpdf)
retval = axes.plot(x, p, *args, **kwargs)
if name is None:
try:
name = Z.name
except AttributeError:
pass
if name:
axes.set_title(r'$q(%s)$' % (name))
axes.set_xlabel(r'$%s$' % (name))
return retval
def contour(Z, x, y, n=None, axes=None, fig=None, **kwargs):
"""
Plot 2-D probability density function of a 2-D variable.
Parameters
----------
Z : node or function
Stochastic node or log pdf function
x : array
Grid points on x axis
y : array
Grid points on y axis
"""
# TODO: Make it possible to plot a plated variable using _subplots function.
if axes is None and fig is None:
axes = plt.gca()
else:
if fig is None:
fig = plt.gcf()
axes = fig.add_subplot(111)
XY = misc.grid(x, y)
try:
lpdf = Z.logpdf(XY)
except AttributeError:
lpdf = Z(XY)
p = np.exp(lpdf)
shape = (np.size(x), np.size(y))
X = np.reshape(XY[:,0], shape)
Y = np.reshape(XY[:,1], shape)
P = np.reshape(p, shape)
if n is not None:
levels = np.linspace(0, np.amax(P), num=n+2)[1:-1]
return axes.contour(X, Y, P, levels, **kwargs)
else:
return axes.contour(X, Y, P, **kwargs)
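# Usage sketch (hypothetical 2-D node `X2` exposing `logpdf()`):
#   contour(X2, np.linspace(-3, 3, num=100), np.linspace(-3, 3, num=100), n=10)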
def plot_gaussian_mc(X, scale=2, **kwargs):
"""
Plot Gaussian Markov chain as a 1-D function
Parameters
----------
X : node
Node with Gaussian Markov chain moments.
"""
timeseries_gaussian(X, axis=-2, scale=scale, **kwargs)
def plot_bernoulli(X, axis=-1, scale=2, **kwargs):
"""
Plot Bernoulli node as a 1-D function
"""
X = X._convert(BernoulliMoments)
u_X = X.get_moments()
z = u_X[0]
return _timeseries_mean_and_error(z, None, axis=axis, **kwargs)
def plot_gaussian(X, axis=-1, scale=2, **kwargs):
"""
Plot Gaussian node as a 1-D function
Parameters
----------
X : node
Node with Gaussian moments.
axis : int
The index of the time axis.
"""
X = X._convert(GaussianMoments)
u_X = X.get_moments()
x = u_X[0]
xx = misc.get_diag(u_X[1], ndim=len(X.dims[0]))
std = scale * np.sqrt(xx - x**2)
#std = scale * np.sqrt(np.einsum('...ii->...i', xx) - x**2)
return _timeseries_mean_and_error(x, std, axis=axis, **kwargs)
def plot(Y, axis=-1, scale=2, center=False, **kwargs):
"""
Plot a variable or an array as 1-D function with errorbars
"""
if misc.is_numeric(Y):
return _timeseries_mean_and_error(Y, None, axis=axis, center=center, **kwargs)
if isinstance(Y, Node):
# Try Bernoulli plotting
try:
Y = Y._convert(BernoulliMoments)
except BernoulliMoments.NoConverterError:
pass
else:
return plot_bernoulli(Y, axis=axis, scale=scale, center=center, **kwargs)
# Try Gaussian plotting
try:
Y = Y._convert(GaussianMoments)
except GaussianMoments.NoConverterError:
pass
else:
return plot_gaussian(Y, axis=axis, scale=scale, center=center, **kwargs)
(mu, var) = Y.get_mean_and_variance()
std = np.sqrt(var)
return _timeseries_mean_and_error(mu, std,
axis=axis,
scale=scale,
center=center,
**kwargs)
# Some backward compatibility
def timeseries_gaussian_mc(*args, center=True, **kwargs):
return plot_gaussian_mc(*args, center=center, **kwargs)
def timeseries_gaussian(*args, center=True, **kwargs):
return plot_gaussian(*args, center=center, **kwargs)
timeseries_normal = timeseries_gaussian
def timeseries(*args, center=True, **kwargs):
return plot(*args, center=center, **kwargs)
def _timeseries_mean_and_error(y, std, *args, axis=-1, center=True, fig=None, **kwargs):
# TODO/FIXME: You must multiply by ones(plates) in order to plot
# broadcasted plates properly
if fig is None:
fig = plt.gcf()
y = np.atleast_1d(y)
shape = list(np.shape(y))
# Get and remove the length of the time axis
T = shape.pop(axis)
# Move time axis to first
y = np.rollaxis(y, axis)
if std is not None:
std = np.rollaxis(std, axis)
y = np.reshape(y, (T, -1))
if std is not None:
std = np.reshape(std, (T, -1))
# Remove 1s
shape = [s for s in shape if s > 1]
# Calculate number of rows and columns
shape = misc.multiply_shapes(shape, (1,1))
if len(shape) > 2:
raise Exception("Can plot only in 2 dimensions (rows and columns)")
(M, N) = shape
# Prefer plotting to rows
if M == 1:
M = N
N = 1
# Plot each timeseries
ax0 = fig.add_subplot(M, N, 1)
for i in range(M*N):
if i > 0:
# Share x axis between all subplots
ax = fig.add_subplot(M, N, i+1, sharex=ax0)
else:
ax = ax0
# Autoscale the axes to data and use tight y and x axes
ax.autoscale(enable=True, tight=True)
ax.set_ylim(auto=True)
if i < (M-1)*N:
# Remove x tick labels from other than the last row
plt.setp(ax.get_xticklabels(), visible=False)
if std is None:
errorplot(y=y[:,i], axes=ax, **kwargs)
else:
if len(args) > 0:
raise Exception("Can't handle extra arguments")
errorplot(y=y[:,i], error=std[:,i], axes=ax, **kwargs)
if center:
# Center the zero level on y-axis
ylim = ax.get_ylim()
vmax = np.max(np.abs(ylim))
ax.set_ylim([-vmax, vmax])
# Remove height space between subplots
fig.subplots_adjust(hspace=0)
def _blob(axes, x, y, area, colour):
"""
Draws a square-shaped blob with the given area (< 1) at
the given coordinates.
"""
hs = np.sqrt(area) / 2
xcorners = np.array([x - hs, x + hs, x + hs, x - hs])
ycorners = np.array([y - hs, y - hs, y + hs, y + hs])
axes.fill(xcorners, ycorners, colour, edgecolor=colour)
def _rectangle(axes, x, y, width, height, **kwargs):
_x = x - width/2
_y = y - height/2
rectangle = plt.Rectangle((_x, _y),
width,
height,
**kwargs)
axes.add_patch(rectangle)
return
def gaussian_mixture_2d(X, alpha=None, scale=2, fill=False, axes=None, **kwargs):
"""
Plot Gaussian mixture as ellipses in 2-D
Parameters
----------
X : Mixture node
alpha : Dirichlet-like node (optional)
Probabilities for the clusters
scale : float (optional)
Scale for the covariance ellipses (by default, 2)
"""
if axes is None:
axes = plt.gca()
mu_Lambda = X.parents[1]._convert(GaussianWishartMoments)
(mu, _, Lambda, _) = mu_Lambda.get_moments()
mu = np.linalg.solve(Lambda, mu)
if len(mu_Lambda.plates) != 1:
raise NotImplementedError("Not yet implemented for more plates")
K = mu_Lambda.plates[0]
width = np.zeros(K)
height = np.zeros(K)
angle = np.zeros(K)
for k in range(K):
m = mu[k]
L = Lambda[k]
(u, W) = scipy.linalg.eigh(L)
u[0] = np.sqrt(1/u[0])
u[1] = np.sqrt(1/u[1])
width[k] = 2*u[0]
height[k] = 2*u[1]
angle[k] = np.arctan(W[0,1] / W[0,0])
angle = 180 * angle / np.pi
mode_height = 1 / (width * height)
# Use cluster probabilities to adjust alpha channel
if alpha is not None:
# Compute the normalized probabilities in a numerically stable way
logsum_p = misc.logsumexp(alpha.u[0], axis=-1, keepdims=True)
logp = alpha.u[0] - logsum_p
p = np.exp(logp)
# Visibility is based on cluster mode peak height
visibility = mode_height * p
visibility /= np.amax(visibility)
else:
visibility = np.ones(K)
for k in range(K):
ell = mpl.patches.Ellipse(mu[k], scale*width[k], scale*height[k],
angle=(180+angle[k]),
fill=fill,
alpha=visibility[k],
**kwargs)
axes.add_artist(ell)
plt.axis('equal')
# If observed, plot the data too
if np.any(X.observed):
mask = np.array(X.observed) * np.ones(X.plates, dtype=np.bool)
y = X.u[0][mask]
plt.plot(y[:,0], y[:,1], 'r.')
return
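# Usage sketch (names hypothetical): for a Mixture node `Y` whose second
# parent carries Gaussian-Wishart moments and a Dirichlet node `alpha`:
#   gaussian_mixture_2d(Y, alpha=alpha, scale=2, fill=True)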
def _hinton(W, error=None, vmax=None, square=True, axes=None):
"""
Draws a Hinton diagram for visualizing a weight matrix.
Temporarily disables matplotlib interactive mode if it is on,
otherwise this takes forever.
Originally copied from
http://wiki.scipy.org/Cookbook/Matplotlib/HintonDiagrams
"""
if axes is None:
axes = plt.gca()
W = misc.atleast_nd(W, 2)
(height, width) = W.shape
if not vmax:
#vmax = 2**np.ceil(np.log(np.max(np.abs(W)))/np.log(2))
if error is not None:
vmax = np.max(np.abs(W) + error)
else:
vmax = np.max(np.abs(W))
axes.fill(0.5+np.array([0,width,width,0]),
0.5+np.array([0,0,height,height]),
'gray')
axes.axis('off')
if square:
axes.axis('equal')
axes.invert_yaxis()
for x in range(width):
for y in range(height):
_x = x+1
_y = y+1
w = W[y,x]
_w = np.abs(w)
if w > 0:
_c = 'white'
else:
_c = 'black'
if error is not None:
e = error[y,x]
if e < 0:
print(e, _w, vmax)
raise Exception("BUG? Negative error")
if _w + e > vmax:
print(e, _w, vmax)
raise Exception("BUG? Value+error greater than max")
_rectangle(axes,
_x,
_y,
min(1, np.sqrt((_w+e)/vmax)),
min(1, np.sqrt((_w+e)/vmax)),
edgecolor=_c,
fill=False)
_blob(axes, _x, _y, min(1, _w/vmax), _c)
def matrix(A, axes=None):
if axes is None:
axes = plt.gca()
A = np.atleast_2d(A)
vmax = np.max(np.abs(A))
return axes.imshow(A,
interpolation='nearest',
cmap='RdBu_r',
vmin=-vmax,
vmax=vmax)
def new_matrix(A, vmax=None):
A = np.atleast_2d(A)
if vmax is None:
vmax = np.max(np.abs(A))
(M, N) = np.shape(A)
for i in range(M):
for j in range(N):
pass
def gaussian_hinton(X, rows=None, cols=None, scale=1, fig=None):
"""
Plot the Hinton diagram of a Gaussian node
"""
if fig is None:
fig = plt.gcf()
# Get mean and second moment
X = X._convert(GaussianMoments)
(x, xx) = X.get_moments()
ndim = len(X.dims[0])
shape = X.get_shape(0)
size = len(X.get_shape(0))
# Compute standard deviation
xx = misc.get_diag(xx, ndim=ndim)
std = np.sqrt(xx - x**2)
# Force explicit elements when broadcasting
x = x * np.ones(shape)
std = std * np.ones(shape)
if rows is None:
rows = np.nan
if cols is None:
cols = np.nan
# Preprocess the axes to 0,...,ndim
if rows < 0:
rows += size
if cols < 0:
cols += size
if rows < 0 or rows >= size:
raise ValueError("Row axis invalid")
if cols < 0 or cols >= size:
raise ValueError("Column axis invalid")
# Remove non-row and non-column axes that have length 1
squeezed_shape = list(shape)
for i in reversed(range(len(shape))):
if shape[i] == 1 and i != rows and i != cols:
squeezed_shape.pop(i)
if i < cols:
cols -= 1
if i < rows:
rows -= 1
x = np.reshape(x, squeezed_shape)
std = np.reshape(std, squeezed_shape)
if np.ndim(x) < 2:
cols += 2 - np.ndim(x)
rows += 2 - np.ndim(x)
x = np.atleast_2d(x)
std = np.atleast_2d(std)
size = np.ndim(x)
if np.isnan(cols):
if rows != size - 1:
cols = size - 1
else:
cols = size - 2
if np.isnan(rows):
if cols != size - 1:
rows = size - 1
else:
rows = size - 2
# Put the row and column axes to the end
axes = [i for i in range(size) if i not in (rows, cols)] + [rows, cols]
x = np.transpose(x, axes=axes)
std = np.transpose(std, axes=axes)
vmax = np.max(np.abs(x) + scale*std)
if scale == 0:
_subplots(_hinton, (x, 2), fig=fig, kwargs=dict(vmax=vmax))
else:
def plotfunc(z, e, **kwargs):
return _hinton(z, error=e, **kwargs)
_subplots(plotfunc, (x, 2), (scale*std, 2), fig=fig, kwargs=dict(vmax=vmax))
def _hinton_figure(x, rows=None, cols=None, fig=None, square=True):
"""
Plot the Hinton diagram of a Gaussian node
"""
scale = 0
std = 0
if fig is None:
fig = plt.gcf()
# Get mean and second moment
shape = np.shape(x)
size = np.ndim(x)
if rows is None:
rows = np.nan
if cols is None:
cols = np.nan
# Preprocess the axes to 0,...,ndim
if rows < 0:
rows += size
if cols < 0:
cols += size
if rows < 0 or rows >= size:
raise ValueError("Row axis invalid")
if cols < 0 or cols >= size:
raise ValueError("Column axis invalid")
# Remove non-row and non-column axes that have length 1
squeezed_shape = list(shape)
for i in reversed(range(len(shape))):
if shape[i] == 1 and i != rows and i != cols:
squeezed_shape.pop(i)
if i < cols:
cols -= 1
if i < rows:
rows -= 1
x = np.reshape(x, squeezed_shape)
size = np.ndim(x)
if np.isnan(cols):
if rows != size - 1:
cols = size - 1
else:
cols = size - 2
if np.isnan(rows):
if cols != size - 1:
rows = size - 1
else:
rows = size - 2
# Put the row and column axes to the end
if np.ndim(x) >= 2:
axes = [i for i in range(size) if i not in (rows, cols)] + [rows, cols]
x = np.transpose(x, axes=axes)
#std = np.transpose(std, axes=axes)
vmax = np.max(np.abs(x) + scale*std)
kw = dict(vmax=vmax, square=square)
if scale == 0:
_subplots(_hinton, (x, 2), fig=fig, kwargs=kw)
else:
def plotfunc(z, e, **kwargs):
return _hinton(z, error=e, **kwargs)
_subplots(plotfunc, (x, 2), (scale*std, 2), fig=fig, kwargs=kw)
# For backwards compatibility:
gaussian_array = gaussian_hinton
def timeseries_categorical_mc(Z, fig=None):
if fig is None:
fig = plt.gcf()
# Make sure that the node is categorical
Z = Z._convert(CategoricalMoments)
# Get expectations (and broadcast explicitly)
z = Z._message_to_child()[0] * np.ones(Z.get_shape(0))
# Compute the subplot layout
z = misc.atleast_nd(z, 4)
if np.ndim(z) != 4:
raise ValueError("Can not plot arrays with over 4 axes")
M = np.shape(z)[0]
N = np.shape(z)[1]
# Plot Hintons
for i in range(M):
for j in range(N):
axes = fig.add_subplot(M, N, i*N+j+1)
_hinton(z[i,j].T, vmax=1.0, square=False, axes=axes)
def gamma_hinton(alpha, square=True, **kwargs):
"""
    Plot a gamma distributed random variable as a Hinton diagram
    """
    # Make sure that the node is gamma
    alpha = alpha._convert(GammaMoments)
    # Get the first moment <x>
x = alpha.get_moments()[0]
# Explicit broadcasting
x = x * np.ones(alpha.plates)
# Plot Hinton diagram
return _hinton_figure(x, square=square, **kwargs)
def beta_hinton(P, square=True):
"""
Plot a beta distributed random variable as a Hinton diagram
"""
# Make sure that the node is beta
P = P._convert(BetaMoments)
# Compute exp( <log p> )
p = np.exp(P._message_to_child()[0][...,0])
# Explicit broadcasting
p = p * np.ones(P.plates)
# Plot Hinton diagram
return _hinton(p, vmax=1.0, square=square)
def dirichlet_hinton(P, square=True):
"""
    Plot a Dirichlet distributed random variable as a Hinton diagram
    """
    # Make sure that the node is Dirichlet
P = P._convert(DirichletMoments)
# Compute exp( <log p> )
p = np.exp(P._message_to_child()[0])
# Explicit broadcasting
p = p * np.ones(P.plates+(1,))
# Plot Hinton diagram
return _hinton(p, vmax=1.0, square=square)
def bernoulli_hinton(Z, square=True):
"""
Plot a Bernoulli distributed random variable as a Hinton diagram
"""
# Make sure that the node is Bernoulli
Z = Z._convert(BernoulliMoments)
# Get <Z>
z = Z._message_to_child()[0]
# Explicit broadcasting
z = z * np.ones(Z.plates)
# Plot Hinton diagram
return _hinton(z, vmax=1.0, square=square)
def categorical_hinton(Z, square=True):
"""
    Plot a categorically distributed random variable as a Hinton diagram
    """
    # Make sure that the node is categorical
Z = Z._convert(CategoricalMoments)
# Get <Z>
z = Z._message_to_child()[0]
# Explicit broadcasting
z = z * np.ones(Z.plates+(1,))
# Plot Hinton diagram
return _hinton(np.squeeze(z), vmax=1.0, square=square)
def hinton(X, **kwargs):
r"""
Plot the Hinton diagram of a node
The keyword arguments depend on the node type. For some node types, the
diagram also shows uncertainty with non-filled rectangles. Currently,
beta-like, Gaussian-like and Dirichlet-like nodes are supported.
Parameters
----------
X : node
"""
if hasattr(X, "_convert"):
try:
X = X._convert(GaussianMoments)
except Moments.NoConverterError:
pass
else:
return gaussian_hinton(X, **kwargs)
try:
X = X._convert(GammaMoments)
except Moments.NoConverterError:
pass
else:
return gamma_hinton(X, **kwargs)
try:
X = X._convert(BetaMoments)
except Moments.NoConverterError:
pass
else:
return beta_hinton(X, **kwargs)
try:
X = X._convert(DirichletMoments)
except Moments.NoConverterError:
pass
else:
return dirichlet_hinton(X, **kwargs)
try:
X = X._convert(BernoulliMoments)
except Moments.NoConverterError:
pass
else:
return bernoulli_hinton(X, **kwargs)
try:
X = X._convert(CategoricalMoments)
except Moments.NoConverterError:
pass
else:
return categorical_hinton(X, **kwargs)
return _hinton_figure(X, **kwargs)
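# Usage sketch: `hinton` accepts both nodes and plain numeric arrays; a raw
# array falls through to the figure-level helper above:
#   hinton(np.random.randn(8, 8))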
class Plotter():
r"""
Wrapper for plotting functions and base class for node plotters
The purpose of this class is to collect all the parameters needed by a
plotting function and provide a callable interface which needs only the node
as the input.
Plotter instances are callable objects that plot a given node using a
specified plotting function.
Parameters
----------
plotter : function
Plotting function to use
args : defined by the plotting function
Additional inputs needed by the plotting function
kwargs : defined by the plotting function
Additional keyword arguments supported by the plotting function
Examples
--------
First, create a gamma variable:
>>> import numpy as np
>>> from bayespy.nodes import Gamma
>>> x = Gamma(4, 5)
The probability density function can be plotted as:
>>> import bayespy.plot as bpplt
>>> bpplt.pdf(x, np.linspace(0.1, 10, num=100)) # doctest: +ELLIPSIS
[<matplotlib.lines.Line2D object at 0x...>]
However, this can be problematic when one needs to provide a
plotting function for the inference engine as the inference engine
gives only the node as input. Thus, we need to create a simple
plotter wrapper:
>>> p = bpplt.Plotter(bpplt.pdf, np.linspace(0.1, 10, num=100))
Now, this callable object ``p`` needs only the node as the input:
>>> p(x) # doctest: +ELLIPSIS
[<matplotlib.lines.Line2D object at 0x...>]
Thus, it can be given to the inference engine to use as a plotting function:
>>> x = Gamma(4, 5, plotter=p)
>>> x.plot() # doctest: +ELLIPSIS
[<matplotlib.lines.Line2D object at 0x...>]
"""
def __init__(self, plotter, *args, **kwargs):
self._args = args
self._kwargs = kwargs
self._plotter = plotter
def __call__(self, X, fig=None):
"""
Plot the node using the specified plotting function
Parameters
----------
X : node
The plotted node
"""
return self._plotter(X, *self._args, fig=fig, **self._kwargs)
class PDFPlotter(Plotter):
r"""
Plotter of probability density function of a scalar node
Parameters
----------
x_grid : array
Numerical grid on which the density function is computed and
plotted
See also
--------
pdf
"""
def __init__(self, x_grid, **kwargs):
super().__init__(pdf, x_grid, **kwargs)
class ContourPlotter(Plotter):
r"""
Plotter of probability density function of a two-dimensional node
Parameters
----------
x1_grid : array
Grid for the first dimension
x2_grid : array
Grid for the second dimension
See also
--------
contour
"""
def __init__(self, x1_grid, x2_grid, **kwargs):
super().__init__(contour, x1_grid, x2_grid, **kwargs)
class HintonPlotter(Plotter):
r"""
Plotter of the Hinton diagram of a node
See also
--------
hinton
"""
def __init__(self, **kwargs):
super().__init__(hinton, **kwargs)
class FunctionPlotter(Plotter):
r"""
Plotter of a node as a 1-dimensional function
See also
--------
plot
"""
def __init__(self, **kwargs):
super().__init__(plot, **kwargs)
class GaussianMarkovChainPlotter(Plotter):
r"""
Plotter of a Gaussian Markov chain as a timeseries
"""
def __init__(self, **kwargs):
super().__init__(timeseries_gaussian_mc, **kwargs)
class GaussianTimeseriesPlotter(Plotter):
r"""
Plotter of a Gaussian node as a timeseries
"""
def __init__(self, **kwargs):
super().__init__(timeseries_gaussian, **kwargs)
class GaussianHintonPlotter(Plotter):
r"""
Plotter of a Gaussian node as a Hinton diagram
"""
def __init__(self, **kwargs):
super().__init__(gaussian_array, **kwargs)
class CategoricalMarkovChainPlotter(Plotter):
r"""
Plotter of a Categorical timeseries
"""
def __init__(self, **kwargs):
super().__init__(timeseries_categorical_mc, **kwargs)
def matrix_animation(A, filename=None, fps=25, fig=None, **kwargs):
if fig is None:
fig = plt.gcf()
axes = fig.add_subplot(111)
A = np.atleast_3d(A)
vmax = np.max(np.abs(A))
x = axes.imshow(A[0],
interpolation='nearest',
cmap='RdBu_r',
vmin=-vmax,
vmax=vmax,
**kwargs)
s = axes.set_title('t = %d' % 0)
def animate(nframe):
s.set_text('t = %d' % nframe)
x.set_array(A[nframe])
return (x, s)
anim = animation.FuncAnimation(fig, animate,
frames=np.shape(A)[0],
interval=1000/fps,
blit=False,
repeat=False)
return anim
def save_animation(anim, filename, fps=25, bitrate=5000, fig=None):
# A bug in numpy/matplotlib causes this not to work in python3.3:
# https://github.com/matplotlib/matplotlib/issues/1891
#
# So the following command does not work currently..
#
# anim.save(filename, fps=fps)
if fig is None:
fig = plt.gcf()
writer = animation.FFMpegFileWriter(fps=fps, bitrate=bitrate)
writer.setup(fig, filename, 100)
anim.save(filename,
fps=fps,
writer=writer,
bitrate=bitrate)
return
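# Usage sketch (random data; saving requires ffmpeg on the PATH):
#   anim = matrix_animation(np.random.randn(50, 10, 10))
#   save_animation(anim, 'frames.mp4', fps=25)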
def binary_matrix(A, axes=None):
if axes is None:
axes = plt.gca()
A = np.atleast_2d(A)
G = np.zeros(np.shape(A) + (3,))
G[A] = [0,0,0]
G[np.logical_not(A)] = [1,1,1]
axes.imshow(G, interpolation='nearest')
def gaussian_mixture_logpdf(x, w, mu, Sigma):
# Shape(x) = (N, D)
# Shape(w) = (K,)
# Shape(mu) = (K, D)
# Shape(Sigma) = (K, D, D)
# Shape(result) = (N,)
# Dimensionality
D = np.shape(x)[-1]
# Cholesky decomposition of the covariance matrix
U = linalg.chol(Sigma)
# Reshape x:
# Shape(x) = (N, 1, D)
x = np.expand_dims(x, axis=-2)
# (x-mu) and (x-mu)'*inv(Sigma)*(x-mu):
# Shape(v) = (N, K, D)
# Shape(z) = (N, K)
v = x - mu
z = np.einsum('...i,...i', v, linalg.chol_solve(U, v))
# Log-determinant of Sigma:
# Shape(ldet) = (K,)
ldet = linalg.chol_logdet(U)
    # Compute log pdf for each cluster:
    # Shape(lpdf) = (N, K)
    lpdf = misc.gaussian_logpdf(z, 0, 0, ldet, D)
    # Combine the cluster log-densities with the log-weights and
    # marginalize over clusters with logsumexp for numerical stability:
    # Shape(result) = (N,)
    return misc.logsumexp(lpdf + np.log(w), axis=-1)
def matrixplot(A, colorbar=False, axes=None):
if axes is None:
axes = plt.gca()
if sp.issparse(A):
A = A.toarray()
axes.imshow(A, interpolation='nearest')
if colorbar:
plt.colorbar(ax=axes)
def contourplot(x1, x2, y, colorbar=False, filled=True, axes=None):
""" Plots 2D contour plot. x1 and x2 are 1D vectors, y contains
the function values. y.size must be x1.size*x2.size. """
if axes is None:
axes = plt.gca()
y = np.reshape(y, (len(x2),len(x1)))
if filled:
axes.contourf(x1, x2, y)
else:
axes.contour(x1, x2, y)
if colorbar:
plt.colorbar(ax=axes)
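# Example: order `y` so that np.reshape(y, (len(x2), len(x1))) is valid,
# e.g. by evaluating on np.meshgrid(x1, x2):
#   x1 = np.linspace(-1, 1, num=30); x2 = np.linspace(-2, 2, num=40)
#   X1, X2 = np.meshgrid(x1, x2)
#   contourplot(x1, x2, np.exp(-X1**2 - X2**2))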
def errorplot(y=None, error=None, x=None, lower=None, upper=None,
color=(0,0,0,1), fillcolor=(0,0,0,0.4), axes=None, **kwargs):
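    """Plot curve `y` with a shaded error band.
    The band is given by `error` (symmetric) or by `lower`/`upper`
    deviations from `y`, all in the units of `y` (not absolute bounds).
    """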
if axes is None:
axes = plt.gca()
# Default inputs
if x is None:
x = np.arange(np.size(y))
# Parse errors (lower=lower/error/upper, upper=upper/error/lower)
if lower is None:
if error is not None:
lower = error
elif upper is not None:
lower = upper
if upper is None:
if error is not None:
upper = error
elif lower is not None:
upper = lower
# Plot errors
if (lower is not None) and (upper is not None):
l = y - lower
u = y + upper
axes.fill_between(x,
l,
u,
facecolor=fillcolor,
edgecolor=(0, 0, 0, 0),
linewidth=1,
interpolate=True)
# Plot function
axes.plot(x, y, color=color, **kwargs)
def plotmatrix(X):
"""
Creates a matrix of marginal plots.
    On the diagonal are marginal plots of each variable. Off-diagonal plot (i,j)
shows the joint marginal density of x_i and x_j.
"""
return X.plotmatrix()
def _pdf_t(mu, s2, nu, axes=None, scale=4, color='k'):
"""
"""
if axes is None:
axes = plt.gca()
s = np.sqrt(s2)
x = np.linspace(mu-scale*s, mu+scale*s, num=100)
y2 = (x-mu)**2 / s2
lpdf = random.t_logpdf(y2, np.log(s2), nu, 1)
p = np.exp(lpdf)
return axes.plot(x, p, color=color)
def _pdf_gamma(a, b, axes=None, scale=4, color='k'):
"""
"""
if axes is None:
axes = plt.gca()
if np.size(a) != 1 or np.size(b) != 1:
raise ValueError("Parameters must be scalars")
mean = a/b
v = scale*np.sqrt(a/b**2)
m = max(0, mean-v)
n = mean + v
x = np.linspace(m, n, num=100)
logx = np.log(x)
lpdf = random.gamma_logpdf(b*x,
logx,
a*logx,
a*np.log(b),
special.gammaln(a))
p = np.exp(lpdf)
return axes.plot(x, p, color=color)
def _contour_t(mu, Cov, nu, axes=None, scale=4, transpose=False, colors='k'):
"""
"""
if axes is None:
axes = plt.gca()
if np.shape(mu) != (2,) or np.shape(Cov) != (2,2) or np.shape(nu) != ():
print(np.shape(mu), np.shape(Cov), np.shape(nu))
raise ValueError("Only 2-d t-distribution allowed")
if transpose:
mu = mu[[1,0]]
Cov = Cov[np.ix_([1,0],[1,0])]
s = np.sqrt(np.diag(Cov))
x0 = np.linspace(mu[0]-scale*s[0], mu[0]+scale*s[0], num=100)
x1 = np.linspace(mu[1]-scale*s[1], mu[1]+scale*s[1], num=100)
X0X1 = misc.grid(x0, x1)
Y = X0X1 - mu
L = linalg.chol(Cov)
logdet_Cov = linalg.chol_logdet(L)
Z = linalg.chol_solve(L, Y)
Z = linalg.inner(Y, Z, ndim=1)
lpdf = random.t_logpdf(Z, logdet_Cov, nu, 2)
p = np.exp(lpdf)
shape = (np.size(x0), np.size(x1))
X0 = np.reshape(X0X1[:,0], shape)
X1 = np.reshape(X0X1[:,1], shape)
P = np.reshape(p, shape)
return axes.contour(X0, X1, P, colors=colors)
def _contour_gaussian_gamma(mu, s2, a, b, axes=None, transpose=False):
"""
"""
pass
|
{
"content_hash": "67da994a519ff2afe9aecc3aa53fc37c",
"timestamp": "",
"source": "github",
"line_count": 1341,
"max_line_length": 88,
"avg_line_length": 26.216256524981358,
"alnum_prop": 0.5488394584139265,
"repo_name": "SalemAmeen/bayespy",
"id": "5ac404ab8021edf787934026e31df2e8b81a3ff0",
"size": "35411",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bayespy/plot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1042901"
}
],
"symlink_target": ""
}
|
from django import forms
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from .mailchimp import subscribe_user
class RegistrationForm(forms.Form):
username = forms.CharField(label=_('Username'))
first_name = forms.CharField(label=_('First Name'), required=False)
last_name = forms.CharField(label=_('Last Name'), required=False)
email = forms.EmailField(label=_('Email'))
password = forms.CharField(label=_('Password'),
widget=forms.PasswordInput())
confirm = forms.CharField(label=_('Confirm Password'),
widget=forms.PasswordInput())
    subscribed = forms.BooleanField(label=_('Subscribe to newsletters'),
                                    required=False,
                                    initial=True)
def clean_username(self):
username = self.cleaned_data['username']
if User.objects.filter(username=username).exists():
raise forms.ValidationError(
_('This username is already registered'))
return username
def clean_email(self):
email = self.cleaned_data['email']
if User.objects.filter(email=email).exists():
raise forms.ValidationError(_('This email is already registered'))
return email
def clean_confirm(self):
password = self.cleaned_data['password']
confirm = self.cleaned_data['confirm']
if password != confirm:
raise forms.ValidationError(_('Passwords do not match'))
return password
def save(self):
cd = self.cleaned_data
user = User.objects.create_user(
username=cd['username'],
first_name=cd['first_name'],
last_name=cd['last_name'],
email=cd['email'],
password=cd['password'],
)
user.save()
if cd['subscribed']:
subscribe_user(cd['email'])
return user
class LoginForm(forms.Form):
username = forms.CharField(label=_('Username'))
password = forms.CharField(label=_('Password'),
widget=forms.PasswordInput())
def clean(self):
cleaned_data = super(LoginForm, self).clean()
username = cleaned_data.get('username')
password = cleaned_data.get('password')
try:
user = User.objects.get(username=username)
except User.DoesNotExist:
self.add_error('username',
forms.ValidationError(_('Wrong Username')))
else:
if password and not user.check_password(password):
self.add_error('password',
forms.ValidationError(_('Wrong Password')))
return cleaned_data
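# Usage sketch in a view (request handling is hypothetical):
#   form = RegistrationForm(request.POST)
#   if form.is_valid():
#       user = form.save()  # creates the user and optionally subscribes them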
|
{
"content_hash": "ac6fafbf40d6bea4dd75f19efad5d4f1",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 78,
"avg_line_length": 36.52,
"alnum_prop": 0.5899963490324936,
"repo_name": "samitnuk/online_shop",
"id": "1454bdc8aff82f3ae0f53995b431c6b8a612a670",
"size": "2739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/accounts/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "28580"
},
{
"name": "HTML",
"bytes": "32446"
},
{
"name": "JavaScript",
"bytes": "1111"
},
{
"name": "Python",
"bytes": "104680"
}
],
"symlink_target": ""
}
|
from .voxel_dir import task_dir, storage_dir, image_dir
|
{
"content_hash": "6c3a74f26a8913ae1bcaba0fd0b0e2ed",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 55,
"avg_line_length": 55,
"alnum_prop": 0.7818181818181819,
"repo_name": "andyneff/voxel-globe",
"id": "45307aa82f77b6437b0196378bacf8c87d605427",
"size": "55",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "voxel_globe/tools/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "21280"
},
{
"name": "Batchfile",
"bytes": "35781"
},
{
"name": "CSS",
"bytes": "1855"
},
{
"name": "HTML",
"bytes": "90597"
},
{
"name": "JavaScript",
"bytes": "131377"
},
{
"name": "Python",
"bytes": "302839"
},
{
"name": "Shell",
"bytes": "17009"
}
],
"symlink_target": ""
}
|
try:
# Python 2.7
from collections import OrderedDict
except ImportError:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.storage import Storage
def config(settings):
"""
Template settings for CERT (Community Emergency Response Teams)
http://eden.sahanafoundation.org/wiki/BluePrintCERT
Demo only, not in Production
"""
T = current.T
# Pre-Populate
settings.base.prepopulate = ("CERT", "default/users")
# Theme
#settings.base.theme = "CERT"
settings.base.system_name = T("Sahana Disaster Management Platform")
settings.base.system_name_short = T("Sahana")
# Uncomment to Hide the language toolbar
settings.L10n.display_toolbar = False
# Default timezone for users
settings.L10n.utc_offset = "UTC -0600"
# Uncomment these to use US-style dates in English
settings.L10n.date_format = "%m-%d-%Y"
# Start week on Sunday
settings.L10n.firstDOW = 0
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Default Country Code for telephone numbers
settings.L10n.default_country_code = 1
# Enable this to change the label for 'Mobile Phone'
settings.ui.label_mobile_phone = "Cell Phone"
# Enable this to change the label for 'Postcode'
settings.ui.label_postcode = "ZIP Code"
# PDF to Letter
settings.base.paper_size = T("Letter")
settings.hrm.multiple_orgs = False
settings.hrm.vol_experience = False
settings.hrm.use_description = False
settings.hrm.use_skills = False
settings.hrm.use_awards = False
settings.hrm.use_credentials = False
settings.msg.require_international_phone_numbers = False
settings.gis.geocode_imported_addresses = "google"
# Comment/uncomment modules here to disable/enable them
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = T("Home"),
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
("sync", Storage(
name_nice = T("Synchronization"),
#description = "Synchronization",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 1, # 1st item in the menu
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = 10
)),
("org", Storage(
name_nice = T("Organizations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = 10
)),
# All modules below here should be possible to disable safely
("hrm", Storage(
name_nice = T("Staff"),
#description = "Human Resources Management",
restricted = True,
module_type = None,
)),
("vol", Storage(
name_nice = T("Volunteers"),
#description = "Human Resources Management",
restricted = True,
module_type = 2,
)),
("cms", Storage(
name_nice = T("Content Management"),
#description = "Content Management System",
restricted = True,
module_type = 10,
)),
("doc", Storage(
name_nice = T("Documents"),
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = 10,
)),
("msg", Storage(
name_nice = T("Messaging"),
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
            # The user-visible functionality of this module isn't normally required. Rather, its main purpose is to be accessed from other modules.
module_type = None,
)),
#("project", Storage(
# name_nice = T("Projects"),
# #description = "Tracking of Projects, Activities and Tasks",
# restricted = True,
# module_type = 2
# )),
("scenario", Storage(
name_nice = T("Scenarios"),
#description = "Define Scenarios for allocation of appropriate Resources (Human, Assets & Facilities).",
restricted = True,
module_type = None,
)),
("event", Storage(
name_nice = T("Events"),
#description = "Activate Events (e.g. from Scenario templates) for allocation of appropriate Resources (Human, Assets & Facilities).",
restricted = True,
module_type = 10,
)),
("irs", Storage(
name_nice = T("Incidents"),
#description = "Incident Reporting System",
restricted = False,
module_type = 10
)),
])
# END =========================================================================
|
{
"content_hash": "047754c6ebfe0bc55a929da856bd8ced",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 151,
"avg_line_length": 40.16949152542373,
"alnum_prop": 0.5441631504922644,
"repo_name": "flavour/Turkey",
"id": "611153e317ca2ce8de1d84f59e76e343cb1e9685",
"size": "7135",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/templates/CERT/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2021594"
},
{
"name": "HTML",
"bytes": "1310585"
},
{
"name": "JavaScript",
"bytes": "19245058"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "28627483"
},
{
"name": "Ruby",
"bytes": "2051"
},
{
"name": "Shell",
"bytes": "4860"
},
{
"name": "XSLT",
"bytes": "2678742"
}
],
"symlink_target": ""
}
|
from django.contrib.syndication.views import * # merge modules
import sys
from django.contrib.syndication.views import Feed as DjangoFeed
from coffin.template import loader as coffin_loader
class Feed(DjangoFeed):
"""A ``Feed`` implementation that renders it's title and
description templates using Jinja2.
Unfortunately, Django's base ``Feed`` class is not very extensible
in this respect at all. For a real solution, we'd have to essentially
have to duplicate the whole class. So for now, we use this terrible
non-thread safe hack.
Another, somewhat crazy option would be:
* Render the templates ourselves through Jinja2 (possible
introduce new attributes to avoid having to rewrite the
existing ones).
* Make the rendered result available to Django/the superclass by
using a custom template loader using a prefix, say
"feed:<myproject.app.views.MyFeed>". The loader would simply
return the Jinja-rendered template (escaped), the Django template
mechanism would find no nodes and just pass the output through.
Possible even worse than this though.
"""
def get_feed(self, *args, **kwargs):
parent_module = sys.modules[DjangoFeed.__module__]
old_loader = parent_module.loader
parent_module.loader = coffin_loader
try:
return super(Feed, self).get_feed(*args, **kwargs)
finally:
parent_module.loader = old_loader
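# Usage sketch: subclass exactly as you would Django's Feed; the title and
# description templates are then rendered through Jinja2 (model hypothetical):
#   class LatestEntries(Feed):
#       title = "Latest entries"
#       link = "/entries/"
#       def items(self):
#           return Entry.objects.order_by('-created')[:10]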
|
{
"content_hash": "4bcee5df818520bce8c159509313bdb9",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 75,
"avg_line_length": 41.75,
"alnum_prop": 0.6892880904856953,
"repo_name": "havard024/prego",
"id": "7f3e86e8e7496e74a28491027a558073b22826db",
"size": "1503",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "venv/lib/python2.7/site-packages/coffin/contrib/syndication/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "2978"
},
{
"name": "CSS",
"bytes": "620190"
},
{
"name": "JavaScript",
"bytes": "2456120"
},
{
"name": "PHP",
"bytes": "25856"
},
{
"name": "Python",
"bytes": "34948766"
},
{
"name": "Shell",
"bytes": "12359"
},
{
"name": "TeX",
"bytes": "113674"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.contrib.auth.models import User
from donations.currencymap import SYMBOLS
from main.support import animations_list, font_effects
class Donation(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=250)
comment = models.TextField(blank=True, null=True)
timestamp = models.DateTimeField(blank=True, null=True)
amount = models.FloatField()
currency = models.CharField(max_length=10)
primary_amount = models.FloatField(blank=True, null=True)
primary_currency = models.CharField(max_length=10, blank=True, null=True)
type = models.CharField(max_length=20)
def as_dict(self):
short_amount = "%.2f" % self.amount
if round(self.amount, 0) == self.amount:
short_amount = "%.0f" % self.amount
return {
'name': self.name,
'comment': self.comment,
'amount': "%.2f" % self.amount,
'amount_integer': "%.0f" % self.amount,
'amount_short': short_amount,
'currency': self.currency,
'currencysymbol': SYMBOLS.get(self.currency, ""),
}
class TopList(models.Model):
user = models.ForeignKey(User)
count = models.IntegerField(default=1)
format = models.CharField(default='[[name]]: [[currencysymbol]][[amount]]', max_length=1000)
seperator = models.CharField(default=', ', max_length=100)
font = models.CharField(blank=True, null=True, max_length=255)
font_size = models.CharField(blank=True, null=True, max_length=255)
font_color = models.CharField(blank=True, null=True, max_length=255)
font_effect = models.CharField(blank=True, null=True, max_length=255, default=None, choices=font_effects())
font_weight = models.CharField(blank=True, null=True, max_length=255, default="normal")
outline_color = models.CharField(blank=True, null=True, max_length=255, default=None)
days = models.IntegerField(blank=True, null=True)
type = models.CharField(max_length=50, choices=(
('session', 'Session'),
('limited', 'Number of Days'),
('alltime', 'All Time'),
), default='session')
class Goal(models.Model):
user = models.ForeignKey(User)
start_date = models.DateTimeField()
end_date = models.DateTimeField(blank=True, null=True)
amount = models.FloatField()
description = models.CharField(max_length=1000, blank=True, null=True)
source_type = models.CharField(max_length=100, help_text="Limit donations to a specific type of donation for this goal.", default='', choices=(
('all', 'All types'),
('extralife', 'Extra Life'),
('fanfunding', 'Fan Funding/Super Chat'),
('imraising', 'Imraising'),
('twitchalerts', 'Twitch Alerts/Stream Labs'),
('streamjar', 'Stream Jar'),
('streamtip', 'Stream Tip')))
|
{
"content_hash": "180edeec3cabf7b716b276e46268a8e3",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 147,
"avg_line_length": 46.98360655737705,
"alnum_prop": 0.6566643405443127,
"repo_name": "google/mirandum",
"id": "ac2b5a4a630694b3c4faaeedffa124fdefdfcaa2",
"size": "3479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alerts/donations/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "9472"
},
{
"name": "Elixir",
"bytes": "574"
},
{
"name": "HTML",
"bytes": "122101"
},
{
"name": "JavaScript",
"bytes": "19438"
},
{
"name": "Jinja",
"bytes": "4124"
},
{
"name": "Python",
"bytes": "398732"
},
{
"name": "Shell",
"bytes": "3296"
}
],
"symlink_target": ""
}
|
'''perform automatic newline conversion
To perform automatic newline conversion, use::
[extensions]
win32text =
[encode]
** = cleverencode:
# or ** = macencode:
[decode]
** = cleverdecode:
# or ** = macdecode:
If not doing conversion, to make sure you do not commit CRLF/CR by accident::
[hooks]
pretxncommit.crlf = python:hgext.win32text.forbidcrlf
# or pretxncommit.cr = python:hgext.win32text.forbidcr
To do the same check on a server to prevent CRLF/CR from being
pushed or pulled::
[hooks]
pretxnchangegroup.crlf = python:hgext.win32text.forbidcrlf
# or pretxnchangegroup.cr = python:hgext.win32text.forbidcr
'''
from mercurial.i18n import _
from mercurial.node import short
from mercurial import util
import re
# regexp for single LF without CR preceding.
re_single_lf = re.compile('(^|[^\r])\n', re.MULTILINE)
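# e.g. re_single_lf.sub('\\1\r\n', 'a\nb\r\nc\n') -> 'a\r\nb\r\nc\r\n';
# bare LFs gain a CR while existing CRLF pairs are left untouched.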
newlinestr = {'\r\n': 'CRLF', '\r': 'CR'}
filterstr = {'\r\n': 'clever', '\r': 'mac'}
def checknewline(s, newline, ui=None, repo=None, filename=None):
# warn if already has 'newline' in repository.
# it might cause unexpected eol conversion.
# see issue 302:
# http://mercurial.selenic.com/bts/issue302
if newline in s and ui and filename and repo:
ui.warn(_('WARNING: %s already has %s line endings\n'
'and does not need EOL conversion by the win32text plugin.\n'
'Before your next commit, please reconsider your '
'encode/decode settings in \nMercurial.ini or %s.\n') %
(filename, newlinestr[newline], repo.join('hgrc')))
def dumbdecode(s, cmd, **kwargs):
checknewline(s, '\r\n', **kwargs)
# replace single LF to CRLF
return re_single_lf.sub('\\1\r\n', s)
def dumbencode(s, cmd):
return s.replace('\r\n', '\n')
def macdumbdecode(s, cmd, **kwargs):
checknewline(s, '\r', **kwargs)
return s.replace('\n', '\r')
def macdumbencode(s, cmd):
return s.replace('\r', '\n')
def cleverdecode(s, cmd, **kwargs):
if not util.binary(s):
return dumbdecode(s, cmd, **kwargs)
return s
def cleverencode(s, cmd):
if not util.binary(s):
return dumbencode(s, cmd)
return s
def macdecode(s, cmd, **kwargs):
if not util.binary(s):
return macdumbdecode(s, cmd, **kwargs)
return s
def macencode(s, cmd):
if not util.binary(s):
return macdumbencode(s, cmd)
return s
_filters = {
'dumbdecode:': dumbdecode,
'dumbencode:': dumbencode,
'cleverdecode:': cleverdecode,
'cleverencode:': cleverencode,
'macdumbdecode:': macdumbdecode,
'macdumbencode:': macdumbencode,
'macdecode:': macdecode,
'macencode:': macencode,
}
def forbidnewline(ui, repo, hooktype, node, newline, **kwargs):
halt = False
seen = set()
# we try to walk changesets in reverse order from newest to
# oldest, so that if we see a file multiple times, we take the
# newest version as canonical. this prevents us from blocking a
# changegroup that contains an unacceptable commit followed later
# by a commit that fixes the problem.
tip = repo['tip']
for rev in xrange(len(repo)-1, repo[node].rev()-1, -1):
c = repo[rev]
for f in c.files():
if f in seen or f not in tip or f not in c:
continue
seen.add(f)
data = c[f].data()
if not util.binary(data) and newline in data:
if not halt:
ui.warn(_('Attempt to commit or push text file(s) '
'using %s line endings\n') %
newlinestr[newline])
ui.warn(_('in %s: %s\n') % (short(c.node()), f))
halt = True
if halt and hooktype == 'pretxnchangegroup':
crlf = newlinestr[newline].lower()
filter = filterstr[newline]
ui.warn(_('\nTo prevent this mistake in your local repository,\n'
'add to Mercurial.ini or .hg/hgrc:\n'
'\n'
'[hooks]\n'
'pretxncommit.%s = python:hgext.win32text.forbid%s\n'
'\n'
'and also consider adding:\n'
'\n'
'[extensions]\n'
'win32text =\n'
'[encode]\n'
'** = %sencode:\n'
'[decode]\n'
'** = %sdecode:\n') % (crlf, crlf, filter, filter))
return halt
def forbidcrlf(ui, repo, hooktype, node, **kwargs):
return forbidnewline(ui, repo, hooktype, node, '\r\n', **kwargs)
def forbidcr(ui, repo, hooktype, node, **kwargs):
return forbidnewline(ui, repo, hooktype, node, '\r', **kwargs)
def reposetup(ui, repo):
if not repo.local():
return
for name, fn in _filters.iteritems():
repo.adddatafilter(name, fn)
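# A hedged hgrc sketch wiring the filters registered above to file patterns
# (the '**' glob is illustrative; the hook counterparts are shown in the
# extension docstring and in the forbidnewline warning message):
#
#   [extensions]
#   win32text =
#   [encode]
#   ** = cleverencode:
#   [decode]
#   ** = cleverdecode: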
|
{
"content_hash": "65ac016f0315dac8fd3f6c9a67f15443",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 79,
"avg_line_length": 32.05298013245033,
"alnum_prop": 0.5888429752066116,
"repo_name": "joewalnes/idea-community",
"id": "b3a25b0aa75e189915086a41a7136a7d0fa977d0",
"size": "5127",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "plugins/hg4idea/testData/bin/hgext/win32text.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "387"
},
{
"name": "C",
"bytes": "136045"
},
{
"name": "C#",
"bytes": "103"
},
{
"name": "C++",
"bytes": "40449"
},
{
"name": "Emacs Lisp",
"bytes": "2507"
},
{
"name": "Erlang",
"bytes": "10"
},
{
"name": "Groovy",
"bytes": "361320"
},
{
"name": "Java",
"bytes": "89694599"
},
{
"name": "JavaScript",
"bytes": "978"
},
{
"name": "Objective-C",
"bytes": "1877"
},
{
"name": "PHP",
"bytes": "145"
},
{
"name": "Perl",
"bytes": "6523"
},
{
"name": "Python",
"bytes": "1699274"
},
{
"name": "Shell",
"bytes": "6965"
},
{
"name": "VimL",
"bytes": "5950"
}
],
"symlink_target": ""
}
|
"""Constants used by Home Assistant components."""
MAJOR_VERSION = 0
MINOR_VERSION = 88
PATCH_VERSION = '0.dev0'
__short_version__ = '{}.{}'.format(MAJOR_VERSION, MINOR_VERSION)
__version__ = '{}.{}'.format(__short_version__, PATCH_VERSION)
REQUIRED_PYTHON_VER = (3, 5, 3)
# Format for platform files
PLATFORM_FORMAT = '{platform}.{domain}'
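# e.g. PLATFORM_FORMAT.format(platform='hue', domain='light') -> 'hue.light'
# (the component names here are illustrative)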
# Can be used to specify a catch all when registering state or event listeners.
MATCH_ALL = '*'
# Entity target all constant
ENTITY_MATCH_ALL = 'all'
# If no name is specified
DEVICE_DEFAULT_NAME = 'Unnamed Device'
# Sun events
SUN_EVENT_SUNSET = 'sunset'
SUN_EVENT_SUNRISE = 'sunrise'
# #### CONFIG ####
CONF_ABOVE = 'above'
CONF_ACCESS_TOKEN = 'access_token'
CONF_ADDRESS = 'address'
CONF_AFTER = 'after'
CONF_ALIAS = 'alias'
CONF_API_KEY = 'api_key'
CONF_API_VERSION = 'api_version'
CONF_AT = 'at'
CONF_AUTHENTICATION = 'authentication'
CONF_AUTH_MFA_MODULES = 'auth_mfa_modules'
CONF_AUTH_PROVIDERS = 'auth_providers'
CONF_BASE = 'base'
CONF_BEFORE = 'before'
CONF_BELOW = 'below'
CONF_BINARY_SENSORS = 'binary_sensors'
CONF_BLACKLIST = 'blacklist'
CONF_BRIGHTNESS = 'brightness'
CONF_CODE = 'code'
CONF_COLOR_TEMP = 'color_temp'
CONF_COMMAND = 'command'
CONF_COMMAND_CLOSE = 'command_close'
CONF_COMMAND_OFF = 'command_off'
CONF_COMMAND_ON = 'command_on'
CONF_COMMAND_OPEN = 'command_open'
CONF_COMMAND_STATE = 'command_state'
CONF_COMMAND_STOP = 'command_stop'
CONF_CONDITION = 'condition'
CONF_COVERS = 'covers'
CONF_CURRENCY = 'currency'
CONF_CUSTOMIZE = 'customize'
CONF_CUSTOMIZE_DOMAIN = 'customize_domain'
CONF_CUSTOMIZE_GLOB = 'customize_glob'
CONF_DELAY_TIME = 'delay_time'
CONF_DEVICE = 'device'
CONF_DEVICE_CLASS = 'device_class'
CONF_DEVICES = 'devices'
CONF_DISARM_AFTER_TRIGGER = 'disarm_after_trigger'
CONF_DISCOVERY = 'discovery'
CONF_DISKS = 'disks'
CONF_DISPLAY_CURRENCY = 'display_currency'
CONF_DISPLAY_OPTIONS = 'display_options'
CONF_DOMAIN = 'domain'
CONF_DOMAINS = 'domains'
CONF_EFFECT = 'effect'
CONF_ELEVATION = 'elevation'
CONF_EMAIL = 'email'
CONF_ENTITIES = 'entities'
CONF_ENTITY_ID = 'entity_id'
CONF_ENTITY_NAMESPACE = 'entity_namespace'
CONF_ENTITY_PICTURE_TEMPLATE = 'entity_picture_template'
CONF_EVENT = 'event'
CONF_EXCLUDE = 'exclude'
CONF_FILE_PATH = 'file_path'
CONF_FILENAME = 'filename'
CONF_FOR = 'for'
CONF_FORCE_UPDATE = 'force_update'
CONF_FRIENDLY_NAME = 'friendly_name'
CONF_FRIENDLY_NAME_TEMPLATE = 'friendly_name_template'
CONF_HEADERS = 'headers'
CONF_HOST = 'host'
CONF_HOSTS = 'hosts'
CONF_HS = 'hs'
CONF_ICON = 'icon'
CONF_ICON_TEMPLATE = 'icon_template'
CONF_INCLUDE = 'include'
CONF_ID = 'id'
CONF_IP_ADDRESS = 'ip_address'
CONF_LATITUDE = 'latitude'
CONF_LONGITUDE = 'longitude'
CONF_LIGHTS = 'lights'
CONF_MAC = 'mac'
CONF_METHOD = 'method'
CONF_MAXIMUM = 'maximum'
CONF_MINIMUM = 'minimum'
CONF_MODE = 'mode'
CONF_MONITORED_CONDITIONS = 'monitored_conditions'
CONF_MONITORED_VARIABLES = 'monitored_variables'
CONF_NAME = 'name'
CONF_OFFSET = 'offset'
CONF_OPTIMISTIC = 'optimistic'
CONF_PACKAGES = 'packages'
CONF_PASSWORD = 'password'
CONF_PATH = 'path'
CONF_PAYLOAD = 'payload'
CONF_PAYLOAD_OFF = 'payload_off'
CONF_PAYLOAD_ON = 'payload_on'
CONF_PENDING_TIME = 'pending_time'
CONF_PIN = 'pin'
CONF_PLATFORM = 'platform'
CONF_PORT = 'port'
CONF_PREFIX = 'prefix'
CONF_PROFILE_NAME = 'profile_name'
CONF_PROTOCOL = 'protocol'
CONF_PROXY_SSL = 'proxy_ssl'
CONF_QUOTE = 'quote'
CONF_RADIUS = 'radius'
CONF_RECIPIENT = 'recipient'
CONF_REGION = 'region'
CONF_RESOURCE = 'resource'
CONF_RESOURCES = 'resources'
CONF_RGB = 'rgb'
CONF_ROOM = 'room'
CONF_SCAN_INTERVAL = 'scan_interval'
CONF_SENDER = 'sender'
CONF_SENSOR_TYPE = 'sensor_type'
CONF_SENSORS = 'sensors'
CONF_SHOW_ON_MAP = 'show_on_map'
CONF_SLAVE = 'slave'
CONF_SOURCE = 'source'
CONF_SSL = 'ssl'
CONF_STATE = 'state'
CONF_STATE_TEMPLATE = 'state_template'
CONF_STRUCTURE = 'structure'
CONF_SWITCHES = 'switches'
CONF_TEMPERATURE_UNIT = 'temperature_unit'
CONF_TIME_ZONE = 'time_zone'
CONF_TIMEOUT = 'timeout'
CONF_TOKEN = 'token'
CONF_TRIGGER_TIME = 'trigger_time'
CONF_TTL = 'ttl'
CONF_TYPE = 'type'
CONF_UNIT_OF_MEASUREMENT = 'unit_of_measurement'
CONF_UNIT_SYSTEM = 'unit_system'
CONF_UPDATE_INTERVAL = 'update_interval'
CONF_URL = 'url'
CONF_USERNAME = 'username'
CONF_VALUE_TEMPLATE = 'value_template'
CONF_VERIFY_SSL = 'verify_ssl'
CONF_WEBHOOK_ID = 'webhook_id'
CONF_WEEKDAY = 'weekday'
CONF_WHITELIST = 'whitelist'
CONF_WHITELIST_EXTERNAL_DIRS = 'whitelist_external_dirs'
CONF_WHITE_VALUE = 'white_value'
CONF_XY = 'xy'
CONF_ZONE = 'zone'
# #### EVENTS ####
EVENT_HOMEASSISTANT_START = 'homeassistant_start'
EVENT_HOMEASSISTANT_STOP = 'homeassistant_stop'
EVENT_HOMEASSISTANT_CLOSE = 'homeassistant_close'
EVENT_STATE_CHANGED = 'state_changed'
EVENT_TIME_CHANGED = 'time_changed'
EVENT_CALL_SERVICE = 'call_service'
EVENT_PLATFORM_DISCOVERED = 'platform_discovered'
EVENT_COMPONENT_LOADED = 'component_loaded'
EVENT_SERVICE_REGISTERED = 'service_registered'
EVENT_SERVICE_REMOVED = 'service_removed'
EVENT_LOGBOOK_ENTRY = 'logbook_entry'
EVENT_THEMES_UPDATED = 'themes_updated'
EVENT_TIMER_OUT_OF_SYNC = 'timer_out_of_sync'
EVENT_AUTOMATION_TRIGGERED = 'automation_triggered'
EVENT_SCRIPT_STARTED = 'script_started'
# #### DEVICE CLASSES ####
DEVICE_CLASS_BATTERY = 'battery'
DEVICE_CLASS_HUMIDITY = 'humidity'
DEVICE_CLASS_ILLUMINANCE = 'illuminance'
DEVICE_CLASS_TEMPERATURE = 'temperature'
DEVICE_CLASS_TIMESTAMP = 'timestamp'
DEVICE_CLASS_PRESSURE = 'pressure'
# #### STATES ####
STATE_ON = 'on'
STATE_OFF = 'off'
STATE_HOME = 'home'
STATE_NOT_HOME = 'not_home'
STATE_UNKNOWN = 'unknown'
STATE_OPEN = 'open'
STATE_OPENING = 'opening'
STATE_CLOSED = 'closed'
STATE_CLOSING = 'closing'
STATE_PLAYING = 'playing'
STATE_PAUSED = 'paused'
STATE_IDLE = 'idle'
STATE_STANDBY = 'standby'
STATE_ALARM_DISARMED = 'disarmed'
STATE_ALARM_ARMED_HOME = 'armed_home'
STATE_ALARM_ARMED_AWAY = 'armed_away'
STATE_ALARM_ARMED_NIGHT = 'armed_night'
STATE_ALARM_ARMED_CUSTOM_BYPASS = 'armed_custom_bypass'
STATE_ALARM_PENDING = 'pending'
STATE_ALARM_ARMING = 'arming'
STATE_ALARM_DISARMING = 'disarming'
STATE_ALARM_TRIGGERED = 'triggered'
STATE_LOCKED = 'locked'
STATE_UNLOCKED = 'unlocked'
STATE_UNAVAILABLE = 'unavailable'
STATE_OK = 'ok'
STATE_PROBLEM = 'problem'
# #### STATE AND EVENT ATTRIBUTES ####
# Attribution
ATTR_ATTRIBUTION = 'attribution'
# Credentials
ATTR_CREDENTIALS = 'credentials'
# Contains time-related attributes
ATTR_NOW = 'now'
ATTR_DATE = 'date'
ATTR_TIME = 'time'
ATTR_SECONDS = 'seconds'
# Contains domain, service for a SERVICE_CALL event
ATTR_DOMAIN = 'domain'
ATTR_SERVICE = 'service'
ATTR_SERVICE_DATA = 'service_data'
# IDs
ATTR_ID = 'id'
# Name
ATTR_NAME = 'name'
# Contains one string or a list of strings, each being an entity id
ATTR_ENTITY_ID = 'entity_id'
# String with a friendly name for the entity
ATTR_FRIENDLY_NAME = 'friendly_name'
# A picture to represent entity
ATTR_ENTITY_PICTURE = 'entity_picture'
# Icon to use in the frontend
ATTR_ICON = 'icon'
# The unit of measurement if applicable
ATTR_UNIT_OF_MEASUREMENT = 'unit_of_measurement'
CONF_UNIT_SYSTEM_METRIC = 'metric' # type: str
CONF_UNIT_SYSTEM_IMPERIAL = 'imperial' # type: str
# Electrical attributes
ATTR_VOLTAGE = 'voltage'
# Contains the information that is discovered
ATTR_DISCOVERED = 'discovered'
# Location of the device/sensor
ATTR_LOCATION = 'location'
ATTR_BATTERY_CHARGING = 'battery_charging'
ATTR_BATTERY_LEVEL = 'battery_level'
ATTR_WAKEUP = 'wake_up_interval'
# For devices which support a code attribute
ATTR_CODE = 'code'
ATTR_CODE_FORMAT = 'code_format'
# For calling a device specific command
ATTR_COMMAND = 'command'
# For devices which support an armed state
ATTR_ARMED = 'device_armed'
# For devices which support a locked state
ATTR_LOCKED = 'locked'
# For sensors that support 'tripping', e.g. motion and door sensors
ATTR_TRIPPED = 'device_tripped'
# For sensors that support 'tripping' this holds the most recent
# time the device was tripped
ATTR_LAST_TRIP_TIME = 'last_tripped_time'
# For all entities, this holds whether or not they should be hidden
ATTR_HIDDEN = 'hidden'
# Location of the entity
ATTR_LATITUDE = 'latitude'
ATTR_LONGITUDE = 'longitude'
# Accuracy of location in meters
ATTR_GPS_ACCURACY = 'gps_accuracy'
# If state is assumed
ATTR_ASSUMED_STATE = 'assumed_state'
ATTR_STATE = 'state'
ATTR_OPTION = 'option'
# Bitfield of supported component features for the entity
ATTR_SUPPORTED_FEATURES = 'supported_features'
# Class of device within its domain
ATTR_DEVICE_CLASS = 'device_class'
# Temperature attribute
ATTR_TEMPERATURE = 'temperature'
# #### UNITS OF MEASUREMENT ####
# Temperature units
TEMP_CELSIUS = '°C'
TEMP_FAHRENHEIT = '°F'
# Length units
LENGTH_CENTIMETERS = 'cm' # type: str
LENGTH_METERS = 'm' # type: str
LENGTH_KILOMETERS = 'km' # type: str
LENGTH_INCHES = 'in' # type: str
LENGTH_FEET = 'ft' # type: str
LENGTH_YARD = 'yd' # type: str
LENGTH_MILES = 'mi' # type: str
# Volume units
VOLUME_LITERS = 'L' # type: str
VOLUME_MILLILITERS = 'mL' # type: str
VOLUME_GALLONS = 'gal' # type: str
VOLUME_FLUID_OUNCE = 'fl. oz.' # type: str
# Mass units
MASS_GRAMS = 'g' # type: str
MASS_KILOGRAMS = 'kg' # type: str
MASS_OUNCES = 'oz' # type: str
MASS_POUNDS = 'lb' # type: str
# UV Index units
UNIT_UV_INDEX = 'UV index' # type: str
# #### SERVICES ####
SERVICE_HOMEASSISTANT_STOP = 'stop'
SERVICE_HOMEASSISTANT_RESTART = 'restart'
SERVICE_TURN_ON = 'turn_on'
SERVICE_TURN_OFF = 'turn_off'
SERVICE_TOGGLE = 'toggle'
SERVICE_RELOAD = 'reload'
SERVICE_VOLUME_UP = 'volume_up'
SERVICE_VOLUME_DOWN = 'volume_down'
SERVICE_VOLUME_MUTE = 'volume_mute'
SERVICE_VOLUME_SET = 'volume_set'
SERVICE_MEDIA_PLAY_PAUSE = 'media_play_pause'
SERVICE_MEDIA_PLAY = 'media_play'
SERVICE_MEDIA_PAUSE = 'media_pause'
SERVICE_MEDIA_STOP = 'media_stop'
SERVICE_MEDIA_NEXT_TRACK = 'media_next_track'
SERVICE_MEDIA_PREVIOUS_TRACK = 'media_previous_track'
SERVICE_MEDIA_SEEK = 'media_seek'
SERVICE_SHUFFLE_SET = 'shuffle_set'
SERVICE_ALARM_DISARM = 'alarm_disarm'
SERVICE_ALARM_ARM_HOME = 'alarm_arm_home'
SERVICE_ALARM_ARM_AWAY = 'alarm_arm_away'
SERVICE_ALARM_ARM_NIGHT = 'alarm_arm_night'
SERVICE_ALARM_ARM_CUSTOM_BYPASS = 'alarm_arm_custom_bypass'
SERVICE_ALARM_TRIGGER = 'alarm_trigger'
SERVICE_LOCK = 'lock'
SERVICE_UNLOCK = 'unlock'
SERVICE_OPEN = 'open'
SERVICE_CLOSE = 'close'
SERVICE_CLOSE_COVER = 'close_cover'
SERVICE_CLOSE_COVER_TILT = 'close_cover_tilt'
SERVICE_OPEN_COVER = 'open_cover'
SERVICE_OPEN_COVER_TILT = 'open_cover_tilt'
SERVICE_SET_COVER_POSITION = 'set_cover_position'
SERVICE_SET_COVER_TILT_POSITION = 'set_cover_tilt_position'
SERVICE_STOP_COVER = 'stop_cover'
SERVICE_STOP_COVER_TILT = 'stop_cover_tilt'
SERVICE_SELECT_OPTION = 'select_option'
# #### API / REMOTE ####
SERVER_PORT = 8123
URL_ROOT = '/'
URL_API = '/api/'
URL_API_STREAM = '/api/stream'
URL_API_CONFIG = '/api/config'
URL_API_DISCOVERY_INFO = '/api/discovery_info'
URL_API_STATES = '/api/states'
URL_API_STATES_ENTITY = '/api/states/{}'
URL_API_EVENTS = '/api/events'
URL_API_EVENTS_EVENT = '/api/events/{}'
URL_API_SERVICES = '/api/services'
URL_API_SERVICES_SERVICE = '/api/services/{}/{}'
URL_API_COMPONENTS = '/api/components'
URL_API_ERROR_LOG = '/api/error_log'
URL_API_LOG_OUT = '/api/log_out'
URL_API_TEMPLATE = '/api/template'
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_MOVED_PERMANENTLY = 301
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_NOT_FOUND = 404
HTTP_METHOD_NOT_ALLOWED = 405
HTTP_UNPROCESSABLE_ENTITY = 422
HTTP_TOO_MANY_REQUESTS = 429
HTTP_INTERNAL_SERVER_ERROR = 500
HTTP_SERVICE_UNAVAILABLE = 503
HTTP_BASIC_AUTHENTICATION = 'basic'
HTTP_DIGEST_AUTHENTICATION = 'digest'
HTTP_HEADER_HA_AUTH = 'X-HA-access'
HTTP_HEADER_X_REQUESTED_WITH = 'X-Requested-With'
CONTENT_TYPE_JSON = 'application/json'
CONTENT_TYPE_MULTIPART = 'multipart/x-mixed-replace; boundary={}'
CONTENT_TYPE_TEXT_PLAIN = 'text/plain'
# The exit code to send to request a restart
RESTART_EXIT_CODE = 100
UNIT_NOT_RECOGNIZED_TEMPLATE = '{} is not a recognized {} unit.' # type: str
LENGTH = 'length' # type: str
MASS = 'mass' # type: str
VOLUME = 'volume' # type: str
TEMPERATURE = 'temperature' # type: str
SPEED_MS = 'speed_ms' # type: str
ILLUMINANCE = 'illuminance' # type: str
WEEKDAYS = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
# The degree of precision for platforms
PRECISION_WHOLE = 1
PRECISION_HALVES = 0.5
PRECISION_TENTHS = 0.1
# Static list of entities that will never be exposed to
# cloud, alexa, or google_home components
CLOUD_NEVER_EXPOSED_ENTITIES = ['group.all_locks']
|
{
"content_hash": "115c9de4830998e740517d09630046fa",
"timestamp": "",
"source": "github",
"line_count": 457,
"max_line_length": 79,
"avg_line_length": 27.49671772428884,
"alnum_prop": 0.720913576317046,
"repo_name": "PetePriority/home-assistant",
"id": "1a3d4e2e455afad0ca70fa25ffae383005d145d6",
"size": "12584",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/const.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1073"
},
{
"name": "Python",
"bytes": "13985647"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
}
|
import json
import logging
import unittest
from core.Database import Database
from core.entities.Plugin import Plugin
logging.basicConfig(level=logging.DEBUG, format='[%(levelname)s] [%(asctime)s] (%(threadName)-10s) %(message)s', filename='debug.log', filemode='w')
class PluginsTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls.config = Database()
        cls.config.create_database()
        Plugin._config = cls.config
def test_mock_plugin(self):
mp = Plugin('mock', 'mock')
self.assertEqual(mp.call('test'), 'test')
def test_plugin_listall(self):
mp = Plugin('mock', 'mock')
mp.save()
try:
self.assertEqual(
[p.json_repr() for p in Plugin.get_all()],
[{"enabled": True,
"instname": "mock",
"params": "{}",
"name": "mock",
"state": "unknown"}]
)
finally:
mp.delete()
|
{
"content_hash": "ef491ea583ada6868236dd9f3f4300cd",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 149,
"avg_line_length": 28.363636363636363,
"alnum_prop": 0.5459401709401709,
"repo_name": "Cirreth/shome",
"id": "852bb11c0e32384061d1c2907f2d45c96f9d5759",
"size": "936",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unittests/PluginsTest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "125643"
},
{
"name": "HTML",
"bytes": "31880"
},
{
"name": "JavaScript",
"bytes": "317583"
},
{
"name": "Python",
"bytes": "91654"
},
{
"name": "Smarty",
"bytes": "197"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('search', '0011_auto_20170810_1205'),
]
operations = [
migrations.AlterField(
model_name='savedsearch',
name='name',
field=models.CharField(blank=True, max_length=100, null=True),
),
]
|
{
"content_hash": "51a1a7469fc079cba4a1290994802b3e",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 74,
"avg_line_length": 22.444444444444443,
"alnum_prop": 0.6014851485148515,
"repo_name": "erikng/sal",
"id": "cba7c9bd167e7ccfe61823c03f77bec9ee9b26c7",
"size": "475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "search/migrations/0012_auto_20170822_1207.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "254975"
},
{
"name": "HTML",
"bytes": "248381"
},
{
"name": "JavaScript",
"bytes": "1148377"
},
{
"name": "Makefile",
"bytes": "2208"
},
{
"name": "Nginx",
"bytes": "1946"
},
{
"name": "Python",
"bytes": "757954"
},
{
"name": "Shell",
"bytes": "5922"
}
],
"symlink_target": ""
}
|
"""
This module houses the GoogleMap object, used for generating
the needed javascript to embed Google Maps in a Web page.
Google(R) is a registered trademark of Google, Inc. of Mountain View, California.
Example:
* In the view:
return render(request, 'template.html', {'google': GoogleMap(key="abcdefg")})
* In the template:
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
{{ google.xhtml }}
<head>
<title>Google Maps via GeoDjango</title>
{{ google.style }}
{{ google.scripts }}
</head>
{{ google.body }}
<div id="{{ google.dom_id }}" style="width:600px;height:400px;"></div>
</body>
</html>
Note: If you want to be more explicit in your templates, the following are
equivalent:
{{ google.body }} => "<body {{ google.onload }} {{ google.onunload }}>"
{{ google.xhtml }} => "<html xmlns="http://www.w3.org/1999/xhtml" {{ google.xmlns }}>"
{{ google.style }} => "<style>{{ google.vml_css }}</style>"
Explanation:
- The `xhtml` property provides the correct XML namespace needed for
Google Maps to operate in IE using XHTML. Google Maps on IE uses
VML to draw polylines. Returns, by default:
<html xmlns="http://www.w3.org/1999/xhtml" xmlns:v="urn:schemas-microsoft-com:vml">
- The `style` property provides the correct style tag for the CSS
properties required by Google Maps on IE:
<style type="text/css">v\:* {behavior:url(#default#VML);}</style>
- The `scripts` property provides the necessary <script> tags for
including the Google Maps javascript, as well as including the
generated javascript.
- The `body` property provides the correct attributes for the
body tag to load the generated javascript. By default, returns:
<body onload="gmap_load()" onunload="GUnload()">
- The `dom_id` property returns the DOM id for the map. Defaults to "map".
The following attributes may be set or customized in your local settings:
* GOOGLE_MAPS_API_KEY: String of your Google Maps API key. These are tied
to a domain. May be obtained from http://www.google.com/apis/maps/
* GOOGLE_MAPS_API_VERSION (optional): Defaults to using "2.x"
* GOOGLE_MAPS_URL (optional): Must have a substitution ('%s') for the API
version.
"""
from django.contrib.gis.maps.google.gmap import GoogleMap, GoogleMapSet
from django.contrib.gis.maps.google.overlays import (
GEvent, GIcon, GMarker, GPolygon, GPolyline,
)
from django.contrib.gis.maps.google.zoom import GoogleZoom
__all__ = [
'GoogleMap', 'GoogleMapSet', 'GEvent', 'GIcon', 'GMarker', 'GPolygon',
'GPolyline', 'GoogleZoom',
]
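# A minimal view sketch building on the docstring above; the API key,
# coordinates, and template name are illustrative assumptions, not part of
# this module:
#
#   from django.shortcuts import render
#   from django.contrib.gis.geos import Point
#   from django.contrib.gis.maps.google import GoogleMap, GMarker
#
#   def map_view(request):
#       marker = GMarker(Point(-122.0, 37.4))
#       gmap = GoogleMap(key='abcdefg', markers=[marker])
#       return render(request, 'template.html', {'google': gmap})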
|
{
"content_hash": "d4943f7af0c2706a854cda4254193505",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 92,
"avg_line_length": 41.10144927536232,
"alnum_prop": 0.6502115655853314,
"repo_name": "yephper/django",
"id": "6fe4325c1197be01814e70e8bc891f60d184313f",
"size": "2836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/contrib/gis/maps/google/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "1538"
},
{
"name": "CSS",
"bytes": "1697381"
},
{
"name": "HTML",
"bytes": "390772"
},
{
"name": "Java",
"bytes": "588"
},
{
"name": "JavaScript",
"bytes": "3172126"
},
{
"name": "Makefile",
"bytes": "134"
},
{
"name": "PHP",
"bytes": "19336"
},
{
"name": "Python",
"bytes": "13365273"
},
{
"name": "Shell",
"bytes": "837"
},
{
"name": "Smarty",
"bytes": "133"
}
],
"symlink_target": ""
}
|
import rospy
import json
import argparse
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from modules.planning.proto import planning_pb2
from modules.drivers.proto import mobileye_pb2
from mobileye_data import MobileyeData
from localization_data import LocalizationData
from planning_data import PlanningData
from routing_data import RoutingData
from chassis_data import ChassisData
from view_subplot import ViewSubplot
from subplot_s_speed import SubplotSSpeed
from subplot_s_theta import SubplotSTheta
from subplot_s_time import SubplotSTime
from modules.localization.proto import localization_pb2
from modules.map.relative_map.proto import navigation_pb2
from modules.canbus.proto import chassis_pb2
from std_msgs.msg import String
PLANNING_TOPIC = '/apollo/planning'
mobileye = MobileyeData()
localization = LocalizationData()
planning = PlanningData()
chassis = ChassisData()
routing_data = RoutingData()
def update(frame_number):
view_subplot.show(mobileye, localization, planning, chassis, routing_data)
s_speed_subplot.show(planning)
s_theta_subplot.show(planning)
s_time_subplot.show(planning)
def localization_callback(localization_pb):
localization.update(localization_pb)
def mobileye_callback(mobileye_pb):
mobileye.update(mobileye_pb)
mobileye.compute_lanes()
# mobileye.compute_next_lanes()
mobileye.compute_obstacles()
planning.compute_path()
planning.compute_path_param()
def planning_callback(planning_pb):
planning.update(planning_pb)
def chassis_callback(chassis_pb):
chassis.update(chassis_pb)
def routing_callback(navigation_info_pb):
routing_data.update_navigation(navigation_info_pb)
def add_listener():
rospy.init_node('mobileye_plot', anonymous=True)
rospy.Subscriber('/apollo/sensor/mobileye',
mobileye_pb2.Mobileye,
mobileye_callback)
rospy.Subscriber(PLANNING_TOPIC,
planning_pb2.ADCTrajectory,
planning_callback)
rospy.Subscriber('/apollo/localization/pose',
localization_pb2.LocalizationEstimate,
localization_callback)
rospy.Subscriber('/apollo/canbus/chassis',
chassis_pb2.Chassis,
chassis_callback)
rospy.Subscriber('/apollo/navigation',
navigation_pb2.NavigationInfo, routing_callback)
if __name__ == '__main__':
add_listener()
fig = plt.figure()
ax = plt.subplot2grid((3, 2), (0, 0), rowspan=3, colspan=1)
view_subplot = ViewSubplot(ax)
ax1 = plt.subplot2grid((3, 2), (0, 1), rowspan=1, colspan=1)
s_speed_subplot = SubplotSSpeed(ax1)
ax2 = plt.subplot2grid((3, 2), (1, 1), rowspan=1, colspan=1)
s_theta_subplot = SubplotSTheta(ax2)
ax3 = plt.subplot2grid((3, 2), (2, 1), rowspan=1, colspan=1)
s_time_subplot = SubplotSTime(ax3)
ani = animation.FuncAnimation(fig, update, interval=100)
# ax.axis('equal')
ax.axvline(x=0.0, alpha=0.3)
ax.axhline(y=0.0, alpha=0.3)
ax2.axvline(x=0.0, alpha=0.3)
ax2.axhline(y=0.0, alpha=0.3)
plt.show()
|
{
"content_hash": "6dcf917ade1bf79009338e52b2953715",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 78,
"avg_line_length": 30.631067961165048,
"alnum_prop": 0.7004754358161648,
"repo_name": "startcode/apollo",
"id": "cec014bf3de3082e5f7de86c21109423a7ea5618",
"size": "3939",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "modules/tools/mobileye_viewer/mobileye_viewer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "45614"
},
{
"name": "C++",
"bytes": "9361050"
},
{
"name": "CMake",
"bytes": "41261"
},
{
"name": "CSS",
"bytes": "35714"
},
{
"name": "Cuda",
"bytes": "51223"
},
{
"name": "Dockerfile",
"bytes": "1843"
},
{
"name": "HTML",
"bytes": "20313"
},
{
"name": "JavaScript",
"bytes": "304933"
},
{
"name": "Python",
"bytes": "1203616"
},
{
"name": "Shell",
"bytes": "281488"
},
{
"name": "Smarty",
"bytes": "43786"
}
],
"symlink_target": ""
}
|
import numpy as np
import cv2
from LineSegment import LineSegment
from RoadLanes import RoadLanes
class LaneDetector(object):
"""
Given a source image, implements a pipeline of image processing
operations to detect lane division markings on a typical
highway scene.
## Typical operation:
    1. call set_image to provide an image to the image processing pipeline
    2. call one of the pipeline operations; this will trigger all prior
    operations to run
    3. artifacts (intermediate images) may be invalid if a pipeline step has
    not been executed yet
## Pipeline:
image -> roi -> edge detection -> line detection -> lane detection
"""
image = roi_img = gray_img = blurred_img = edges_img = lines_img = lanes_img = None
line_segments = None
h=0
w=0
lane_center_x = 0
is_quiet = False
#for gaussian blur
kernel_size = 13
#limits for lane detection
line_perspective_deg = 35
line_perspective_tolerance = 15
#default roi
margin_left=80
margin_right=37
horizon_height=255
def set_image(self, image):
""" entry point for image pipeline """
self.image = image
self.h = image.shape[0]
self.w = image.shape[1]
if not self.is_quiet:
print("w={},h={}".format(self.w,self.h))
def quiet(self):
self.is_quiet = True
def smooth(self):
""" Apply smoothing algorithm """
#convert image to grayscale
self.gray_img = cv2.cvtColor(self.image, cv2.COLOR_RGB2GRAY)
# Define a kernel size for Gaussian smoothing / blurring
# Note: this step is optional as cv2.Canny() applies a 5x5 Gaussian internally
self.blurred_img = cv2.GaussianBlur(self.gray_img,(self.kernel_size, self.kernel_size), 0)
def find_edges(self, low_threshold=10, high_threshold=50):
""" creates an edge image """
self.smooth()
self.edges_img = cv2.Canny(self.blurred_img, low_threshold, high_threshold)
def find_lines(self, rho=5, theta=np.pi/180, threshold=15, min_line_len=10, max_line_gap=5):
""" uses hough transforms to find lines in the image """
self.find_edges()
#mask area
self.mask_road_roi(self.edges_img)
lines = cv2.HoughLinesP(self.edges_img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
self.line_segments = LineSegment.create_lines(lines)
self.lines_img = np.copy(self.image)
self.draw_lines(self.lines_img, self.line_segments)
def find_lanes(self):
"""
        splits the collection of lines found earlier in the processing pipeline into
        left and right lines (based on the slope of each line), then uses a weighted
        average strategy to come up with one lane object
"""
#run pipeline through here
self.find_lines()
#debug image
self.lanes_img = np.copy(self.image)
#find left lines
left_lines = []
lines_on_the_left = [x for x in self.line_segments if x.slope_ascendant()]
if not self.is_quiet:
print("{0} lines on the left".format(len(lines_on_the_left)))
for left_line in lines_on_the_left:
within_tolerance = self.line_within_tolerance(left_line,"left")
if within_tolerance:
self.draw_line(self.lanes_img, left_line.line_vector, [255,0,0], 1)
left_lines.append(left_line)
if not self.is_quiet:
print("left: slope={0:0.4f} ({1:0.2f} deg), len={2:0.2f}: {3} >> {4}".\
format(left_line.slope,left_line.slope_degrees(),left_line.length(),left_line.line_vector,\
within_tolerance))
#find right lines
right_lines = []
lines_on_the_right = [x for x in self.line_segments if x.slope_descendant()]
if not self.is_quiet:
print("{0} lines on the right".format(len(lines_on_the_right)))
for right_line in lines_on_the_right:
within_tolerance = self.line_within_tolerance(right_line,"right")
if within_tolerance:
self.draw_line(self.lanes_img, right_line.line_vector, [0,255,0], 1)
right_lines.append(right_line)
if not self.is_quiet:
print("right: slope={0:0.4f} ({1:0.2f} deg), len={2:0.2f}: {3} XX {4}".\
format(right_line.slope,right_line.slope_degrees(),right_line.length(),right_line.line_vector,\
within_tolerance))
#create model of lane
self.lane = RoadLanes(left_lines, right_lines, self.h, self.h-self.horizon_height+50)
self.draw_lane(self.lanes_img, self.lane)
def line_within_tolerance(self, line_segment, side):
#does the angle seem ok?
lower_angle_limit = self.line_perspective_deg-self.line_perspective_tolerance
upper_angle_limit = self.line_perspective_deg+self.line_perspective_tolerance
within_angular_tol = lower_angle_limit <= line_segment.slope_degrees() <= upper_angle_limit
#is it long enough?
within_len_tol = line_segment.length() > 15
#is it on the correct side of the lane?
if side=="left":
side_ok = line_segment.line_vector[0] < self.lane_center_x
else:
side_ok = line_segment.line_vector[2] > self.lane_center_x
return within_angular_tol and within_len_tol and side_ok
def mask_road_roi(self, image):
"""
creates an ROI in the shape of a triangle intended to
match the target highway that looks like a triangle because
of camera perspective projection
"""
#create lines (y=ax+b) that shape the triangular ROI
        #why triangular? that's what typical road lanes disappearing into
#the horizon look like
point_left = [self.margin_left,self.h]
point_right = [self.w-self.margin_right,self.h]
point_horizon = [self.margin_left+(point_right[0]-self.margin_left)/2,self.h-self.horizon_height]
self.lane_center_x = point_left[0] + (point_right[0]-point_left[0])/2
fit_left = np.polyfit((point_left[0], point_horizon[0]), (point_left[1], point_horizon[1]), 1)
fit_right = np.polyfit((point_right[0], point_horizon[0]), (point_right[1], point_horizon[1]), 1)
fit_bottom = np.polyfit((point_left[0], point_right[0]), (point_left[1], point_right[1]), 1)
if not self.is_quiet:
print('point left:{0}, point horizon={1}, point right={2}'.format(point_left, point_horizon, point_right))
# Find the region inside the lines
XX, YY = np.meshgrid(np.arange(0, self.w), np.arange(0, self.h))
region_thresholds = \
(YY > (XX*fit_left[0] + fit_left[1])) & \
(YY > (XX*fit_right[0] + fit_right[1])) & \
(YY < (XX*fit_bottom[0] + fit_bottom[1]))
# Mask region
image[~region_thresholds] = 0
def draw_lines(self, img, lines, color=[255, 0, 0], thickness=2):
for line in lines:
self.draw_line(img, line.line_vector, color, thickness)
def draw_lane(self, img, lane):
if lane.left_line:
self.draw_line(img, lane.left_line.line_vector, [0,0,200], 3)
if lane.right_line:
self.draw_line(img, lane.right_line.line_vector, [0,0,200], 3)
def draw_line(self, img, line, color=[255, 0, 0], thickness=2):
x1 = line[0]
y1 = line[1]
x2 = line[2]
y2 = line[3]
cv2.line(img, (int(x1), int(y1)), (int(x2), int(y2)), color, thickness)
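# A minimal driver sketch for the pipeline described in the class docstring.
# The input path and the BGR->RGB conversion are assumptions about how frames
# are loaded; LineSegment/RoadLanes come from the sibling modules imported above.
#
#   frame = cv2.cvtColor(cv2.imread('highway.jpg'), cv2.COLOR_BGR2RGB)
#   detector = LaneDetector()
#   detector.quiet()
#   detector.set_image(frame)
#   detector.find_lanes()  # triggers smooth -> find_edges -> find_lines first
#   cv2.imwrite('lanes.png', cv2.cvtColor(detector.lanes_img, cv2.COLOR_RGB2BGR))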
|
{
"content_hash": "cc5599f29673c05ef04504541846857e",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 137,
"avg_line_length": 40.041666666666664,
"alnum_prop": 0.6062695109261186,
"repo_name": "pmerloti/CarND-LaneLines-P1-Merloti",
"id": "14f476d71ffe3880c38d40deabe8f496b3d457bb",
"size": "7688",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LaneDetection/LaneDetector.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13894"
}
],
"symlink_target": ""
}
|
"""Firebase Cloud Storage module.
This module contains utilities for accessing Google Cloud Storage buckets associated with
Firebase apps. This requires the ``google-cloud-storage`` Python module.
"""
# pylint: disable=import-error,no-name-in-module
try:
from google.cloud import storage
except ImportError:
raise ImportError('Failed to import the Cloud Storage library for Python. Make sure '
'to install the "google-cloud-storage" module.')
from firebase_admin import _utils
_STORAGE_ATTRIBUTE = '_storage'
def bucket(name=None, app=None) -> storage.Bucket:
"""Returns a handle to a Google Cloud Storage bucket.
If the name argument is not provided, uses the 'storageBucket' option specified when
initializing the App. If that is also not available raises an error. This function
does not make any RPC calls.
Args:
name: Name of a cloud storage bucket (optional).
app: An App instance (optional).
Returns:
google.cloud.storage.Bucket: A handle to the specified bucket.
Raises:
ValueError: If a bucket name is not specified either via options or method arguments,
or if the specified bucket name is not a valid string.
"""
client = _utils.get_app_service(app, _STORAGE_ATTRIBUTE, _StorageClient.from_app)
return client.bucket(name)
class _StorageClient:
"""Holds a Google Cloud Storage client instance."""
def __init__(self, credentials, project, default_bucket):
self._client = storage.Client(credentials=credentials, project=project)
self._default_bucket = default_bucket
@classmethod
def from_app(cls, app):
credentials = app.credential.get_credential()
default_bucket = app.options.get('storageBucket')
# Specifying project ID is not required, but providing it when available
# significantly speeds up the initialization of the storage client.
return _StorageClient(credentials, app.project_id, default_bucket)
def bucket(self, name=None):
"""Returns a handle to the specified Cloud Storage Bucket."""
bucket_name = name if name is not None else self._default_bucket
if bucket_name is None:
raise ValueError(
'Storage bucket name not specified. Specify the bucket name via the '
'"storageBucket" option when initializing the App, or specify the bucket '
'name explicitly when calling the storage.bucket() function.')
if not bucket_name or not isinstance(bucket_name, str):
raise ValueError(
'Invalid storage bucket name: "{0}". Bucket name must be a non-empty '
'string.'.format(bucket_name))
return self._client.bucket(bucket_name)
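# A hedged usage sketch; it assumes the default app was initialized with a
# 'storageBucket' option and valid credentials, and the bucket/blob names are
# illustrative:
#
#   import firebase_admin
#   from firebase_admin import storage
#
#   firebase_admin.initialize_app(options={'storageBucket': 'my-app.appspot.com'})
#   blob = storage.bucket().blob('uploads/example.txt')
#   blob.upload_from_string('hello world')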
|
{
"content_hash": "6b71becf1bb3e9e4eb7c4deaa3668d27",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 91,
"avg_line_length": 40.838235294117645,
"alnum_prop": 0.6845516744688512,
"repo_name": "firebase/firebase-admin-python",
"id": "f3948371c481408561cff3a78b3dd438b3474fba",
"size": "3353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "firebase_admin/storage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1142237"
},
{
"name": "Shell",
"bytes": "1682"
}
],
"symlink_target": ""
}
|
import firebase_storage
import location
|
{
"content_hash": "671da7de461251e715d0f4a3d3e749c5",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 23,
"avg_line_length": 20,
"alnum_prop": 0.875,
"repo_name": "alt-locator/alt-python",
"id": "012c4c35bedcc68b558091e5597f84117d61548c",
"size": "40",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/storage/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19629"
}
],
"symlink_target": ""
}
|
from typing import Mapping
from typing import Optional
from typing import Tuple
from typing import Union
import tensorflow as tf
from keras_cv import bounding_box
class ROIGenerator(tf.keras.layers.Layer):
"""
    Generates regions of interest (ROIs, or proposals) from scores.
    Mainly used in Region CNN (RCNN) networks.
    This works for multi-level input; both boxes and scores are dictionary
    inputs with the same set of keys.
    Users can configure top k and threshold differently in train and inference.
    Users can choose to combine all levels if NMS across all levels is desired.
    The following steps are applied to each pair of (boxes, scores):
    1) the top pre_nms_topk scores and boxes are sorted and selected per level
    2) NMS is applied and the top post_nms_topk scores and ROIs are selected per level
    3) scores and ROIs are combined across all levels
    4) the top post_nms_topk scores and ROIs are sorted and selected
Args:
bounding_box_format: a case-insensitive string.
For detailed information on the supported format, see the
[KerasCV bounding box documentation](https://keras.io/api/keras_cv/bounding_box/formats/).
pre_nms_topk_train: int. number of top k scoring proposals to keep before applying NMS in training mode.
When RPN is run on multiple feature maps / levels (as in FPN) this number is per
feature map / level.
nms_score_threshold_train: float. score threshold to use for NMS in training mode.
nms_iou_threshold_train: float. IOU threshold to use for NMS in training mode.
post_nms_topk_train: int. number of top k scoring proposals to keep after applying NMS in training mode.
When RPN is run on multiple feature maps / levels (as in FPN) this number is per
feature map / level.
pre_nms_topk_test: int. number of top k scoring proposals to keep before applying NMS in inference mode.
When RPN is run on multiple feature maps / levels (as in FPN) this number is per
feature map / level.
nms_score_threshold_test: float. score threshold to use for NMS in inference mode.
nms_iou_threshold_test: float. IOU threshold to use for NMS in inference mode.
post_nms_topk_test: int. number of top k scoring proposals to keep after applying NMS in inference mode.
When RPN is run on multiple feature maps / levels (as in FPN) this number is per
feature map / level.
Usage:
```python
roi_generator = ROIGenerator("xyxy")
boxes = {2: tf.random.normal([32, 5, 4])}
scores = {2: tf.random.normal([32, 5])}
rois, roi_scores = roi_generator(boxes, scores, training=True)
```
"""
def __init__(
self,
bounding_box_format,
pre_nms_topk_train: int = 2000,
nms_score_threshold_train: float = 0.0,
nms_iou_threshold_train: float = 0.7,
post_nms_topk_train: int = 1000,
pre_nms_topk_test: int = 1000,
nms_score_threshold_test: float = 0.0,
nms_iou_threshold_test: float = 0.7,
post_nms_topk_test: int = 1000,
**kwargs,
):
super().__init__(**kwargs)
self.bounding_box_format = bounding_box_format
self.pre_nms_topk_train = pre_nms_topk_train
self.nms_score_threshold_train = nms_score_threshold_train
self.nms_iou_threshold_train = nms_iou_threshold_train
self.post_nms_topk_train = post_nms_topk_train
self.pre_nms_topk_test = pre_nms_topk_test
self.nms_score_threshold_test = nms_score_threshold_test
self.nms_iou_threshold_test = nms_iou_threshold_test
self.post_nms_topk_test = post_nms_topk_test
self.built = True
def call(
self,
multi_level_boxes: Union[tf.Tensor, Mapping[int, tf.Tensor]],
multi_level_scores: Union[tf.Tensor, Mapping[int, tf.Tensor]],
training: Optional[bool] = None,
) -> Tuple[tf.Tensor, tf.Tensor]:
"""
Args:
            multi_level_boxes: float Tensor. A dictionary or single Tensor of boxes, one per level. Shape is
                [batch_size, num_boxes, 4] at each level, in `bounding_box_format`.
                The boxes from RPNs are usually encoded as deltas w.r.t. anchors;
                they need to be decoded before being passed in here.
            multi_level_scores: float Tensor. A dictionary or single Tensor of scores, usually confidence scores,
                one per level. Shape is [batch_size, num_boxes] at each level.
Returns:
rois: float Tensor of [batch_size, post_nms_topk, 4]
roi_scores: float Tensor of [batch_size, post_nms_topk]
"""
if training:
pre_nms_topk = self.pre_nms_topk_train
post_nms_topk = self.post_nms_topk_train
nms_score_threshold = self.nms_score_threshold_train
nms_iou_threshold = self.nms_iou_threshold_train
else:
pre_nms_topk = self.pre_nms_topk_test
post_nms_topk = self.post_nms_topk_test
nms_score_threshold = self.nms_score_threshold_test
nms_iou_threshold = self.nms_iou_threshold_test
def per_level_gen(boxes, scores):
scores_shape = scores.get_shape().as_list()
# scores can also be [batch_size, num_boxes, 1]
if len(scores_shape) == 3:
scores = tf.squeeze(scores, axis=-1)
_, num_boxes = scores.get_shape().as_list()
level_pre_nms_topk = min(num_boxes, pre_nms_topk)
level_post_nms_topk = min(num_boxes, post_nms_topk)
scores, sorted_indices = tf.nn.top_k(
scores, k=level_pre_nms_topk, sorted=True
)
boxes = tf.gather(boxes, sorted_indices, batch_dims=1)
# convert from input format to yxyx for the TF NMS operation
boxes = bounding_box.convert_format(
boxes,
source=self.bounding_box_format,
target="yxyx",
)
# TODO(tanzhenyu): consider supporting soft / batched nms for accl
selected_indices, num_valid = tf.image.non_max_suppression_padded(
boxes,
scores,
max_output_size=level_post_nms_topk,
iou_threshold=nms_iou_threshold,
score_threshold=nms_score_threshold,
pad_to_max_output_size=True,
sorted_input=True,
canonicalized_coordinates=True,
)
# convert back to input format
boxes = bounding_box.convert_format(
boxes,
source="yxyx",
target=self.bounding_box_format,
)
level_rois = tf.gather(boxes, selected_indices, batch_dims=1)
level_roi_scores = tf.gather(scores, selected_indices, batch_dims=1)
level_rois = level_rois * tf.cast(
tf.reshape(tf.range(level_post_nms_topk), [1, -1, 1])
< tf.reshape(num_valid, [-1, 1, 1]),
level_rois.dtype,
)
level_roi_scores = level_roi_scores * tf.cast(
tf.reshape(tf.range(level_post_nms_topk), [1, -1])
< tf.reshape(num_valid, [-1, 1]),
level_roi_scores.dtype,
)
return level_rois, level_roi_scores
if not isinstance(multi_level_boxes, dict):
return per_level_gen(multi_level_boxes, multi_level_scores)
rois = []
roi_scores = []
for level in sorted(multi_level_scores.keys()):
boxes = multi_level_boxes[level]
scores = multi_level_scores[level]
level_rois, level_roi_scores = per_level_gen(boxes, scores)
rois.append(level_rois)
roi_scores.append(level_roi_scores)
rois = tf.concat(rois, axis=1)
roi_scores = tf.concat(roi_scores, axis=1)
_, num_valid_rois = roi_scores.get_shape().as_list()
overall_top_k = min(num_valid_rois, post_nms_topk)
roi_scores, sorted_indices = tf.nn.top_k(
roi_scores, k=overall_top_k, sorted=True
)
rois = tf.gather(rois, sorted_indices, batch_dims=1)
return rois, roi_scores
def get_config(self):
config = {
"bounding_box_format": self.bounding_box_format,
"pre_nms_topk_train": self.pre_nms_topk_train,
"nms_score_threshold_train": self.nms_score_threshold_train,
"nms_iou_threshold_train": self.nms_iou_threshold_train,
"post_nms_topk_train": self.post_nms_topk_train,
"pre_nms_topk_test": self.pre_nms_topk_test,
"nms_score_threshold_test": self.nms_score_threshold_test,
"nms_iou_threshold_test": self.nms_iou_threshold_test,
"post_nms_topk_test": self.post_nms_topk_test,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
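# get_config above makes ROIGenerator round-trippable through the standard
# Keras serialization path; a small sketch (the values are illustrative):
#
#   gen = ROIGenerator("xyxy", post_nms_topk_test=500)
#   clone = ROIGenerator.from_config(gen.get_config())
#   assert clone.post_nms_topk_test == 500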
|
{
"content_hash": "faac6a2f8b8594125965106ebc2845a2",
"timestamp": "",
"source": "github",
"line_count": 202,
"max_line_length": 112,
"avg_line_length": 44.67821782178218,
"alnum_prop": 0.6068698060941828,
"repo_name": "keras-team/keras-cv",
"id": "88be147b2ebaa7d791c9ce773ac9f4bfa921bbfe",
"size": "9610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keras_cv/layers/object_detection/roi_generator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "20378"
},
{
"name": "Dockerfile",
"bytes": "420"
},
{
"name": "Jsonnet",
"bytes": "967"
},
{
"name": "Jupyter Notebook",
"bytes": "24377"
},
{
"name": "Python",
"bytes": "1606495"
},
{
"name": "Shell",
"bytes": "4249"
},
{
"name": "Smarty",
"bytes": "535"
},
{
"name": "Starlark",
"bytes": "10259"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Package.featured'
db.add_column('packages_package', 'featured',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Package.featured'
db.delete_column('packages_package', 'featured')
models = {
'packages.changelog': {
'Meta': {'object_name': 'ChangeLog'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime(2012, 1, 29, 9, 18, 51, 90400)'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime(2012, 1, 29, 9, 18, 51, 90509)'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['packages.Package']"}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['packages.Release']", 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'packages.package': {
'Meta': {'object_name': 'Package'},
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime(2012, 1, 29, 9, 18, 51, 94873)'}),
'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime(2012, 1, 29, 9, 18, 51, 94977)'}),
'name': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '150'})
},
'packages.packageuri': {
'Meta': {'unique_together': "(['package', 'uri'],)", 'object_name': 'PackageURI'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'package_links'", 'to': "orm['packages.Package']"}),
'uri': ('django.db.models.fields.URLField', [], {'max_length': '400'})
},
'packages.release': {
'Meta': {'unique_together': "(('package', 'version'),)", 'object_name': 'Release'},
'author': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'author_email': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'classifiers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'releases'", 'blank': 'True', 'to': "orm['packages.TroveClassifier']"}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime(2012, 1, 29, 9, 18, 51, 90976)', 'db_index': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'download_uri': ('django.db.models.fields.URLField', [], {'max_length': '1024', 'blank': 'True'}),
'frequency': ('django.db.models.fields.CharField', [], {'default': "'hourly'", 'max_length': '25'}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'license': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'maintainer': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'maintainer_email': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime(2012, 1, 29, 9, 18, 51, 91115)'}),
'order': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'package': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'releases'", 'to': "orm['packages.Package']"}),
'platform': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'raw_data': ('crate.fields.json.JSONField', [], {'null': 'True', 'blank': 'True'}),
'requires_python': ('django.db.models.fields.CharField', [], {'max_length': '25', 'blank': 'True'}),
'summary': ('django.db.models.fields.TextField', [], {}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
'packages.releasefile': {
'Meta': {'unique_together': "(('release', 'type', 'python_version', 'filename'),)", 'object_name': 'ReleaseFile'},
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('model_utils.fields.AutoCreatedField', [], {'default': 'datetime.datetime(2012, 1, 29, 9, 18, 51, 93155)', 'db_index': 'True'}),
'digest': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'downloads': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '512'}),
'filename': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('model_utils.fields.AutoLastModifiedField', [], {'default': 'datetime.datetime(2012, 1, 29, 9, 18, 51, 93261)'}),
'python_version': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'to': "orm['packages.Release']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '25'})
},
'packages.releaseobsolete': {
'Meta': {'object_name': 'ReleaseObsolete'},
'environment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'obsoletes'", 'to': "orm['packages.Release']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'packages.releaseprovide': {
'Meta': {'object_name': 'ReleaseProvide'},
'environment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'provides'", 'to': "orm['packages.Release']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'packages.releaserequire': {
'Meta': {'object_name': 'ReleaseRequire'},
'environment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'requires'", 'to': "orm['packages.Release']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'packages.releaseuri': {
'Meta': {'object_name': 'ReleaseURI'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'release': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'uris'", 'to': "orm['packages.Release']"}),
'uri': ('django.db.models.fields.URLField', [], {'max_length': '500'})
},
'packages.troveclassifier': {
'Meta': {'object_name': 'TroveClassifier'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'trove': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '350'})
}
}
complete_apps = ['packages']
|
{
"content_hash": "03c2ab5cc43761473bd71d0ecaf2f4cf",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 197,
"avg_line_length": 73.50819672131148,
"alnum_prop": 0.5519625334522748,
"repo_name": "crate-archive/crate-site",
"id": "b9409187082fcd544128be54129502b8fcac8a47",
"size": "8992",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "crateweb/apps/packages/migrations/0005_auto__add_field_package_featured.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "JavaScript",
"bytes": "8467"
},
{
"name": "Python",
"bytes": "633678"
}
],
"symlink_target": ""
}
|
class MockECR(object):
@property
def aws_account_id(self):
return '12345678'
@property
def registry(self):
return []
@property
def project_repo(self):
return 'nginxdemos'
def project_repo_exists(self):
return True
def create_project_repo(self):
return True
def get_login(self):
return ''
def get_image_by_tag(self, tag):
return 'latest'
@property
def images(self):
return []
def tag_exists(self, tag):
return True
def find_git_sha1_image_tag(self, tag):
return 'latest'
def retag(self, tag, new_tag):
pass
|
{
"content_hash": "c6ba387035d5178c136c67bd0243953a",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 41,
"avg_line_length": 15.945945945945946,
"alnum_prop": 0.6372881355932203,
"repo_name": "izakp/hokusai",
"id": "025fc73ad2514281889c699c88b7fe0f741c9bcc",
"size": "590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/mocks/mock_ecr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "599"
},
{
"name": "Makefile",
"bytes": "3484"
},
{
"name": "Python",
"bytes": "134404"
}
],
"symlink_target": ""
}
|
input_data = {
'permission_sets': {
'developer': {
'permissions': ['access_data', 'can_see_system_activity', 'see_dashboards', 'clear_cache_refresh', 'create_table_calculations', 'deploy', 'develop', 'download_without_limit', 'explore', 'manage_spaces', 'mobile_app_access', 'save_content', 'schedule_look_emails', 'see_drill_overlay', 'see_lookml', 'see_sql', 'see_user_dashboards', 'send_to_integration', 'use_sql_runner']},
'user': {
'permissions': ['access_data', 'clear_cache_refresh', 'create_table_calculations', 'download_without_limit', 'explore', 'manage_spaces', 'mobile_app_access', 'save_content', 'schedule_look_emails', 'see_drill_overlay', 'see_lookml', 'see_lookml_dashboards', 'see_looks', 'see_sql', 'see_user_dashboards', 'send_to_integration']}},
'model_sets': {
'lk1_test_set': {
'models': ['test', 'test2']},
'lk4_huggy': {
'models': ['test', 'test1', 'test2']}},
'roles': {
'BusinessOperations_Developer': {
'permissions_set': 'test_perm1',
'model_set': 'developer',
'team': ['BusinessOperations_BO_Dev', 'DrewEdit']},
'BusinessOperations_User': {
'permissions': 'user',
'model_set': 'lk4_huggy',
'team': ['BusinessOperations', 'Freddy']}},
'folder_permissions': {
'business_operations_folder': [{
'name': 'Business Operations',
'team_view': ['BusinessOperations', 'Snaptest'],
'subfolder': [{
'name': 'test_sub',
'team_edit': ['Freddy'],
'team_view': ['hugo']},
{'name': 'test_sub2',
'subfolder': [{
'name': 'test_sub_sub',
'team_edit': ['Famke'],
'team_view': ['hugle']},
{'name': 'subdiddy',
'subfolder': [{
'name': 'hugle_testy',
'team_edit': ['Freddy'],
'team_view': ['hugle']}]}]}]}],
'sexy_time_folder': [{
'name': 'sexy time',
'team_edit': ['sexy_group1'],
'team_view': ['sexy_group2'],
'subfolder': [{
'name': 'sexy_sub'}]}],
'suffering_succotash_folder': [{
'name': 'suffering succotash',
'team_edit': ['sexy_group1'],
'team_view': ['sexy_group2'],
'subfolder': [{
'name': 'another_one',
'team_edit': ['sexy_group1', 'new_group'],
'team_view': ['newer_group']}]}]},
'user_attributes': {
'region_all': {
'type': 'string',
'hidden_value': 'false',
'user_view': 'true',
'user_edit': 'false',
'value': ['us', 'ag', 'bb', 'dd'],
'team': ['Cameos', 'Freddy', 'AudreyGroup']},
'region_testy': {
'type': 'string',
'hidden_value': 'false',
'user_view': 'true',
'user_edit': 'false',
'value': ['us', 'ag', 'bb'],
'team': ['Cameos', 'AudreyGroup']},
'can_see_hugo': {
'type': 'string',
'hidden_value': 'false',
'user_view': 'true',
'user_edit': 'false',
'value': ['No'],
'team': ['Cameos', 'CanSeeDAUGroup']}}}
fake_permission_set = [
{'role_name': 'BODevelopers',
'permission': ['access_data', 'use_sql_runner'],
'model_set_value': [
{'name': 'lk1_test_set',
'models': ['test', 'test2']}],
'teams': ['BusinessOperations_BO_Dev']}]
class MockSDK():
def search_folders(self):
pass
def create_folder(self):
pass
def search_groups(self):
pass
def create_group(self):
pass
def create_permission_set(self):
pass
def search_permission_sets(self):
pass
def update_permission_set(self):
pass
class MockSearchGroup():
def __init__(self, group_name):
self.group_name = group_name
class MockCreateGroup():
def __init__(self, id, name):
self.id = id
self.name = name
class MockSearchFolder():
def __init__(self, parent_id, name, id):
self.parent_id = parent_id
self.name = name
self.id = id
class MockCreateFolder():
def __init__(self, id, name, content_metadata_id):
        self.id = id
        self.name = name
        self.content_metadata_id = content_metadata_id
|
{
"content_hash": "ebeda09026f021eb6d1a64e97903104b",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 387,
"avg_line_length": 35.6,
"alnum_prop": 0.47234226447709593,
"repo_name": "looker-open-source/lmanage",
"id": "fba55312d4f7e26e07ad2e62ca84e64559d9d0fe",
"size": "4628",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/fake_methods_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "LookML",
"bytes": "154186"
},
{
"name": "Python",
"bytes": "128745"
}
],
"symlink_target": ""
}
|
from sonmano.consume import *
|
{
"content_hash": "4270855bee721d512950447147d7014e",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 29,
"avg_line_length": 30,
"alnum_prop": 0.8,
"repo_name": "sonata-nfv/son-mano-framework",
"id": "3ab3c602b6028e493dc2555a4f72aa563e29e673",
"size": "1484",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "client/sonmano/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "12205"
},
{
"name": "Python",
"bytes": "598734"
},
{
"name": "Shell",
"bytes": "43602"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import re
import os
import sys
import platform
import contextlib
import subprocess
import collections
if sys.version_info.major == 3:
import urllib.request as urllib
import io
raw_input = input
else:
import urllib2 as urllib
import io
import zipfile
import tarfile
def best_fit(options):
options = collections.Counter(options)
def give_points(info):
for key in options:
if info.lower() in key.lower():
options[key] += 1
print('Gathering information...')
system = platform.system()
print(' SYSTEM: ', system)
give_points(system)
compiler = platform.python_compiler()
compiler = re.search('(Apple|GC|Visual|MS|Sun)C', compiler).group()
print(' COMPILER: ', compiler)
give_points(compiler)
processor = platform.processor()
print(' PROCESSOR:', processor)
give_points(processor)
machine = '64bit' if sys.maxsize > 2**32 else '32bit'
print(' MACHINE: ', machine)
give_points(machine)
winner = options.most_common(1)[0][0]
print('Best option:', winner)
return winner
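# A quick illustration of the scoring above (hypothetical call): on a 64-bit
# Linux box running a GCC-built CPython, the system, compiler and machine
# tokens each add a point, so the Linux package wins:
#
#     best_fit(['PC_Linux_GCC_64bit', 'PC_Windows_VisualC_32bit'])
#     # -> 'PC_Linux_GCC_64bit'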
def download(url, chunksize=2**20):
def _ipercentage(size):
chunks = 1
yield '{:0>6.2f}%'.format(0)
while True:
percentage = min(100, 100.0 * chunks * chunksize / size)
yield '{}{:0>6.2f}%'.format('\b' * 7, percentage)
chunks += 1
def _ianimation():
animation = '.:.....'
yield animation
while True:
animation = ''.join(['\b' * 7, animation[1:], animation[:-1]])
yield animation
print('Downloading: {}'.format(url))
# Open url
response = urllib.urlopen(url)
with contextlib.closing(response):
# Validate response
if response.code != 200:
print('Http error', response.msg)
raise SystemExit(1)
# Generate progress indicator
try:
size = int(response.headers['content-length'])
visual_provider = _ipercentage(size)
except KeyError:
visual_provider = _ianimation()
# Download
fakefile = io.BytesIO()
print(next(visual_provider), end='')
sys.stdout.flush()
while True:
chunk = response.read(chunksize)
if not chunk:
break
fakefile.write(chunk)
print(next(visual_provider), end='')
sys.stdout.flush()
print()
fakefile.seek(0)
return fakefile
def unpack(filelike, path, zip):
print('Unpacking to: {}'.format(path))
if zip:
zipfile.ZipFile(filelike).extractall(path)
else:
cmd = 'gunzip | tar xC ' + path
        proc = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE)
        proc.stdin.write(filelike.read())
        # Close stdin and wait so the tar pipeline finishes before we return.
        proc.stdin.close()
        proc.wait()
def main(path=None, yesno=None):
ROOT_URL = 'http://naif.jpl.nasa.gov/pub/naif/toolkit/C'
PLATFORM_URLS = [
'MacIntel_OSX_AppleC_32bit',
'MacIntel_OSX_AppleC_64bit',
'MacPPC_OSX_AppleC_32bit',
'PC_Cygwin_GCC_32bit',
'PC_Linux_GCC_32bit',
'PC_Linux_GCC_64bit',
'PC_Windows_VisualC_32bit',
'PC_Windows_VisualC_64bit',
'SunIntel_Solaris_SunC_32bit',
'SunIntel_Solaris_SunC_64bit',
'SunSPARC_Solaris_GCC_32bit',
'SunSPARC_Solaris_GCC_64bit',
'SunSPARC_Solaris_SunC_32bit',
'SunSPARC_Solaris_SunC_64bit']
platform_url = best_fit(PLATFORM_URLS)
use_zip = platform_url.startswith('PC_Windows')
file_url = 'packages/cspice{0}'.format(('.zip' if use_zip else '.tar.Z'))
result = '/'.join([ROOT_URL, platform_url, file_url])
if yesno is None:
yesno = raw_input('Do you want to download it? [y/n] ')
for char in 'nN':
if yesno.startswith(char):
raise SystemExit(0)
elif not yesno:
raise SystemExit(0)
### DOWNLOAD AND UNPACK BEST PACKAGE ###
ROOT_DIR = path or os.path.realpath(os.path.dirname(__file__))
filelike = download(result)
with contextlib.closing(filelike):
unpack(filelike, ROOT_DIR, use_zip)
print('Done')
if __name__ == '__main__':
main()
|
{
"content_hash": "b7b2f448c45665cd3c7de67ddff364db",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 77,
"avg_line_length": 27.69281045751634,
"alnum_prop": 0.5888600424828888,
"repo_name": "DaRasch/spiceminer",
"id": "1307d08288a39b2c320368f109947d7df1134e1d",
"size": "4376",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "utils/getcspice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5725"
},
{
"name": "Python",
"bytes": "368433"
}
],
"symlink_target": ""
}
|
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, InternetGateway
class TestDescribeInternetGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return """
<DescribeInternetGatewaysResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<internetGatewaySet>
<item>
<internetGatewayId>igw-eaad4883EXAMPLE</internetGatewayId>
<attachmentSet>
<item>
<vpcId>vpc-11ad4878</vpcId>
<state>available</state>
</item>
</attachmentSet>
<tagSet/>
</item>
</internetGatewaySet>
</DescribeInternetGatewaysResponse>
"""
def test_describe_internet_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.get_all_internet_gateways(
'igw-eaad4883EXAMPLE', filters=[('attachment.state', ['available', 'pending'])])
self.assert_request_parameters({
'Action': 'DescribeInternetGateways',
'InternetGatewayId.1': 'igw-eaad4883EXAMPLE',
'Filter.1.Name': 'attachment.state',
'Filter.1.Value.1': 'available',
'Filter.1.Value.2': 'pending'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
        self.assertEqual(len(api_response), 1)
self.assertIsInstance(api_response[0], InternetGateway)
self.assertEqual(api_response[0].id, 'igw-eaad4883EXAMPLE')
class TestCreateInternetGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return """
<CreateInternetGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<internetGateway>
<internetGatewayId>igw-eaad4883</internetGatewayId>
<attachmentSet/>
<tagSet/>
</internetGateway>
</CreateInternetGatewayResponse>
"""
def test_create_internet_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_internet_gateway()
self.assert_request_parameters({
'Action': 'CreateInternetGateway'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertIsInstance(api_response, InternetGateway)
self.assertEqual(api_response.id, 'igw-eaad4883')
class TestDeleteInternetGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return """
<DeleteInternetGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteInternetGatewayResponse>
"""
def test_delete_internet_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_internet_gateway('igw-eaad4883')
self.assert_request_parameters({
'Action': 'DeleteInternetGateway',
'InternetGatewayId': 'igw-eaad4883'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
        self.assertEqual(api_response, True)
class TestAttachInternetGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return """
<AttachInternetGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</AttachInternetGatewayResponse>
"""
def test_attach_internet_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.attach_internet_gateway(
'igw-eaad4883', 'vpc-11ad4878')
self.assert_request_parameters({
'Action': 'AttachInternetGateway',
'InternetGatewayId': 'igw-eaad4883',
'VpcId': 'vpc-11ad4878'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
        self.assertEqual(api_response, True)
class TestDetachInternetGateway(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return """
<DetachInternetGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DetachInternetGatewayResponse>
"""
def test_detach_internet_gateway(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.detach_internet_gateway(
'igw-eaad4883', 'vpc-11ad4878')
self.assert_request_parameters({
'Action': 'DetachInternetGateway',
'InternetGatewayId': 'igw-eaad4883',
'VpcId': 'vpc-11ad4878'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
        self.assertEqual(api_response, True)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "edc11c035650a3b9ce2b0b2b42fa63fb",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 95,
"avg_line_length": 39.9671052631579,
"alnum_prop": 0.5902880658436214,
"repo_name": "IsCoolEntertainment/debpkg_python-boto",
"id": "35f78fe094d9d018c2fafa33efae018705548b99",
"size": "6075",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "tests/unit/vpc/test_internetgateway.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3224"
},
{
"name": "Python",
"bytes": "5119165"
},
{
"name": "Shell",
"bytes": "3071"
}
],
"symlink_target": ""
}
|
from pathlib import Path
from setuptools import find_packages, setup
rootpath = Path(__file__).parent.absolute()
def read(*parts):
    with open(rootpath.joinpath(*parts), "r") as f:
        return f.read()
with open("requirements.txt") as f:
require = f.readlines()
install_requires = [r.strip() for r in require]
setup(
name="gridgeo",
python_requires='>=3.6',
description="Convert UGRID, SGRID, and non-compliant ocean model grids to geo-like objects", # noqa
license="BSD-3-Clause",
    long_description=read("README.md"),
long_description_content_type="text/markdown",
author="Filipe Fernandes",
author_email="ocefpaf@gmail.com",
url="https://github.com/pyoceans/gridgeo",
keywords=["geojson", "shapefile", "ocean models", "ugrid", "sgrid"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: BSD License",
"Environment :: Console",
"Intended Audience :: Science/Research",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering",
],
platforms="any",
packages=find_packages(),
extras_require={"testing": ["pytest"]},
install_requires=install_requires,
entry_points={"console_scripts": ["gridio = gridgeo.gridio:main"]},
use_scm_version={
"write_to": "gridgeo/_version.py",
"write_to_template": '__version__ = "{version}"',
"tag_regex": r"^(?P<prefix>v)?(?P<version>[^\+]+)(?P<suffix>.*)?$",
},
setup_requires=['setuptools_scm'],
)
|
{
"content_hash": "62cfc14ffd99995a16cf72511841026b",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 104,
"avg_line_length": 34.680851063829785,
"alnum_prop": 0.6251533742331289,
"repo_name": "pyoceans/gridgeo",
"id": "d06f78b89186b4121ee807fee56ad81f6cf7a711",
"size": "1630",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1898300"
},
{
"name": "Makefile",
"bytes": "404"
},
{
"name": "Python",
"bytes": "32100"
}
],
"symlink_target": ""
}
|
"""
An extension for a heartbeat keyword following the convention used by KTL in the wild
"""
from __future__ import absolute_import
from Cauldron.types import Integer, DispatcherKeywordType
__all__ = ['HeartbeatKeyword']
class HeartbeatKeyword(Integer, DispatcherKeywordType):
"""This keyword will update with a period to identify a dispatcher as alive.
"""
KTL_REGISTERED = False
KTL_TYPE = 'integer'
def __init__(self, *args, **kwargs):
kwargs['initial'] = '0'
kwargs['period'] = 1
super(HeartbeatKeyword, self).__init__(*args, **kwargs)
def read(self):
"""Read this keyword"""
self.increment()
return self.value
# We don't have to do anything else here.
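# A hypothetical usage sketch (illustrative only -- the exact keyword/service
# bootstrap depends on the Cauldron backend in use and is not shown here):
#
#     kw = HeartbeatKeyword('HEARTBEAT', service)  # assumes a dispatcher service
#     kw.read()  # increments the counter and returns the current value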
|
{
"content_hash": "19cea9e78c5edb0c9383f63bdf6701ea",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 85,
"avg_line_length": 26.586206896551722,
"alnum_prop": 0.6264591439688716,
"repo_name": "alexrudy/Cauldron",
"id": "7dae1d4f885b944dc7dd20ee48d5ec4b028ab661",
"size": "795",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "Cauldron/ext/heartbeat/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "840330"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import os
try:
import json; json
except ImportError:
# test skipped later
json = None
from mapproxy.test.image import img_from_buf
from mapproxy.test.http import mock_single_req_httpd
from mapproxy.test.system import module_setup, module_teardown, SystemTest, make_base_config
from mapproxy.request.wms import WMS111MapRequest, WMS111FeatureInfoRequest, WMS111CapabilitiesRequest
from mapproxy.test.helper import validate_with_dtd
from mapproxy.test.http import mock_httpd
from mapproxy.test.image import create_tmp_image
from mapproxy.test.system.test_wms import is_111_exception
from mapproxy.util.fs import ensure_directory
from mapproxy.cache.renderd import has_renderd_support
from nose.tools import eq_
from nose.plugins.skip import SkipTest
test_config = {}
base_config = make_base_config(test_config)
def setup_module():
if not has_renderd_support():
raise SkipTest("requests required")
module_setup(test_config, 'renderd_client.yaml', with_cache_data=True)
def teardown_module():
module_teardown(test_config)
try:
from http.server import BaseHTTPRequestHandler
except ImportError:
from BaseHTTPServer import BaseHTTPRequestHandler
class TestWMS111(SystemTest):
config = test_config
def setup(self):
SystemTest.setup(self)
self.common_req = WMS111MapRequest(url='/service?', param=dict(service='WMS',
version='1.1.1'))
self.common_map_req = WMS111MapRequest(url='/service?', param=dict(service='WMS',
version='1.1.1', bbox='-180,0,0,80', width='200', height='200',
layers='wms_cache', srs='EPSG:4326', format='image/png',
exceptions='xml',
styles='', request='GetMap'))
self.common_fi_req = WMS111FeatureInfoRequest(url='/service?',
param=dict(x='10', y='20', width='200', height='200', layers='wms_cache',
format='image/png', query_layers='wms_cache', styles='',
bbox='1000,400,2000,1400', srs='EPSG:900913'))
def test_wms_capabilities(self):
req = WMS111CapabilitiesRequest(url='/service?').copy_with_request_params(self.common_req)
resp = self.app.get(req)
eq_(resp.content_type, 'application/vnd.ogc.wms_xml')
xml = resp.lxml
eq_(xml.xpath('//GetMap//OnlineResource/@xlink:href',
namespaces=dict(xlink="http://www.w3.org/1999/xlink"))[0],
'http://localhost/service?')
layer_names = set(xml.xpath('//Layer/Layer/Name/text()'))
expected_names = set(['direct', 'wms_cache',
'tms_cache'])
eq_(layer_names, expected_names)
assert validate_with_dtd(xml, dtd_name='wms/1.1.1/WMS_MS_Capabilities.dtd')
def test_get_map(self):
test_self = self
class req_handler(BaseHTTPRequestHandler):
def do_POST(self):
length = int(self.headers['content-length'])
json_data = self.rfile.read(length)
task = json.loads(json_data.decode('utf-8'))
eq_(task['command'], 'tile')
# request main tile of metatile
eq_(task['tiles'], [[15, 17, 5]])
eq_(task['cache_identifier'], 'wms_cache_GLOBAL_MERCATOR')
eq_(task['priority'], 100)
# this id should not change for the same tile/cache_identifier combination
eq_(task['id'], 'aeb52b506e4e82d0a1edf649d56e0451cfd5862c')
# manually create tile renderd should create
tile_filename = os.path.join(test_self.config['cache_dir'],
'wms_cache_EPSG900913/05/000/000/016/000/000/016.jpeg')
ensure_directory(tile_filename)
with open(tile_filename, 'wb') as f:
f.write(create_tmp_image((256, 256), format='jpeg', color=(255, 0, 100)))
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(b'{"status": "ok"}')
def log_request(self, code, size=None):
pass
with mock_single_req_httpd(('localhost', 42423), req_handler):
self.common_map_req.params['bbox'] = '0,0,9,9'
resp = self.app.get(self.common_map_req)
img = img_from_buf(resp.body)
main_color = sorted(img.convert('RGBA').getcolors())[-1]
            # check for red color (jpeg/png conversion requires fuzzy comparison)
assert main_color[0] == 40000
assert main_color[1][0] > 250
assert main_color[1][1] < 5
assert 95 < main_color[1][2] < 105
assert main_color[1][3] == 255
eq_(resp.content_type, 'image/png')
self.created_tiles.append('wms_cache_EPSG900913/05/000/000/016/000/000/016.jpeg')
def test_get_map_error(self):
class req_handler(BaseHTTPRequestHandler):
def do_POST(self):
length = int(self.headers['content-length'])
json_data = self.rfile.read(length)
task = json.loads(json_data.decode('utf-8'))
eq_(task['command'], 'tile')
# request main tile of metatile
eq_(task['tiles'], [[15, 17, 5]])
eq_(task['cache_identifier'], 'wms_cache_GLOBAL_MERCATOR')
eq_(task['priority'], 100)
# this id should not change for the same tile/cache_identifier combination
eq_(task['id'], 'aeb52b506e4e82d0a1edf649d56e0451cfd5862c')
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(b'{"status": "error", "error_message": "barf"}')
def log_request(self, code, size=None):
pass
with mock_single_req_httpd(('localhost', 42423), req_handler):
self.common_map_req.params['bbox'] = '0,0,9,9'
resp = self.app.get(self.common_map_req)
eq_(resp.content_type, 'application/vnd.ogc.se_xml')
is_111_exception(resp.lxml, re_msg='Error from renderd: barf')
def test_get_map_connection_error(self):
self.common_map_req.params['bbox'] = '0,0,9,9'
resp = self.app.get(self.common_map_req)
eq_(resp.content_type, 'application/vnd.ogc.se_xml')
is_111_exception(resp.lxml, re_msg='Error while communicating with renderd:')
def test_get_map_non_json_response(self):
class req_handler(BaseHTTPRequestHandler):
def do_POST(self):
length = int(self.headers['content-length'])
json_data = self.rfile.read(length)
json.loads(json_data.decode('utf-8'))
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(b'{"invalid')
def log_request(self, code, size=None):
pass
with mock_single_req_httpd(('localhost', 42423), req_handler):
self.common_map_req.params['bbox'] = '0,0,9,9'
resp = self.app.get(self.common_map_req)
eq_(resp.content_type, 'application/vnd.ogc.se_xml')
is_111_exception(resp.lxml, re_msg='Error while communicating with renderd: invalid JSON')
def test_get_featureinfo(self):
expected_req = ({'path': r'/service?LAYERs=foo,bar&SERVICE=WMS&FORMAT=image%2Fpng'
'&REQUEST=GetFeatureInfo&HEIGHT=200&SRS=EPSG%3A900913'
'&VERSION=1.1.1&BBOX=1000.0,400.0,2000.0,1400.0&styles='
'&WIDTH=200&QUERY_LAYERS=foo,bar&X=10&Y=20&feature_count=100'},
{'body': b'info', 'headers': {'content-type': 'text/plain'}})
with mock_httpd(('localhost', 42423), [expected_req]):
self.common_fi_req.params['feature_count'] = 100
resp = self.app.get(self.common_fi_req)
eq_(resp.content_type, 'text/plain')
eq_(resp.body, b'info')
class TestTiles(SystemTest):
config = test_config
def test_get_tile(self):
test_self = self
class req_handler(BaseHTTPRequestHandler):
def do_POST(self):
length = int(self.headers['content-length'])
json_data = self.rfile.read(length)
task = json.loads(json_data.decode('utf-8'))
eq_(task['command'], 'tile')
eq_(task['tiles'], [[10, 20, 6]])
eq_(task['cache_identifier'], 'tms_cache_GLOBAL_MERCATOR')
eq_(task['priority'], 100)
# this id should not change for the same tile/cache_identifier combination
eq_(task['id'], 'cf35c1c927158e188d8fbe0db380c1772b536da9')
# manually create tile renderd should create
tile_filename = os.path.join(test_self.config['cache_dir'],
'tms_cache_EPSG900913/06/000/000/010/000/000/020.png')
ensure_directory(tile_filename)
with open(tile_filename, 'wb') as f:
f.write(b"foobaz")
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(b'{"status": "ok"}')
def log_request(self, code, size=None):
pass
with mock_single_req_httpd(('localhost', 42423), req_handler):
resp = self.app.get('/tiles/tms_cache/EPSG900913/6/10/20.png')
eq_(resp.content_type, 'image/png')
eq_(resp.body, b'foobaz')
self.created_tiles.append('tms_cache_EPSG900913/06/000/000/010/000/000/020.png')
def test_get_tile_error(self):
class req_handler(BaseHTTPRequestHandler):
def do_POST(self):
length = int(self.headers['content-length'])
json_data = self.rfile.read(length)
task = json.loads(json_data.decode('utf-8'))
eq_(task['command'], 'tile')
eq_(task['tiles'], [[10, 20, 7]])
eq_(task['cache_identifier'], 'tms_cache_GLOBAL_MERCATOR')
eq_(task['priority'], 100)
# this id should not change for the same tile/cache_identifier combination
eq_(task['id'], 'c24b8c3247afec34fd0a53e5d3706e977877ef47')
self.send_response(200)
self.send_header('Content-type', 'application/json')
self.end_headers()
self.wfile.write(b'{"status": "error", "error_message": "you told me to fail"}')
def log_request(self, code, size=None):
pass
with mock_single_req_httpd(('localhost', 42423), req_handler):
resp = self.app.get('/tiles/tms_cache/EPSG900913/7/10/20.png', status=500)
eq_(resp.content_type, 'text/plain')
eq_(resp.body, b'Error from renderd: you told me to fail')
|
{
"content_hash": "9d969ec977bc86a789c2aa6f29fba13e",
"timestamp": "",
"source": "github",
"line_count": 254,
"max_line_length": 102,
"avg_line_length": 44.125984251968504,
"alnum_prop": 0.5781584582441114,
"repo_name": "faegi/mapproxy",
"id": "de76b12af00685963e87de14c7b49b7f2fac3b16",
"size": "11857",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "mapproxy/test/system/test_renderd_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12401"
},
{
"name": "HTML",
"bytes": "18261"
},
{
"name": "Makefile",
"bytes": "1045"
},
{
"name": "Python",
"bytes": "1589733"
}
],
"symlink_target": ""
}
|
import json
import logging
import os
from io import StringIO
from django.core.management import call_command
import pytest
from demo.models import SimpleConcurrentModel
logger = logging.getLogger(__name__)
@pytest.mark.django_db()
def test_dumpdata():
SimpleConcurrentModel.objects.create()
out = StringIO()
call_command('dumpdata', 'demo', stdout=out)
data = json.loads(out.getvalue())
assert data
@pytest.mark.django_db(transaction=True)
def test_loaddata_fail():
datafile = os.path.join(os.path.dirname(__file__), 'dumpdata.json')
with open(datafile, 'r') as f:
data = json.load(f)
pk = data[0]['pk']
call_command('loaddata', datafile, stdout=StringIO())
assert SimpleConcurrentModel.objects.get(id=pk).username == 'loaded'
|
{
"content_hash": "85dbd04c5727234d45657ef8fddd7361",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 72,
"avg_line_length": 23.78787878787879,
"alnum_prop": 0.7031847133757961,
"repo_name": "saxix/django-concurrency",
"id": "e2b93dc17b33589c7f4011ca73e548943a3c1d58",
"size": "785",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/test_loaddata_dumpdata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2902"
},
{
"name": "Makefile",
"bytes": "1575"
},
{
"name": "Python",
"bytes": "162899"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
class MinimalEmailHandler:
'''
A simplistic outgoing email handler, to consolidate outgoing
mail sending needs of impact-api calls
attachment, if any, should be a triple of (filename, content, filetype)
attach_alternative, if any, should be a tuple of
(attachment_text, MIME_type)
(generally used for html version of the email)
'''
def __init__(self,
to,
subject,
body,
from_email=None,
bcc=None,
attachment=None,
attach_alternative=None):
self.email = EmailMultiAlternatives(
subject,
body,
to=to,
bcc=bcc or [settings.BCC_EMAIL],
from_email=from_email or settings.NO_REPLY_EMAIL)
if attachment:
self.email.attach(*attachment)
if attach_alternative:
self.email.attach_alternative(*attach_alternative)
def send(self):
self.email.send()
def send_email(**email_details):
MinimalEmailHandler(**email_details).send()
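# A minimal usage sketch (hypothetical addresses; assumes Django's email
# settings plus settings.BCC_EMAIL and settings.NO_REPLY_EMAIL are configured):
#
#     send_email(
#         to=['founder@example.com'],
#         subject='Welcome',
#         body='Plain-text body',
#         attach_alternative=('<p>HTML body</p>', 'text/html'),
#     )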
|
{
"content_hash": "ce448c84de51ede913a10c0442d44312",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 75,
"avg_line_length": 30.435897435897434,
"alnum_prop": 0.5939342881213142,
"repo_name": "masschallenge/impact-api",
"id": "22765334915d35e5a9a6a4ff1880ff60df64c991",
"size": "1187",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "web/impact/impact/minimal_email_handler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5077"
},
{
"name": "Dockerfile",
"bytes": "1735"
},
{
"name": "HTML",
"bytes": "11542"
},
{
"name": "JavaScript",
"bytes": "2332"
},
{
"name": "Makefile",
"bytes": "17106"
},
{
"name": "Python",
"bytes": "607293"
},
{
"name": "Shell",
"bytes": "5185"
}
],
"symlink_target": ""
}
|
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.5.1-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import typing # noqa: F401
import functools # noqa: F401
from frozendict import frozendict # noqa: F401
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from openapi_client.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
UUIDSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
Configuration,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
Int32Base,
Int64Base,
Float32Base,
Float64Base,
NumberBase,
UUIDBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
class GithubRepository(
DictSchema
):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
_class = StrSchema
@classmethod
@property
def _links(cls) -> typing.Type['GithubRepositorylinks']:
return GithubRepositorylinks
defaultBranch = StrSchema
description = StrSchema
name = StrSchema
@classmethod
@property
def permissions(cls) -> typing.Type['GithubRepositorypermissions']:
return GithubRepositorypermissions
private = BoolSchema
fullName = StrSchema
def __new__(
cls,
*args: typing.Union[dict, frozendict, ],
_class: typing.Union[_class, Unset] = unset,
_links: typing.Union['GithubRepositorylinks', Unset] = unset,
defaultBranch: typing.Union[defaultBranch, Unset] = unset,
description: typing.Union[description, Unset] = unset,
name: typing.Union[name, Unset] = unset,
permissions: typing.Union['GithubRepositorypermissions', Unset] = unset,
private: typing.Union[private, Unset] = unset,
fullName: typing.Union[fullName, Unset] = unset,
_configuration: typing.Optional[Configuration] = None,
**kwargs: typing.Type[Schema],
) -> 'GithubRepository':
return super().__new__(
cls,
*args,
_class=_class,
_links=_links,
defaultBranch=defaultBranch,
description=description,
name=name,
permissions=permissions,
private=private,
fullName=fullName,
_configuration=_configuration,
**kwargs,
)
from openapi_client.model.github_repositorylinks import GithubRepositorylinks
from openapi_client.model.github_repositorypermissions import GithubRepositorypermissions
|
{
"content_hash": "12b5654d5505f2ae0a16f1b9761b00e5",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 89,
"avg_line_length": 25.56451612903226,
"alnum_prop": 0.6511041009463723,
"repo_name": "cliffano/swaggy-jenkins",
"id": "aaa0d495f766fc97fc4ba5132b09c1ba6481a8ef",
"size": "3187",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "clients/python-experimental/generated/openapi_client/model/github_repository.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ada",
"bytes": "569823"
},
{
"name": "Apex",
"bytes": "741346"
},
{
"name": "Batchfile",
"bytes": "14792"
},
{
"name": "C",
"bytes": "971274"
},
{
"name": "C#",
"bytes": "5131336"
},
{
"name": "C++",
"bytes": "7799032"
},
{
"name": "CMake",
"bytes": "20609"
},
{
"name": "CSS",
"bytes": "4873"
},
{
"name": "Clojure",
"bytes": "129018"
},
{
"name": "Crystal",
"bytes": "864941"
},
{
"name": "Dart",
"bytes": "876777"
},
{
"name": "Dockerfile",
"bytes": "7385"
},
{
"name": "Eiffel",
"bytes": "424642"
},
{
"name": "Elixir",
"bytes": "139252"
},
{
"name": "Elm",
"bytes": "187067"
},
{
"name": "Emacs Lisp",
"bytes": "191"
},
{
"name": "Erlang",
"bytes": "373074"
},
{
"name": "F#",
"bytes": "556012"
},
{
"name": "Gherkin",
"bytes": "951"
},
{
"name": "Go",
"bytes": "345227"
},
{
"name": "Groovy",
"bytes": "89524"
},
{
"name": "HTML",
"bytes": "2367424"
},
{
"name": "Haskell",
"bytes": "680841"
},
{
"name": "Java",
"bytes": "12164874"
},
{
"name": "JavaScript",
"bytes": "1959006"
},
{
"name": "Kotlin",
"bytes": "1280953"
},
{
"name": "Lua",
"bytes": "322316"
},
{
"name": "Makefile",
"bytes": "11882"
},
{
"name": "Nim",
"bytes": "65818"
},
{
"name": "OCaml",
"bytes": "94665"
},
{
"name": "Objective-C",
"bytes": "464903"
},
{
"name": "PHP",
"bytes": "4383673"
},
{
"name": "Perl",
"bytes": "743304"
},
{
"name": "PowerShell",
"bytes": "678274"
},
{
"name": "Python",
"bytes": "5529523"
},
{
"name": "QMake",
"bytes": "6915"
},
{
"name": "R",
"bytes": "840841"
},
{
"name": "Raku",
"bytes": "10945"
},
{
"name": "Ruby",
"bytes": "328360"
},
{
"name": "Rust",
"bytes": "1735375"
},
{
"name": "Scala",
"bytes": "1387368"
},
{
"name": "Shell",
"bytes": "407167"
},
{
"name": "Swift",
"bytes": "342562"
},
{
"name": "TypeScript",
"bytes": "3060093"
}
],
"symlink_target": ""
}
|
"""
Allows you to launch a pynetworktables2js server without copying
any python code. Just install, and do::
python -m pynetworktables2js
Or on Windows:
py -m pynetworktables2js
"""
from __future__ import print_function
import os
from optparse import OptionParser
from os.path import abspath, exists, join
import tornado.web
from networktables import NetworkTables
from tornado.ioloop import IOLoop
from . import get_handlers, NonCachingStaticFileHandler
try:
from .version import __version__
except ImportError:
__version__ = "__master__"
import logging
logger = logging.getLogger("dashboard")
log_date_fmt = "%H:%M:%S"
log_format = "%(asctime)s:%(msecs)03d %(levelname)-8s: %(name)-20s: %(message)s"
def init_networktables(options):
NetworkTables.setNetworkIdentity(options.identity)
if options.team:
logger.info("Connecting to NetworkTables for team %s", options.team)
NetworkTables.startClientTeam(options.team)
else:
logger.info("Connecting to NetworkTables at %s", options.robot)
NetworkTables.initialize(server=options.robot)
if options.dashboard:
logger.info("Connecting to networktables in Dashboard mode")
NetworkTables.setDashboardMode()
logger.info("NetworkTables Initialized")
def main():
# Setup options here
parser = OptionParser()
parser.add_option(
"-p", "--port", type="int", default=8888, help="Port to run web server on"
)
parser.add_option(
"-v",
"--verbose",
default=False,
action="store_true",
help="Enable verbose logging",
)
parser.add_option("--robot", default="127.0.0.1", help="Robot's IP address")
parser.add_option("--team", type="int", help="Team number of robot to connect to")
parser.add_option(
"--dashboard",
default=False,
action="store_true",
help="Use this instead of --robot to receive the IP from the driver station. WARNING: It will not work if you are not on the same host as the DS!",
)
parser.add_option(
"--identity",
default="pynetworktables2js %s" % __version__,
help="Identity to broadcast to remote NT clients",
)
options, args = parser.parse_args()
# Setup logging
logging.basicConfig(
datefmt=log_date_fmt,
format=log_format,
level=logging.DEBUG if options.verbose else logging.INFO,
)
if options.team and options.robot != "127.0.0.1":
parser.error("--robot and --team are mutually exclusive")
# Setup NetworkTables
init_networktables(options)
# setup tornado application with static handler + networktables support
www_dir = abspath(os.getcwd())
index_html = join(www_dir, "index.html")
if not exists(www_dir):
logger.error("Directory '%s' does not exist!", www_dir)
exit(1)
if not exists(index_html):
logger.warning("%s not found", index_html)
app = tornado.web.Application(
get_handlers()
+ [
(r"/()", NonCachingStaticFileHandler, {"path": index_html}),
(r"/(.*)", NonCachingStaticFileHandler, {"path": www_dir}),
]
)
# Start the app
logger.info("Listening on http://localhost:%s/", options.port)
app.listen(options.port)
IOLoop.current().start()
if __name__ == "__main__":
main()
|
{
"content_hash": "bfec9aeb2ab4948cadd87d6e6525a254",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 155,
"avg_line_length": 26.338461538461537,
"alnum_prop": 0.6378504672897196,
"repo_name": "amorygalili/pynetworktables2js",
"id": "7ea9e7041e5718f2f41efb0bc0ce48b5e242b14a",
"size": "3424",
"binary": false,
"copies": "2",
"ref": "refs/heads/add-connect-function",
"path": "pynetworktables2js/__main__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "6459"
},
{
"name": "JavaScript",
"bytes": "23241"
},
{
"name": "Python",
"bytes": "19599"
},
{
"name": "Shell",
"bytes": "411"
}
],
"symlink_target": ""
}
|
"""Tests for vumi.demos.words."""
from twisted.internet.defer import inlineCallbacks
from vumi.demos.words import (SimpleAppWorker, EchoWorker, ReverseWorker,
WordCountWorker)
from vumi.message import TransportUserMessage
from vumi.tests.helpers import VumiTestCase
from vumi.application.tests.helpers import ApplicationHelper
from vumi.tests.utils import LogCatcher
class EchoTestApp(SimpleAppWorker):
"""Test worker that echos calls to process_message."""
def process_message(self, data):
return 'echo:%s' % data
class TestSimpleAppWorker(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.app_helper = self.add_helper(ApplicationHelper(None))
self.worker = yield self.app_helper.get_application({}, EchoTestApp)
@inlineCallbacks
def test_help(self):
yield self.app_helper.make_dispatch_inbound(
None, session_event=TransportUserMessage.SESSION_NEW)
[reply] = self.app_helper.get_dispatched_outbound()
self.assertEqual(reply['session_event'], None)
self.assertEqual(reply['content'], 'Enter text:')
@inlineCallbacks
def test_content_text(self):
yield self.app_helper.make_dispatch_inbound(
"test", session_event=TransportUserMessage.SESSION_NEW)
[reply] = self.app_helper.get_dispatched_outbound()
self.assertEqual(reply['session_event'], None)
self.assertEqual(reply['content'], 'echo:test')
@inlineCallbacks
def test_base_process_message(self):
worker = yield self.app_helper.get_application({}, SimpleAppWorker)
self.assertRaises(NotImplementedError, worker.process_message, 'foo')
class TestEchoWorker(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.app_helper = self.add_helper(ApplicationHelper(None))
self.worker = yield self.app_helper.get_application({}, EchoWorker)
def test_process_message(self):
self.assertEqual(self.worker.process_message("foo"), "foo")
def test_help(self):
self.assertEqual(self.worker.get_help(), "Enter text to echo:")
@inlineCallbacks
def test_echo_non_ascii(self):
content = u'Zoë destroyer of Ascii'
with LogCatcher() as log:
yield self.app_helper.make_dispatch_inbound(content)
[reply] = self.app_helper.get_dispatched_outbound()
self.assertEqual(
log.messages(),
['User message: Zo\xc3\xab destroyer of Ascii'])
class TestReverseWorker(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.app_helper = self.add_helper(ApplicationHelper(None))
self.worker = yield self.app_helper.get_application({}, ReverseWorker)
def test_process_message(self):
self.assertEqual(self.worker.process_message("foo"), "oof")
def test_help(self):
self.assertEqual(self.worker.get_help(), "Enter text to reverse:")
class TestWordCountWorker(VumiTestCase):
@inlineCallbacks
def setUp(self):
self.app_helper = self.add_helper(ApplicationHelper(None))
self.worker = yield self.app_helper.get_application(
{}, WordCountWorker)
def test_process_message(self):
self.assertEqual(self.worker.process_message("foo bar"),
"2 words, 7 chars")
def test_singular(self):
self.assertEqual(self.worker.process_message("f"),
"1 word, 1 char")
def test_help(self):
self.assertEqual(self.worker.get_help(), "Enter text to return word"
" and character counts for:")
|
{
"content_hash": "8d21c8a4c46e005ee23fa9e02ca51d56",
"timestamp": "",
"source": "github",
"line_count": 100,
"max_line_length": 78,
"avg_line_length": 36.32,
"alnum_prop": 0.6651982378854625,
"repo_name": "vishwaprakashmishra/xmatrix",
"id": "16c735c467c8c47b42707b56ebd874a9dd3fdd19",
"size": "3657",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "vumi/demos/tests/test_words.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Erlang",
"bytes": "29735"
},
{
"name": "JavaScript",
"bytes": "5556"
},
{
"name": "Puppet",
"bytes": "2557"
},
{
"name": "Python",
"bytes": "2968329"
},
{
"name": "Shell",
"bytes": "3435"
}
],
"symlink_target": ""
}
|
"""
Accessibility algorithms.
@sort: accessibility, connected_components, cut_edges, cut_nodes, mutual_accessibility
"""
# Imports
from sys import getrecursionlimit
from sys import setrecursionlimit
# Transitive-closure
def accessibility(graph):
"""
Accessibility matrix (transitive closure).
@type graph: graph, digraph, hypergraph
@param graph: Graph.
@rtype: dictionary
@return: Accessibility information for each node.
"""
recursionlimit = getrecursionlimit()
setrecursionlimit(max(len(graph.nodes())*2,recursionlimit))
accessibility = {} # Accessibility matrix
    # For each node i, mark each node j if there exists a path from i to j.
for each in graph:
access = {}
# Perform DFS to explore all reachable nodes
_dfs(graph, access, 1, each)
accessibility[each] = list(access.keys())
setrecursionlimit(recursionlimit)
return accessibility
# Strongly connected components
def mutual_accessibility(graph):
"""
Mutual-accessibility matrix (strongly connected components).
@type graph: graph, digraph
@param graph: Graph.
@rtype: dictionary
@return: Mutual-accessibility information for each node.
"""
recursionlimit = getrecursionlimit()
setrecursionlimit(max(len(graph.nodes())*2,recursionlimit))
mutual_access = {}
stack = []
low = {}
def visit(node):
if node in low:
return
num = len(low)
low[node] = num
stack_pos = len(stack)
stack.append(node)
for successor in graph.neighbors(node):
visit(successor)
low[node] = min(low[node], low[successor])
if num == low[node]:
component = stack[stack_pos:]
del stack[stack_pos:]
component.sort()
for each in component:
mutual_access[each] = component
for item in component:
low[item] = len(graph)
for node in graph:
visit(node)
setrecursionlimit(recursionlimit)
return mutual_access
# Connected components
def connected_components(graph):
"""
Connected components.
@type graph: graph, hypergraph
@param graph: Graph.
@rtype: dictionary
@return: Pairing that associates each node to its connected component.
"""
recursionlimit = getrecursionlimit()
setrecursionlimit(max(len(graph.nodes())*2,recursionlimit))
visited = {}
count = 1
# For 'each' node not found to belong to a connected component, find its connected
# component.
for each in graph:
if (each not in visited):
_dfs(graph, visited, count, each)
count = count + 1
setrecursionlimit(recursionlimit)
return visited
# Limited DFS implementations used by algorithms here
def _dfs(graph, visited, count, node):
"""
Depth-first search subfunction adapted for accessibility algorithms.
@type graph: graph, digraph, hypergraph
@param graph: Graph.
@type visited: dictionary
@param visited: List of nodes (visited nodes are marked non-zero).
@type count: number
@param count: Counter of connected components.
@type node: node
@param node: Node to be explored by DFS.
"""
visited[node] = count
# Explore recursively the connected component
for each in graph[node]:
if (each not in visited):
_dfs(graph, visited, count, each)
# Cut-Edge and Cut-Vertex identification
# This works by creating a spanning tree for the graph and keeping track of the preorder number
# of each node in the graph in pre[]. The low[] number for each node tracks the lowest pre[]
# number reachable from that node through tree edges plus at most one back edge.
#
# An edge (u, v) will be a cut-edge if low[v] == pre[v], where v is a child of u in the
# spanning tree. This means that, from v, no path inside v's subtree, followed by a back edge,
# can get out of the subtree. So, (u, v) is the only connection between this subtree and
# the remaining parts of the graph and, when removed, will increase the number of connected
# components.
# Similarly, a node u will be a cut node if any child v of u in the spanning tree is such
# that low[v] >= pre[u], which means that there's no path from v to outside v's subtree
# without passing through u.
def cut_edges(graph):
"""
Return the cut-edges of the given graph.
A cut edge, or bridge, is an edge of a graph whose removal increases the number of connected
components in the graph.
@type graph: graph, hypergraph
@param graph: Graph.
@rtype: list
@return: List of cut-edges.
"""
recursionlimit = getrecursionlimit()
setrecursionlimit(max(len(graph.nodes())*2,recursionlimit))
# Dispatch if we have a hypergraph
if 'hypergraph' == graph.__class__.__name__:
return _cut_hyperedges(graph)
pre = {} # Pre-ordering
low = {} # Lowest pre[] reachable from this node going down the spanning tree + one backedge
spanning_tree = {}
reply = []
pre[None] = 0
for each in graph:
if (each not in pre):
spanning_tree[each] = None
_cut_dfs(graph, spanning_tree, pre, low, reply, each)
setrecursionlimit(recursionlimit)
return reply
def _cut_hyperedges(hypergraph):
"""
Return the cut-hyperedges of the given hypergraph.
@type hypergraph: hypergraph
@param hypergraph: Hypergraph
@rtype: list
    @return: List of cut-hyperedges.
"""
edges_ = cut_nodes(hypergraph.graph)
edges = []
for each in edges_:
if (each[1] == 'h'):
edges.append(each[0])
return edges
def cut_nodes(graph):
"""
Return the cut-nodes of the given graph.
A cut node, or articulation point, is a node of a graph whose removal increases the number of
connected components in the graph.
@type graph: graph, hypergraph
@param graph: Graph.
@rtype: list
@return: List of cut-nodes.
"""
recursionlimit = getrecursionlimit()
setrecursionlimit(max(len(graph.nodes())*2,recursionlimit))
# Dispatch if we have a hypergraph
if 'hypergraph' == graph.__class__.__name__:
return _cut_hypernodes(graph)
pre = {} # Pre-ordering
low = {} # Lowest pre[] reachable from this node going down the spanning tree + one backedge
reply = {}
spanning_tree = {}
pre[None] = 0
# Create spanning trees, calculate pre[], low[]
for each in graph:
if (each not in pre):
spanning_tree[each] = None
_cut_dfs(graph, spanning_tree, pre, low, [], each)
# Find cuts
for each in graph:
# If node is not a root
if (spanning_tree[each] is not None):
for other in graph[each]:
# If there is no back-edge from descendent to a ancestral of each
if (low[other] >= pre[each] and spanning_tree[other] == each):
reply[each] = 1
# If node is a root
else:
children = 0
for other in graph:
if (spanning_tree[other] == each):
children = children + 1
# root is cut-vertex iff it has two or more children
if (children >= 2):
reply[each] = 1
setrecursionlimit(recursionlimit)
return list(reply.keys())
def _cut_hypernodes(hypergraph):
"""
Return the cut-nodes of the given hypergraph.
@type hypergraph: hypergraph
@param hypergraph: Hypergraph
@rtype: list
@return: List of cut-nodes.
"""
nodes_ = cut_nodes(hypergraph.graph)
nodes = []
for each in nodes_:
if (each[1] == 'n'):
nodes.append(each[0])
return nodes
def _cut_dfs(graph, spanning_tree, pre, low, reply, node):
"""
Depth first search adapted for identification of cut-edges and cut-nodes.
@type graph: graph, digraph
@param graph: Graph
@type spanning_tree: dictionary
@param spanning_tree: Spanning tree being built for the graph by DFS.
@type pre: dictionary
@param pre: Graph's preordering.
@type low: dictionary
@param low: Associates to each node, the preordering index of the node of lowest preordering
accessible from the given node.
@type reply: list
@param reply: List of cut-edges.
@type node: node
@param node: Node to be explored by DFS.
"""
pre[node] = pre[None]
low[node] = pre[None]
pre[None] = pre[None] + 1
for each in graph[node]:
if (each not in pre):
spanning_tree[each] = node
_cut_dfs(graph, spanning_tree, pre, low, reply, each)
if (low[node] > low[each]):
low[node] = low[each]
if (low[each] == pre[each]):
reply.append((node, each))
elif (low[node] > pre[each] and spanning_tree[node] != each):
low[node] = pre[each]
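if __name__ == '__main__':
    # A minimal sketch of the algorithms above, assuming the bundled pygraph
    # package is importable. In this path graph every edge is a bridge and
    # both interior nodes are articulation points.
    from pygraph.classes.graph import graph

    gr = graph()
    gr.add_nodes([1, 2, 3, 4])
    gr.add_edge((1, 2))
    gr.add_edge((2, 3))
    gr.add_edge((3, 4))
    print(accessibility(gr))         # every node reaches every other node
    print(connected_components(gr))  # {1: 1, 2: 1, 3: 1, 4: 1}
    print(cut_edges(gr))             # the three edges, in DFS order
    print(cut_nodes(gr))             # [2, 3] (order may vary)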
|
{
"content_hash": "519c7cee1d195d81dc763ca66035939d",
"timestamp": "",
"source": "github",
"line_count": 324,
"max_line_length": 99,
"avg_line_length": 28.37962962962963,
"alnum_prop": 0.6147906470908102,
"repo_name": "wdv4758h/ZipPy",
"id": "863e0de024b95a9501b604049899c6b3a9e5e326",
"size": "10321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edu.uci.python.benchmark/src/benchmarks/python-graph/core/pygraph/algorithms/accessibility.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "9447"
},
{
"name": "C",
"bytes": "106932"
},
{
"name": "CSS",
"bytes": "32004"
},
{
"name": "Groff",
"bytes": "27753"
},
{
"name": "HTML",
"bytes": "721863"
},
{
"name": "Java",
"bytes": "1550721"
},
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Makefile",
"bytes": "16156"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Python",
"bytes": "33672733"
},
{
"name": "R",
"bytes": "1959"
},
{
"name": "Ruby",
"bytes": "304"
},
{
"name": "Scheme",
"bytes": "125"
},
{
"name": "Shell",
"bytes": "3119"
},
{
"name": "Tcl",
"bytes": "1048"
},
{
"name": "TeX",
"bytes": "8790"
},
{
"name": "Visual Basic",
"bytes": "481"
},
{
"name": "XSLT",
"bytes": "366202"
}
],
"symlink_target": ""
}
|
import codecs
import inspect
from json import loads
import os
import sys
from bokeh.model import Model
import bokeh.models as models
from bokeh.core.properties import DataSpec
from bokeh.core.json_encoder import serialize_json
dest_dir = sys.argv[1]
classes = [member for name, member in inspect.getmembers(models) if inspect.isclass(member)]
model_class = next(klass for klass in classes if klass.__name__ == 'Model')
widget_class = next(klass for klass in classes if klass.__name__ == 'Widget')
# getclasstree returns a list which contains [ (class, parentClass), [(subClassOfClass, class), ...]]
# where the subclass list is omitted if there are no subclasses.
# If you say unique=True then mixins will be registered as leaves so don't use unique=True,
# and expect to have duplicates in the result of leaves()
all_tree = inspect.getclasstree(classes, unique=False)
def leaves(tree, underneath):
if len(tree) == 0:
return []
elif len(tree) > 1 and isinstance(tree[1], list):
subs = tree[1]
if underneath is None or tree[0][0] != underneath:
return leaves(subs, underneath) + leaves(tree[2:], underneath)
else:
# underneath=None to return all leaves from here out
return leaves(subs, underneath=None)
else:
leaf = tree[0]
tail = tree[1:]
if leaf[0] == underneath:
return [leaf]
elif underneath is not None:
return leaves(tail, underneath)
else:
return [leaf] + leaves(tail, underneath)
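# A small illustration (hypothetical classes) of the nested shape that
# inspect.getclasstree() produces and leaves() walks:
#
#     class A(object): pass
#     class B(A): pass
#     class C(A): pass
#     inspect.getclasstree([A, B, C])
#     # -> [(object, ()), [(A, (object,)), [(B, (A,)), (C, (A,))]]]
#
# leaves(tree, underneath=A) then returns the (B, ...) and (C, ...) leaf entries.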
all_json = {}
for leaf in leaves(all_tree, model_class):
klass = leaf[0]
vm_name = klass.__view_model__
if vm_name in all_json:
continue
defaults = {}
instance = klass()
for name, default in instance.properties_with_values().items():
if isinstance(default, Model):
ref = default.ref
raw_attrs = default._to_json_like(include_defaults=True)
attrs = loads(serialize_json(raw_attrs))
ref['attributes'] = attrs
del ref['id'] # there's no way the ID will match coffee
default = ref
elif isinstance(default, float) and default == float('inf'):
default = None
defaults[name] = default
all_json[vm_name] = defaults
widgets_json = {}
for leaf_widget in leaves(all_tree, widget_class):
klass = leaf_widget[0]
vm_name = klass.__view_model__
if vm_name not in widgets_json:
widgets_json[vm_name] = all_json[vm_name]
del all_json[vm_name]
def output_defaults_module(filename, defaults):
output = serialize_json(defaults, indent=2)
coffee_template = """\
all_defaults = %s
get_defaults = (name) ->
if name of all_defaults
all_defaults[name]
else
null
all_view_model_names = () ->
Object.keys(all_defaults)
module.exports = {
get_defaults: get_defaults
all_view_model_names: all_view_model_names
}
"""
try:
os.makedirs(os.path.dirname(filename))
except OSError as e:
pass
f = codecs.open(filename, 'w', 'utf-8')
f.write(coffee_template % output)
f.close()
print("Wrote %s with %d model classes" % (filename, len(defaults)))
output_defaults_module(filename = os.path.join(dest_dir, 'common/generated_defaults/models_defaults.coffee'),
defaults = all_json)
output_defaults_module(filename = os.path.join(dest_dir, 'common/generated_defaults/widgets_defaults.coffee'),
defaults = widgets_json)
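# Invocation sketch (normally driven by the bokehjs gulp build; the destination
# path below is hypothetical):
#
#     python generate_defaults.py /path/to/bokehjs/src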
|
{
"content_hash": "c53ba120b0e3c3edb0ffb18a15c0914d",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 110,
"avg_line_length": 32.88785046728972,
"alnum_prop": 0.6439329354930378,
"repo_name": "clairetang6/bokeh",
"id": "5e158387c19d32d21ec41a9eb521e01fe25523dd",
"size": "3519",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "bokehjs/gulp/tasks/generate_defaults.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5455"
},
{
"name": "CSS",
"bytes": "410003"
},
{
"name": "CoffeeScript",
"bytes": "734115"
},
{
"name": "HTML",
"bytes": "27676"
},
{
"name": "JavaScript",
"bytes": "8811"
},
{
"name": "Jupyter Notebook",
"bytes": "3981"
},
{
"name": "Makefile",
"bytes": "5842"
},
{
"name": "Python",
"bytes": "1776015"
},
{
"name": "Shell",
"bytes": "17605"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_identity_based_route
short_description: Configure identity based routing in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall feature and identity_based_route category.
      Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_identity_based_route:
description:
- Configure identity based routing.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
comments:
description:
- Comments.
type: str
name:
description:
- Name.
required: true
type: str
rule:
description:
- Rule.
type: list
suboptions:
device:
description:
- Outgoing interface for the rule. Source system.interface.name.
type: str
gateway:
description:
- "IPv4 address of the gateway (Format: xxx.xxx.xxx.xxx )."
type: str
groups:
description:
- Select one or more group(s) from available groups that are allowed to use this route. Separate group names with a space.
type: list
suboptions:
name:
description:
- Group name. Source user.group.name.
required: true
type: str
id:
description:
- Rule ID.
required: true
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure identity based routing.
fortios_firewall_identity_based_route:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
state: "present"
firewall_identity_based_route:
comments: "<your_own_value>"
name: "default_name_4"
rule:
-
device: "<your_own_value> (source system.interface.name)"
gateway: "<your_own_value>"
groups:
-
name: "default_name_9 (source user.group.name)"
id: "10"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_firewall_identity_based_route_data(json):
option_list = ['comments', 'name', 'rule']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
        # Assign back by index; rebinding the loop variable would not
        # update the list in place.
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
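# A quick illustration of the key rewriting (hypothetical input):
#
#     underscore_to_hyphen({'default_branch': [{'group_name': 'x'}]})
#     # -> {'default-branch': [{'group-name': 'x'}]}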
def firewall_identity_based_route(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
    elif 'state' in data['firewall_identity_based_route'] and data['firewall_identity_based_route']['state']:
state = data['firewall_identity_based_route']['state']
else:
state = True
firewall_identity_based_route_data = data['firewall_identity_based_route']
filtered_data = underscore_to_hyphen(filter_firewall_identity_based_route_data(firewall_identity_based_route_data))
if state == "present":
return fos.set('firewall',
'identity-based-route',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall',
'identity-based-route',
mkey=filtered_data['name'],
vdom=vdom)
def is_successful_status(status):
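    # A DELETE that returns HTTP 404 also counts as success, since the
    # object being removed is already absent (idempotent delete).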
    return status['status'] == "success" or \
        (status['http_method'] == "DELETE" and status['http_status'] == 404)
def fortios_firewall(data, fos):
if data['firewall_identity_based_route']:
resp = firewall_identity_based_route(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"firewall_identity_based_route": {
"required": False, "type": "dict", "default": None,
"options": {
"state": {"required": False, "type": "str",
"choices": ["present", "absent"]},
"comments": {"required": False, "type": "str"},
"name": {"required": True, "type": "str"},
"rule": {"required": False, "type": "list",
"options": {
"device": {"required": False, "type": "str"},
"gateway": {"required": False, "type": "str"},
"groups": {"required": False, "type": "list",
"options": {
"name": {"required": True, "type": "str"}
}},
"id": {"required": True, "type": "int"}
}}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_firewall(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_firewall(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
|
{
"content_hash": "0f26cb328a2cba2f6c0ed47b96047054",
"timestamp": "",
"source": "github",
"line_count": 382,
"max_line_length": 150,
"avg_line_length": 32.47643979057592,
"alnum_prop": 0.5577946155086249,
"repo_name": "thaim/ansible",
"id": "60b040ece438c80f1d2da28eaf8d49aa8d77decf",
"size": "12424",
"binary": false,
"copies": "13",
"ref": "refs/heads/fix-broken-link",
"path": "lib/ansible/modules/network/fortios/fortios_firewall_identity_based_route.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7"
},
{
"name": "Shell",
"bytes": "246"
}
],
"symlink_target": ""
}
|
from django import forms
try:
from django.core.urlresolvers import reverse
except ImportError:
from django.urls import reverse
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout
from smartfields.crispy.layout import ImageField, VideoField, LimitedField
from smartfields.processors.image import supported_formats
from .models import TextTesting, ImageTesting, VideoTesting
class TextTestingForm(forms.ModelForm):
class Meta:
model = TextTesting
fields = ('title',)
def __init__(self, *args, **kwargs):
super(TextTestingForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.layout = Layout(LimitedField('title'))
class ImageTestingForm(forms.ModelForm):
class Meta:
model = ImageTesting
fields = ('image_2',)
def __init__(self, *args, **kwargs):
super(ImageTestingForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.layout = Layout(
ImageField('image_2', plupload_options={
'url': reverse('smartfields:upload', kwargs={
'app_label': 'tests',
'model': 'imagetesting',
'field_name': 'image_2'
}),
'filters': {
'max_file_size': "20mb",
'mime_types': [{'title': "Image Files",
'extensions': supported_formats.input_exts}]
}}))
class VideoTestingForm(forms.ModelForm):
class Meta:
model = VideoTesting
fields = ('video_1',)
def __init__(self, *args, **kwargs):
super(VideoTestingForm, self).__init__(*args, **kwargs)
self.helper = FormHelper(self)
self.helper.layout = Layout(
VideoField('video_1', plupload_options={
'url': reverse('smartfields:upload', kwargs={
'app_label': 'tests',
'model': 'videotesting',
'field_name': 'video_1'
}),
'filters': {
'max_file_size': "1024mb",
'mime_types': [{'title': "Video Files",
'extensions': "avi,mp4,mpg,mpeg,wmv,mov,webm"}]
}}))
|
{
"content_hash": "18c97bfe8b9f26923e5e97b6f7e38dda",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 83,
"avg_line_length": 32.55555555555556,
"alnum_prop": 0.5439419795221843,
"repo_name": "lehins/django-smartfields",
"id": "417ebc3787dcd1751291ceeefd4dc4b1f2a6a319",
"size": "2344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_app/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "433"
},
{
"name": "CoffeeScript",
"bytes": "14579"
},
{
"name": "HTML",
"bytes": "5381"
},
{
"name": "Python",
"bytes": "137111"
}
],
"symlink_target": ""
}
|
from django.utils.translation import activate
from django.test import TestCase
try:
    from django.core.urlresolvers import reverse
except ImportError:
    from django.urls import reverse
class TestHomePage(TestCase):
def test_uses_index_template(self):
activate('en')
response = self.client.get(reverse('home'))
self.assertTemplateUsed(response, 'taskbuster/index.html')
def test_uses_base_template(self):
activate('en')
response = self.client.get(reverse('home'))
self.assertTemplateUsed(response, 'base.html')
|
{
"content_hash": "d869eb9f6eec3fc1d51185b962251fe9",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 66,
"avg_line_length": 31.8125,
"alnum_prop": 0.7033398821218074,
"repo_name": "caithess/taskbuster",
"id": "998443e6befd78039334b42faa6b9cca2f2e87fc",
"size": "533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taskbuster/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "255"
},
{
"name": "HTML",
"bytes": "7124"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Python",
"bytes": "22767"
}
],
"symlink_target": ""
}
|
def main():
pass
if __name__ == '__main__':
main()
# Load the Chrono::Engine unit!!!
import ChronoEngine_python_core as chrono
# Create a physical system,
my_system = chrono.ChSystemNSC()
# Add two bodies
my_shbodyA = chrono.ChBody()
my_shbodyA.SetMass(20)
my_shbodyA.SetInertiaXX( chrono.ChVectorD(10,10,10) )
print (my_shbodyA.GetInertia() )
my_shbodyA.SetPos(chrono.ChVectorD(1,-1,0))
my_shbodyA.GetCollisionModel().AddBox(10,1,10)
my_shbodyA.SetBodyFixed(True)
my_shbodyA.SetCollide(True)
my_shbodyB = chrono.ChBody()
my_shbodyB.SetPos(chrono.ChVectorD(0,2,0))
my_shbodyB.GetCollisionModel().AddBox(1,1,1)
my_shbodyB.SetCollide(True)
my_shmarker = chrono.ChMarker()
my_funct = chrono.ChFunction_Sine(0,0.5,3)
my_shmarker.SetMotion_X(my_funct)
my_shmarker.SetPos(chrono.ChVectorD(1,2,3))
my_shbodyB.AddMarker(my_shmarker)
my_system.Add(my_shbodyA)
my_system.Add(my_shbodyB)
# Define surface material(s)
my_shmaterial = chrono.ChMaterialSurfaceNSC()
my_shmaterial.SetFriction(0.3)
my_shmaterial.SetCompliance(0)
my_shbodyA.SetMaterialSurface(my_shmaterial)
my_shbodyB.SetMaterialSurface(my_shmaterial)
# Add Contact callback (TO FIX!!)
##class MyContactCallback(chrono.ChCustomCollisionPointCallbackP):
## def __init__(self):
## chrono.ChCustomCollisionPointCallbackP.__init__(self)
## def ContactCallback(self,collinfo,matcouple):
## print (' add contact: ' , collinfo.distance, matcouple.static_friction)
##
##my_call = MyContactCallback()
##my_system.SetCustomCollisionPointCallback(my_call)
# Report Contact callback
class MyReportContactCallback(chrono.ChReportContactCallbackP):
def __init__(self):
chrono.ChReportContactCallbackP.__init__(self)
def OnReportContact(self,vA,vB,cA,dist,force,torque,modA,modB):
print (' contact: point A=' , vA, ' dist=',dist)
return True # return False to stop reporting contacts
my_rep = MyReportContactCallback()
# Simulation loop
my_system.SetChTime(0)
while (my_system.GetChTime() < 1.2) :
my_system.DoStepDynamics(0.01)
print ('time=', my_system.GetChTime(), ' bodyB y=', my_shbodyB.GetPos().y())
my_system.GetContactContainer().ReportAllContacts(my_rep)
# Iterate over added bodies - how to use iterators
print ('This is the list of bodies in the system:')
iterbodies = my_system.IterBeginBodies()
while (iterbodies != my_system.IterEndBodies()):
print (' body pos=', iterbodies.Ref().GetPos() )
iterbodies = iterbodies.Next()
# Easier (but a bit slower) iteration in the style of Python:
print ('This is the list of bodies in the system:')
for abody in chrono.IterBodies(my_system):
print (' body pos=', abody.GetPos() )
# Also iterate on links, Python style:
for alink in chrono.IterLinks(my_system):
print (' link: ', alink )
# Move a body, using a ChFrame
my_displacement = chrono.ChFrameMovingD(chrono.ChVectorD(5,1,0));
my_shbodyA %= my_displacement
# ..also as:
# my_shbody.ConcatenatePreTransformation(my_displacement)
print ('Moved body pos=', my_shbodyA.GetPos() )
# Use a body with an auxiliary reference (REF) that does not correspond
# to the center of gravity (COG)
body_1= chrono.ChBodyAuxRef()
body_1.SetName('Parte1-1')
body_1.SetPos(chrono.ChVectorD(-0.0445347481124079,0.0676266363930238,-0.0230808979433518))
body_1.SetRot(chrono.ChQuaternionD(1,0,0,0))
body_1.SetMass(346.17080777653)
body_1.SetInertiaXX(chrono.ChVectorD(48583.2418823358,526927.118351673,490689.966726565))
body_1.SetInertiaXY(chrono.ChVectorD(1.70380722975012e-11,1.40840344485366e-11,-2.31869065456271e-12))
body_1.SetFrame_COG_to_REF(chrono.ChFrameD(chrono.ChVectorD(68.9923703887577,-60.1266363930238,70.1327223302498),chrono.ChQuaternionD(1,0,0,0)))
myasset = chrono.ChObjShapeFile()
myasset.SetFilename("shapes/test.obj")
body_1.GetAssets().push_back(myasset)
|
{
"content_hash": "799cce7ad2c581e478807c4e6c8895d9",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 144,
"avg_line_length": 29.844961240310077,
"alnum_prop": 0.7358441558441559,
"repo_name": "jcmadsen/chrono",
"id": "5f2712f18a44880fc3836b836afb538232726f3e",
"size": "4064",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/demos/python/demo_python_2.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1834210"
},
{
"name": "C++",
"bytes": "39292833"
},
{
"name": "CMake",
"bytes": "254507"
},
{
"name": "CSS",
"bytes": "24348"
},
{
"name": "GLSL",
"bytes": "4214"
},
{
"name": "HTML",
"bytes": "14262"
},
{
"name": "Inno Setup",
"bytes": "47881"
},
{
"name": "JavaScript",
"bytes": "4943"
},
{
"name": "Matlab",
"bytes": "6957"
},
{
"name": "Objective-C",
"bytes": "40098"
},
{
"name": "Python",
"bytes": "176227"
},
{
"name": "Ruby",
"bytes": "2220"
}
],
"symlink_target": ""
}
|
from wtforms import TextField
from wtforms.validators import DataRequired
from ..datacenter import DatacenterCheckForm
class CheckForm(DatacenterCheckForm):
''' Creates a wtforms form object for monitors '''
title = "Heroku: All Dynos Not Idle"
description = """
This monitor returns false if any Heroku Dynos associated with the specified Application Name are in an Idle state. This monitor can be used to identify when Heroku Dynos should be scaled down or restarted.
"""
placeholders = DatacenterCheckForm.placeholders
placeholders.update({
'appname' : 'Application Name',
})
apikey = TextField(
"API Key",
description=DatacenterCheckForm.descriptions['apikey'],
validators=[DataRequired(
message='API Key is a required field')])
appname = TextField(
"Application Name",
description=DatacenterCheckForm.descriptions['heroku']['appname'],
validators=[DataRequired(
message='Application Name is a required field')])
if __name__ == '__main__':
pass
|
{
"content_hash": "d10477b22439773dec5bc39eaad2ba25",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 210,
"avg_line_length": 35.96666666666667,
"alnum_prop": 0.6886005560704356,
"repo_name": "madflojo/cloudroutes-service",
"id": "d3422af1a78e53eca1fc6fd01ae676d9b8b68141",
"size": "1352",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/web/monitorforms/heroku-dyno-not-idle/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "17816"
},
{
"name": "HTML",
"bytes": "227943"
},
{
"name": "JavaScript",
"bytes": "3271"
},
{
"name": "Python",
"bytes": "678083"
},
{
"name": "Shell",
"bytes": "5859"
}
],
"symlink_target": ""
}
|
import os
from os.path import join as opj
import numpy as np
import matplotlib.pyplot as plt
cwd = os.getcwd()
# results[0] = test loss; results[1] = test accuracy
translate_results = np.load(opj(cwd, 'perclearn/data/results/translate.npz'))['arr_0']
rotate_results = np.load(opj(cwd, 'perclearn/data/results/rotate.npz'))['arr_0']
# Translation plots
x = np.linspace(0, 28, 28)
f, axarr = plt.subplots(2, sharex=True)
axarr[0].scatter(x, translate_results[0,:])
axarr[0].set_title('Test Loss - Translation')
axarr[1].scatter(x, translate_results[1,:])
axarr[1].set_title('Test Accuracy - Translation')
plt.savefig('/home/asier/Desktop/translate.png')
# Rotation plots
plt.figure()
x = np.linspace(0, 360, 36)
f, axarr = plt.subplots(2, sharex=True)
axarr[0].scatter(x, rotate_results[0,:])
axarr[0].set_title('Test Loss - Rotation')
axarr[1].scatter(x, rotate_results[1,:])
axarr[1].set_title('Test Accuracy - Rotation')
plt.savefig('/home/asier/Desktop/rotate.png')
|
{
"content_hash": "30c55edd99bde6c494b09e9064caa5e4",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 86,
"avg_line_length": 32.43333333333333,
"alnum_prop": 0.7153134635149023,
"repo_name": "erramuzpe/seattle-perceptual-learning",
"id": "9c0b209a355e96547bddec25d7fe84c6dc64bbd9",
"size": "997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "perclearn/reports/figure1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "37940"
},
{
"name": "Python",
"bytes": "47517"
}
],
"symlink_target": ""
}
|
import datetime
from distutils.core import setup
import py2exe
import site
import os
import os.path
class Target(object):
'''Target is the baseclass for all executables that are created.
It defines properties that are shared by all of them.
'''
def __init__(self, **kw):
self.__dict__.update(kw)
# the VersionInfo resource, uncomment and fill in those items
# that make sense:
# The 'version' attribute MUST be defined, otherwise no versioninfo will be built:
# self.version = "1.0"
# self.company_name = "Company Name"
        # self.copyright = "Copyright Company Name © 2013"
        # self.legal_copyright = "Copyright Company Name © 2013"
# self.legal_trademark = ""
# self.product_version = "1.0.0.0"
# self.product_name = "Product Name"
# self.private_build = "foo"
# self.special_build = "bar"
def copy(self):
return Target(**self.__dict__)
def __setitem__(self, name, value):
self.__dict__[name] = value
RT_BITMAP = 2
RT_MANIFEST = 24
# A manifest which specifies the executionlevel
# and windows common-controls library version 6
manifest_template = '''<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
<assemblyIdentity
version="5.0.0.0"
processorArchitecture="*"
name="%(prog)s"
type="win32"
/>
<description>%(prog)s</description>
<trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
<security>
<requestedPrivileges>
<requestedExecutionLevel
level="%(level)s"
uiAccess="false">
</requestedExecutionLevel>
</requestedPrivileges>
</security>
</trustInfo>
<dependency>
<dependentAssembly>
<assemblyIdentity
type="win32"
name="Microsoft.Windows.Common-Controls"
version="6.0.0.0"
processorArchitecture="*"
publicKeyToken="6595b64144ccf1df"
language="*"
/>
</dependentAssembly>
</dependency>
</assembly>
'''
main = Target(
# We can extend or override the VersionInfo of the base class:
# version = "1.0",
# file_description = "File Description",
# comments = "Some Comments",
# internal_name = "spam",
script="main.py", # path of the main script
# Allows to specify the basename of the executable, if different from 'main'
# dest_base = "main",
# Icon resources:[(resource_id, path to .ico file), ...]
icon_resources=[(1, r"img/main.ico")],
other_resources = [(RT_MANIFEST, 1, (manifest_template % dict(prog="main", level="asInvoker")).encode("utf-8")),
# for bitmap resources, the first 14 bytes must be skipped when reading the file:
# (RT_BITMAP, 1, open("bitmap.bmp", "rb").read()[14:]),
]
)
# ``zipfile`` and ``bundle_files`` options explained:
# ===================================================
#
# zipfile is the Python runtime library for your exe/dll-files; it
# contains in a ziparchive the modules needed as compiled bytecode.
#
# If 'zipfile=None' is used, the runtime library is appended to the
# exe/dll-files (which will then grow quite large), otherwise the
# zipfile option should be set to a pathname relative to the exe/dll
# files, and a library-file shared by all executables will be created.
#
# The py2exe runtime *can* use extension module by directly importing
# the from a zip-archive - without the need to unpack them to the file
# system. The bundle_files option specifies where the extension modules,
# the python dll itself, and other needed dlls are put.
#
# bundle_files == 3:
# Extension modules, the Python dll and other needed dlls are
# copied into the directory where the zipfile or the exe/dll files
# are created, and loaded in the normal way.
#
# bundle_files == 2:
# Extension modules are put into the library ziparchive and loaded
# from it directly.
# The Python dll and any other needed dlls are copied into the
# directory where the zipfile or the exe/dll files are created,
# and loaded in the normal way.
#
# bundle_files == 1:
# Extension modules and the Python dll are put into the zipfile or
# the exe/dll files, and everything is loaded without unpacking to
# the file system. This does not work for some dlls, so use with
# caution.
#
# bundle_files == 0:
# Extension modules, the Python dll, and other needed dlls are put
# into the zipfile or the exe/dll files, and everything is loaded
# without unpacking to the file system. This does not work for
# some dlls, so use with caution.
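# As a minimal illustration (hypothetical values, not used by this script):
# a fully self-contained single-file build would combine the options as
#
#   setup(windows=[main],
#         zipfile=None,
#         options={"py2exe": dict(bundle_files=1, compressed=True)})
#
# whereas this script keeps bundle_files=3 below, copying the dlls next to
# the exe, which is the most compatible choice.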
def findSitePackagesPath(requestedPath):
for i in site.getsitepackages():
path = os.path.join(i, requestedPath)
if os.path.exists(path):
return path
raise Exception("cannot find file {0}".format(requestedPath))
print(findSitePackagesPath(os.path.join("requests", "cacert.pem")))
excludes = ["pygame", # disabled soundsystems
"pyglet"]
includes = ["sip",
"PyQt5",
"PyQt5.QtCore",
"PyQt5.QtGui",
"PyQt5.QtMultimedia",
"PyQt5.QtNetwork", # for QtMultimedia
#"pygame.mixer",
"requests",
"requests.adapters",
"certifi"]
datafiles = [
("platforms", [ findSitePackagesPath(os.path.join("PyQt5", "plugins", "platforms", "qwindows.dll")) ]),
("imageformats", [ findSitePackagesPath(os.path.join("PyQt5", "plugins", "imageformats", "qico.dll")) ]),
("mediaservice", [ # for QtMultimedia
#findSitePackagesPath(os.path.join("PyQt5", "plugins", "mediaservice", "dsengine.dll")),
#findSitePackagesPath(os.path.join("PyQt5", "plugins", "mediaservice", "qtmedia_audioengine.dll")),
findSitePackagesPath(os.path.join("PyQt5", "plugins", "mediaservice", "wmfengine.dll")) # only this required
]),
("img",[
r"img\\main.ico",
r"img\\illegal.png",
r"img\\power_1.png",
r"img\\power_2.png",
r"img\\power_3.png",
r"img\\power_4.png",
r"img\\power_5.png",
r"img\\power_6.png",
r"img\\power_7.png",
r"img\\power_8.png",
r"img\\power_9.png",
r"img\\power_10.png",
r"img\\EDAssetsLicense"
]),
("sounds",[
r"sounds\\soundcredits.txt",
r"sounds\\startup.wav",
r"sounds\\error.wav",
r"sounds\\error1.wav",
r"sounds\\error2.wav",
r"sounds\\search.wav",
r"sounds\\search1.wav",
r"sounds\\search2.wav",
r"sounds\\search3.wav"
]),
("", [
findSitePackagesPath(os.path.join("PyQt5", "ssleay32.dll")), # for QtMultimedia
findSitePackagesPath(os.path.join("PyQt5", "libeay32.dll")), # for QtMultimedia
r"..\\extraInstallFiles\\MSVCP100.dll",
r"..\\extraInstallFiles\\MSVCR100.dll",
r"version.txt"
]),
("requests", [ findSitePackagesPath(os.path.join("requests", "cacert.pem")) ])
]
###############################
# WRITING VERSION STRING
majorversion=0
minorversion=14
builddate=datetime.datetime.now().strftime("%y%m%d")
versionstring=str(majorversion)+'.'+str(minorversion)+'.'+builddate
with open("version.txt", "w") as f:
f.write(versionstring)
py2exe_options = dict(
packages = [],
excludes = excludes,
## excludes = "tof_specials Tkinter".split(),
## ignores = "dotblas gnosis.xml.pickle.parsers._cexpat mx.DateTime".split(),
## dll_excludes = "MSVCP90.dll mswsock.dll powrprof.dll".split(),
optimize=0,
compressed=True, # uncompressed may or may not have a faster startup
bundle_files=3,#1,
dist_dir='dist',
includes=includes
)
# Some options can be overridden by command line options...
setup(name="name",
# console based executables
#console=[main],
# windows subsystem executables (no console)
windows=[main],
data_files=datafiles,
# py2exe options
zipfile=None,
options={"py2exe": py2exe_options},
)
|
{
"content_hash": "4049b97c7b9a90eb42386472d374f6e1",
"timestamp": "",
"source": "github",
"line_count": 243,
"max_line_length": 124,
"avg_line_length": 36.08230452674897,
"alnum_prop": 0.5806341240875912,
"repo_name": "BeTeK/EliteMerchant",
"id": "cdd12e73566695094f8778d4c193bbcc84fc6869",
"size": "8871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1038"
},
{
"name": "NSIS",
"bytes": "3152"
},
{
"name": "Python",
"bytes": "353540"
}
],
"symlink_target": ""
}
|
from tkinter import *
#from tkinter import messagebox
from tkinter import ttk
from tkinter import filedialog
from datetime import datetime
import plotly.plotly as py
from plotly.graph_objs import Scatter
import plotly.graph_objs as go
import random
import sqlite3
import pandas
conn=sqlite3.connect('MultiServerDatabase_SRPT.db')
NumJobs = []
AvgNumJobs = []
NumJobsTime = []
NUM_SERVERS = 0
#----------------------------------------------------------------------#
# Class: GUI
#
# This class is used as a graphical user interface for the application.
#
#----------------------------------------------------------------------#
class GUI(Tk):
def __init__(self, master):
Tk.__init__(self, master)
self.master = master # reference to parent
self.statusText = StringVar()
global SEED
#SEED = random.randint(0, 1000000000)
SEED = 994863731
random.seed(SEED)
# Create the input frame
self.frameIn = Input(self)
self.frameIn.pack(side=TOP, fill=BOTH, padx = 5, pady =5, ipadx = 5, ipady = 5)
# Create the output frame
self.frameOut = Output(self)
self.frameOut.pack(side=TOP, fill=BOTH, padx = 5, pady =5, ipadx = 5, ipady = 5)
# Bind simulate button
self.bind("<<input_simulate>>", self.submit)
# Bind save button
self.bind("<<output_save>>", self.saveData)
# Bind clear button
self.bind("<<output_clear>>", self.clearConsole)
# Bind stop button
self.bind("<<stop_sim>>", self.stopSimulation)
# Status Bar
status = Label(self.master, textvariable=self.statusText, bd=1, relief=SUNKEN, anchor=W)
status.pack(side=BOTTOM, anchor=W, fill=X)
# Initialize console
self.consoleFrame = Frame(self.frameOut)
self.console = Text(self.consoleFrame, wrap = WORD)
self.makeConsole()
self.printIntro()
self.updateStatusBar("Waiting for submit...")
def makeConsole(self):
#self.consoleFrame = Frame(self.frameOut)
self.consoleFrame.pack(side=TOP, padx=5, pady=5)
#self.console = Text(self.consoleFrame, wrap = WORD)
self.console.config(state=DISABLED) # start with console as disabled (non-editable)
self.scrollbar = Scrollbar(self.consoleFrame)
self.scrollbar.config(command = self.console.yview)
self.console.config(yscrollcommand=self.scrollbar.set)
self.console.grid(column=0, row=0)
self.scrollbar.grid(column=1, row=0, sticky='NS')
def writeToConsole(self, text = ' '):
self.console.config(state=NORMAL) # make console editable
self.console.insert(END, '%s\n'%text)
self.update()
self.console.yview(END) # auto-scroll
self.console.config(state=DISABLED) # disable (non-editable) console
def saveData(self, event):
# Get filename
        filename = filedialog.asksaveasfilename(title="Save as...", defaultextension='.txt')
        if filename:
            with open(filename, mode='w', encoding='utf-8') as file:
                data = self.console.get(1.0, END)
                file.write(data)
# Empty old saves at the begining of each simulation
def clearSavedJobs(self):
with open("Jobs.xls", "w") as myFile:
myFile.write('Job Name,Completion Time' + '\n')
myFile.close()
def clearSavedArrivals(self):
with open("Arrivals.xls", "w") as myFile:
myFile.write('Job Name,Arrival Time,RPT,ERPT' + '\n')
myFile.close()
def clearSavedNumJobs(self):
with open("AvgNumberOfJobs.xls", "w") as myFile:
myFile.write('Current Time, Average Number Of Jobs, Current Number Of Jobs' + '\n')
myFile.close()
def clearConsole(self, event):
self.console.config(state=NORMAL) # make console editable
self.console.delete('1.0', END)
self.console.config(state=DISABLED) # disable (non-editable) console
def updateStatusBar(self, text=' '):
self.statusText.set(text)
def printIntro(self):
self.writeToConsole("SRPTE \n\n This application simulates a single server with Poisson arrivals and processing times of a general distribution. Each arrival has an estimation error within a percent error taken as input. Jobs are serviced in order of shortest remaining processing time.")
def saveParams(self, numServers, load, arrRate, arrDist, procRate, procDist, percErrorMin, percErrorMax, simLength, alpha, lower, upper):
##params = pandas.DataFrame(columns=('seed', 'numServers', 'load', 'arrRate', 'arrDist', 'procRate', 'procDist', 'alpha', 'lower', 'upper', 'percErrorMin', 'percErrorMax', 'simLength'))
print (SEED)
params = pandas.DataFrame({ 'seed' : [SEED],
'numServers' : [numServers],
'load' : [load],
'arrRate' : [arrRate],
'arrDist' : [arrDist],
'procRate' : [procRate],
'procDist' : [procDist],
'alpha' : [alpha],
'lower' : [lower],
'upper' : [upper],
'percErrorMin' : [percErrorMin],
'percErrorMax' : [percErrorMax],
'simLength' : [simLength],
'avgNumJobs' : [MachineClass.AvgNumJobs]
})
params.to_sql(name='parameters', con=conn, if_exists='append')
print (params)
def printParams(self, numServers, load, arrDist, procRate, procDist, percErrorMin, percErrorMax, simLength):
self.writeToConsole("--------------------------------------------------------------------------------")
self.writeToConsole("PARAMETERS:")
self.writeToConsole("Number of Servers = %s"%numServers)
self.writeToConsole("Load = %.4f"%load)
#self.writeToConsole("Arrival Rate = %.4f"%arrRate)
self.writeToConsole("Arrival Distribution = %s"%arrDist)
self.writeToConsole("Processing Rate = %.4f, Processing Distribution = %s"%(procRate, str(procDist)))
self.writeToConsole("% Error = " + u"\u00B1" + " %.4f, %.4f"%(percErrorMin, percErrorMax))
self.writeToConsole("Simulation Length = %.4f\n\n"%simLength)
def calcVariance(self, List, avg):
var = 0
for i in List:
var += (avg - i)**2
return var/len(List)
def plotNumJobsInSys(self):
py.sign_in('mailacrs','wowbsbc0qo')
trace0 = Scatter(x=NumJobsTime, y=NumJobs)
data = [trace0]
layout = go.Layout(
title='Number of Jobs Over Time',
xaxis=dict(
title='Time',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis=dict(
title='Number of Jobs',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
)
)
fig = go.Figure(data=data, layout=layout)
unique_url = py.plot(fig, filename = 'SRPT_NumJobsInSys')
def plotAvgNumJobsInSys(self):
py.sign_in('mailacrs','wowbsbc0qo')
trace0 = Scatter(x=NumJobsTime, y=AvgNumJobs)
data = [trace0]
layout = go.Layout(
title='Average Number of Jobs Over Time',
xaxis=dict(
title='Time',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
),
yaxis=dict(
title='Number of Jobs',
titlefont=dict(
family='Courier New, monospace',
size=18,
color='#7f7f7f'
)
)
)
fig = go.Figure(data=data, layout=layout)
unique_url = py.plot(fig, filename = 'SRPT_AvgNumJobsInSys')
def stopSimulation(self, event):
MachineClass.StopSim = True
def submit(self, event):
self.updateStatusBar("Simulating...")
#self.clearSavedJobs()
#self.clearSavedArrivals()
#self.clearSavedNumJobs()
I = Input(self)
# Set global variable for num servers to value inputed
global NUM_SERVERS
NUM_SERVERS = I.valuesList[0]
self.printParams(I.valuesList[0], #num Servers
I.valuesList[1], #load
#I.valuesList[2], # arrival rate
'Exponential', #arrival
I.valuesList[3], I.distList[1], #processing rate
I.valuesList[4], #error min
I.valuesList[5], #error max
I.valuesList[6]) #sim time
main.timesClicked = 0
# Start process
MC = MachineClass(self)
MC.run( #I.valuesList[0], #num Servers
I.valuesList[1], #load
#I.valuesList[2], # arrival rate
'Exponential', # arrival
I.valuesList[3], I.distList[1], # processing
I.valuesList[4], # error min
I.valuesList[5], # error max
I.valuesList[6]) # sim time
self.saveParams(I.valuesList[0], #num Servers
I.valuesList[1], #load
'?', # arrival rate
'Exponential', # arrival dist
'?', I.distList[1], # processing
I.valuesList[4], # error min
I.valuesList[5], # error max
I.valuesList[6], # sim time
JobClass.BPArray[0], # alpha
JobClass.BPArray[1], # lower
JobClass.BPArray[2]) # upper
self.plotNumJobsInSys()
self.plotAvgNumJobsInSys()
self.updateStatusBar("Simulation complete.")
#----------------------------------------------------------------------#
# Class: Input
#
# This class is used as a graphical user interface for a larger
# application.
#
#----------------------------------------------------------------------#
class Input(LabelFrame):
def __init__(self, master):
LabelFrame.__init__(self, master, text = "Input")
self.master = master
self.numServersInput = IntVar()
self.loadInput = DoubleVar()
self.arrivalRateInput = DoubleVar()
self.processingRateInput = DoubleVar()
self.percentErrorMinInput = DoubleVar()
self.percentErrorMaxInput = DoubleVar()
self.simLengthInput = DoubleVar()
self.errorMessage = StringVar()
self.comboboxVal = StringVar()
self.numServersInput.set(2) ##################################CHANGE LATER
self.loadDefault = 0.70 ##################################CHANGE LATER
self.arrRateDefault = 0.8 ##################################CHANGE LATER
self.procRateDefault = 0.5 ##################################CHANGE LATER
self.loadInput.set(self.loadDefault)
#self.arrivalRateInput.set(self.arrRateDefault)
self.processingRateInput.set(self.procRateDefault)
self.percentErrorMinInput.set(-50)
self.percentErrorMaxInput.set(0)
self.simLengthInput.set(5000000.0)
self.grid_columnconfigure(0, weight=2)
self.grid_columnconfigure(1, weight=2)
self.grid_columnconfigure(2, weight=1)
self.grid_columnconfigure(3, weight=1)
self.grid_columnconfigure(4, weight=1)
self.grid_columnconfigure(5, weight=2)
self.grid_rowconfigure(0, weight=1)
# Labels
labels = ['Number of Servers', 'System Load', 'Interarrival Rate (' + u'\u03bb' + ')', 'Processing Rate (' + u'\u03bc' + ')', '% Error' , 'Simulation Length']
r=0
c=0
for elem in labels:
Label(self, text=elem).grid(row=r, column=c)
r=r+1
Label(self, textvariable=self.errorMessage, fg="red", font=14).grid(row=6, columnspan=4) #error message, invalid input
Label(self, text="Min").grid(row=4, column=1, sticky = E)
Label(self, text="Max").grid(row=4, column=3, sticky = W)
# Entry Boxes
self.numServersEntry = Entry(self, textvariable = self.numServersInput)
self.loadEntry = Entry(self, textvariable = self.loadInput)
self.arrivalRateEntry = Entry(self, textvariable = self.arrivalRateInput)
self.procRateEntry = Entry(self, textvariable = self.processingRateInput)
self.minErrorEntry = Entry(self, textvariable = self.percentErrorMinInput, width = 5)
self.maxErrorEntry = Entry(self, textvariable = self.percentErrorMaxInput, width = 5)
self.simLengthEntry = Entry(self, textvariable = self.simLengthInput)
self.numServersEntry.grid(row = 0, column = 1, columnspan = 4)
self.loadEntry.grid(row = 1, column = 1, columnspan = 4)
self.arrivalRateEntry.grid(row = 2, column = 1, columnspan = 4)
self.procRateEntry.grid(row = 3, column = 1, columnspan = 4)
self.minErrorEntry.grid(row = 4, column = 2, sticky = E)
self.maxErrorEntry.grid(row = 4, column = 4, sticky = W)
self.simLengthEntry.grid(row = 5, column = 1, columnspan = 4)
self.loadInput.trace('w', self.entryBoxChange)
self.arrivalRateInput.trace('w', self.entryBoxChange)
self.refreshLoad()
# Distribution Dropdowns
self.distributions = ('Select Distribution', 'Poisson', 'Exponential', 'Uniform', 'Bounded Pareto', 'Custom')
self.ArrivalDistComboBox = ttk.Combobox(self, values = self.distributions, state = 'disabled')
self.ArrivalDistComboBox.current(2) # set selection
self.ArrivalDistComboBox.grid(row = 1, column = 5)
self.ProcessDistComboBox = ttk.Combobox(self, textvariable = self.comboboxVal, values = self.distributions, state = 'readonly')
self.ProcessDistComboBox.current(4) # set default selection #####################CHANGE LATER
self.ProcessDistComboBox.grid(row = 2, column = 5)
self.comboboxVal.trace("w", self.selectionChange) # refresh on change
self.refreshComboboxes()
# Simulate Button
self.simulateButton = Button(self, text = "SIMULATE", command = self.onButtonClick)
self.simulateButton.grid(row = 7, columnspan = 6)
def entryBoxChange(self, name, index, mode):
self.refreshLoad()
def refreshLoad(self):
if len(self.loadEntry.get()) > 0:
self.arrivalRateEntry.delete(0, 'end')
self.arrivalRateEntry.configure(state = 'disabled')
else:
self.arrivalRateEntry.configure(state = 'normal')
if len(self.arrivalRateEntry.get()) > 0:
self.loadEntry.delete(0, 'end')
self.loadEntry.configure(state = 'disabled')
else:
self.loadEntry.configure(state = 'normal')
def selectionChange(self, name, index, mode):
self.refreshComboboxes()
def refreshComboboxes(self):
selection = self.ProcessDistComboBox.get()
if selection == 'Bounded Pareto':
#self.procRateEntry.delete(0, 'end')
self.procRateEntry.configure(state = 'disabled')
else:
self.procRateEntry.configure(state = 'normal')
#self.processingRateInput.set(self.procRateDefault)
def onButtonClick(self):
if (self.getNumericValues() == 0) and (self.getDropDownValues() == 0):
# Send to submit button in main
self.simulateButton.event_generate("<<input_simulate>>")
def getNumericValues(self):
try:
numberOfServers = self.numServersInput.get()
load = self.loadInput.get()
#arrivalRate = self.arrivalRateInput.get()
processingRate = self.processingRateInput.get()
percentErrorMin = self.percentErrorMinInput.get()
percentErrorMax = self.percentErrorMaxInput.get()
maxSimLength = self.simLengthInput.get()
except ValueError:
self.errorMessage.set("One of your inputs is an incorrect type, try again.")
return 1
#try:
# arrRate = float(self.arrivalRateInput.get())
#except ValueError:
# arrRate = 0.0
#try:
# procRate = float(self.processingRateInput.get())
#except ValueError:
# procRate = 0.0
if load <= 0.0:
self.errorMessage.set("System load must be a non-zero value!")
return 1
#if arrRate <= 0.0:
# self.errorMessage.set("Arrival rate must be a non-zero value!")
# return 1
#if procRate != None and processingRate <= 0.0:
# self.errorMessage.set("Processing rate must be a non-zero value!")
# return 1
if maxSimLength <= 0.0:
self.errorMessage.set("Simulation length must be a non-zero value!")
return 1
else:
self.errorMessage.set("")
Input.valuesList = [numberOfServers, load, 0.0, processingRate, percentErrorMin, percentErrorMax, maxSimLength]
return 0
def getDropDownValues(self):
comboBox1Value = self.ArrivalDistComboBox.get()
comboBox2Value = self.ProcessDistComboBox.get()
if comboBox2Value == 'Select Distribution':
self.errorMessage.set("You must select a distribution for the processing rate")
return 1
else:
self.errorMessage.set("")
Input.distList = [comboBox1Value, comboBox2Value]
return 0
#----------------------------------------------------------------------#
# Class: Output
#
# This class is used as a graphical user interface for a larger
# application.
#
#----------------------------------------------------------------------#
class Output(LabelFrame):
def __init__(self, master):
LabelFrame.__init__(self, master, text = "Output")
self.grid_columnconfigure(0, weight=1)
self.grid_rowconfigure(0, weight=1)
buttonFrame = Frame(self)
buttonFrame.pack(side=BOTTOM, padx=5, pady=5)
# Clear Button
self.clearButton = Button(buttonFrame, text = "CLEAR DATA", command = self.onClearButtonClick)
self.clearButton.grid(row = 2, column = 0)
# Save Button
self.saveButton = Button(buttonFrame, text = "SAVE DATA", command = self.onSaveButtonClick)
self.saveButton.grid(row=2, column=1)
# Stop Button
self.stopButton = Button(buttonFrame, text = "STOP SIMULATION", command = self.onStopButtonClick)
self.stopButton.grid(row = 2, column = 2)
def onClearButtonClick(self):
# Clear console
self.clearButton.event_generate("<<output_clear>>")
def onSaveButtonClick(self):
# Save data
self.saveButton.event_generate("<<output_save>>")
def onStopButtonClick(self):
# Stop simulation
self.stopButton.event_generate("<<stop_sim>>")
#----------------------------------------------------------------------#
# Class: CustomDist
#
# This class is used to allow users to enter a custom distribution.
#
#----------------------------------------------------------------------#
class CustomDist(object):
def __init__(self, master):
top = self.top = Toplevel(master)
top.geometry("500x200") # set window size
top.resizable(0,0)
self.function = StringVar()
# Label frame
frame1 = Frame(top)
frame1.pack(side=TOP, padx=5, pady=5)
self.l=Label(frame1, text="Please enter the functional inverse of the distribution of your choice. \nExponential distribution is provided as an example. \nNote: x " + u"\u2265" + " 0", font=("Helvetica", 12), justify=LEFT)
self.l.pack()
# Button frame
frame2 = Frame(top)
frame2.pack(side=TOP, padx=5, pady=5)
self.mu=Button(frame2, text=u'\u03bc', command=self.insertMu)
self.mu.pack(side=LEFT)
self.x=Button(frame2, text="x", command=self.insertX)
self.x.pack(side=LEFT)
self.ln=Button(frame2, text="ln", command=self.insertLn)
self.ln.pack(side=LEFT)
# Input frame
frame3 = Frame(top)
frame3.pack(side=TOP, padx=5, pady=5)
self.e = Entry(frame3, textvariable = self.function)
self.e.insert(0, "-ln(1 - x)/" + u'\u03bc')
self.e.pack(fill="both", expand=True)
frame4 = Frame(top)
frame4.pack(side=TOP, pady=10)
self.b=Button(frame4,text='Ok',command=self.cleanup)
self.b.pack()
def cleanup(self):
self.stringEquation=self.convertFunction()
self.top.destroy()
def insertMu(self):
self.e.insert(END, u'\u03bc')
def insertX(self):
self.e.insert(END, "x")
def insertLn(self):
self.e.insert(END, "ln")
def convertFunction(self):
self.stringList = list(self.e.get())
for i in range(len(self.stringList)):
if self.stringList[i] == u'\u03bc':
self.stringList[i] = "procRate"
elif self.stringList[i] == "x":
self.stringList[i] = "random.uniform(0.0, 1.0)"
elif self.stringList[i] == "l" and self.stringList[i+1] == "n":
self.stringList[i] = "log"
self.stringList[i+1] = ""
print ("".join(self.stringList))
return "".join(self.stringList)
#----------------------------------------------------------------------#
# Class: BoundedParetoDist
#
# This class is used to allow users to enter parameters to
# Bounded Pareto distribution.
#
#----------------------------------------------------------------------#
class BoundedParetoDist(object):
Array = []
def __init__(self, master):
top = self.top = Toplevel(master)
top.geometry("500x200") # set window size
top.resizable(0,0)
self.errorMessage = StringVar()
self.alpha = DoubleVar()
self.L = DoubleVar()
self.U = DoubleVar()
# Set default parameters
self.alpha.set(1.5)
self.L.set(1)
self.U.set(10**(6))
# Label frame
frame1 = Frame(top)
frame1.pack(side=TOP, padx=5, pady=5)
self.l=Label(frame1, text="Please enter the parameters you would like.", font=("Helvetica", 12), justify=LEFT)
self.l.pack()
self.error = Label(frame1, textvariable=self.errorMessage, fg="red", font=14)
self.error.pack()
# Input frame
frame2 = Frame(top)
frame2.pack(side=TOP, padx=5, pady=5)
frame2.grid_columnconfigure(0, weight=1)
frame2.grid_rowconfigure(0, weight=1)
self.l1 = Label(frame2, text = "alpha (shape)")
self.l2 = Label(frame2, text = "L (smallest job size)")
self.l3 = Label(frame2, text = "U (largest job size)")
self.l1.grid(row = 0, column = 0)
self.l2.grid(row = 1, column = 0)
self.l3.grid(row = 2, column = 0)
self.e1 = Entry(frame2, textvariable = self.alpha)
self.e2 = Entry(frame2, textvariable = self.L)
self.e3 = Entry(frame2, textvariable = self.U)
self.e1.grid(row = 0, column = 1)
self.e2.grid(row = 1, column = 1)
self.e3.grid(row = 2, column = 1)
frame3 = Frame(top)
frame3.pack(side=TOP, pady=10)
self.b=Button(frame3,text='Ok',command=self.cleanup)
self.b.pack()
def cleanup(self):
if(self.checkParams() == 0):
self.paramArray=BoundedParetoDist.Array
self.top.destroy()
def checkParams(self):
self.a = float(self.e1.get())
self.l = float(self.e2.get())
self.u = float(self.e3.get())
if (self.a <= 0) or (self.u < self.l) or (self.l <= 0):
print ("ERROR: Bounded pareto paramater error")
self.errorMessage.set("Bounded pareto paramater error")
return 1
else:
self.errorMessage.set("")
BoundedParetoDist.Array = [self.a, self.l, self.u]
return 0
#----------------------------------------------------------------------#
# Class: Node
#
# This class is used to define the linked list nodes.
#
#----------------------------------------------------------------------#
class Node():
def __init__(self, job, nextNode = None):
self.job = job
self.nextNode = nextNode
#----------------------------------------------------------------------#
# Class: LinkedList
#
# This class is used to make the linked list data structure used to
# store jobs.
#
#----------------------------------------------------------------------#
class LinkedList(object):
Size = 0
def __init__(self, head = None):
self.head = head
# Insert job into queue (sorted by ERPT)
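    # The list is kept in ascending ERPT order, so the head is always the
    # job with the shortest estimated remaining processing time.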
def insert(self, job):
current = self.head # node iterator, starts at head
previous = None
if (current == None): # if queue is empty, set current job as head
self.head = Node(job, None)
else:
while (current != None) and (job.ERPT > current.job.ERPT):
previous = current # prev = node[i]
current = current.nextNode # current = node[i+1]
# Insert new node after previous before current
if (previous == None):
self.head = Node(job, current)
else:
previous.nextNode = Node(job, current)
LinkedList.Size += 1
# Remove first item in queue
def removeHead(self):
if (LinkedList.Size > 0):
self.head = self.head.nextNode # move head forward one node
LinkedList.Size -= 1
else:
print ("ERROR: The linked list is already empty!!")
def clear(self):
self.head = None
def printList(self):
current = self.head
while (current != None):
print (current.job.name, current.job.ERPT)
current = current.nextNode
#----------------------------------------------------------------------#
# Class: JobClass
#
# This class is used to define jobs.
#
# Attributes: arrival time, processing time, remaining processing
# time, estimated remaining processing time, percent error
#----------------------------------------------------------------------#
class JobClass(object):
BPArray = []
def __init__(self, master):
self.master = master
self.arrivalTime = 0
self.completionTime = 0
self.procTime = 0
self.RPT = 0 # Real Remaining Processing Time
self.ERPT = 0 # Estimated Remaining Processing Time
self.percentError = 0
self.processRate = 0
self.arrivalRate = 0
#JobClass.BPArray = []
def setArrProcRates(self, load, procRate, procDist):
if procDist == 'Bounded Pareto':
alpha = JobClass.BPArray[0]
L = JobClass.BPArray[1]
U = JobClass.BPArray[2]
if alpha > 1 and L > 0:
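                # Closed-form mean of a Bounded Pareto(alpha, L, U) job size:
                # E[X] = (L^a/(1-(L/U)^a)) * (a/(a-1)) * (1/L^(a-1) - 1/U^(a-1))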
procMean = (L**alpha/(1 - (L/U)**alpha))*(alpha/(alpha - 1))*((1/(L**(alpha - 1)))-(1/(U**(alpha - 1))))
self.processRate = 1/float(procMean)
else:
self.processRate = procRate
self.arrivalRate = float(load) * self.processRate
# Dictionary of service distributions
def setServiceDist(self, procRate, procDist):
ServiceDistributions = {
'Poisson': random.expovariate(1.0/procRate),
'Exponential': random.expovariate(procRate),
'Uniform': random.uniform(0.0, procRate),
'Bounded Pareto': self.setBoundedPareto,
'Custom': self.setCustomDist
}
if(procDist == 'Custom'):
return ServiceDistributions[procDist](procRate)
elif(procDist == 'Bounded Pareto'):
return ServiceDistributions[procDist]()
else:
return ServiceDistributions[procDist]
def setCustomDist(self, procRate):
if main.timesClicked == 0:
main.timesClicked += 1
self.popup = CustomDist(self.master)
self.master.wait_window(self.popup.top)
main.customEquation = self.popup.stringEquation
return eval(main.customEquation)
def setBoundedPareto(self):
# Get and set parameters (in job class array)
if main.timesClicked == 0:
main.timesClicked += 1
self.popup = BoundedParetoDist(self.master)
self.master.wait_window(self.popup.top)
self.alpha = float(self.popup.paramArray[0]) # Shape, power of tail, alpha = 2 is approx Expon., alpha = 1 gives higher variance
self.L = float(self.popup.paramArray[1]) # Smallest job size
self.U = float(self.popup.paramArray[2]) # Largest job size
JobClass.BPArray = [self.alpha, self.L, self.U]
x = random.uniform(0.0, 1.0)
# reassigning
alpha = JobClass.BPArray[0]
L = JobClass.BPArray[1]
U = JobClass.BPArray[2]
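        # Inverse-transform sampling of the Bounded Pareto CDF:
        # F^-1(x) = ((U^a - x*(U^a - L^a)) / (U^a * L^a))^(-1/a)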
paretoNumerator = float(-(x*(U**alpha) - x*(L**alpha) - (U**alpha)))
paretoDenominator = float((U**alpha) * (L**alpha))
main.customEquation = (paretoNumerator/paretoDenominator)**(-1/alpha)
return main.customEquation
# Generates a percent error for processing time
def generateError(self, percErrorMin, percErrorMax):
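        # e.g. the GUI defaults percErrorMin=-50, percErrorMax=0 produce
        # estimates that understate the true processing time by up to 50%.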
self.percentError = random.uniform(percErrorMin, percErrorMax)
return self.percentError
# Sets all processing times for job
def setJobAttributes(self, load, procRate, procDist, percErrorMin, percErrorMax):
if(procDist == 'Bounded Pareto'):
self.procTime = self.setServiceDist(procRate, procDist) #use updated proc rate
self.setArrProcRates(load, procRate, procDist)
else:
self.setArrProcRates(load, procRate, procDist)
self.procTime = self.setServiceDist(procRate, procDist) #use updated proc rate
self.estimatedProcTime = (1 + (self.generateError(percErrorMin, percErrorMax)/100.0))*self.procTime
self.RPT = self.procTime
self.ERPT = self.estimatedProcTime
self.arrivalTime = MachineClass.CurrentTime
#----------------------------------------------------------------------#
# Class: MachineClass
#
# This class is used to generate Jobs at random and process them.
#
# Entities: jobs, server
# Events: job arrives, job completes
# Activities: processing job, waiting for new job
#
#----------------------------------------------------------------------#
class MachineClass(object):
Queue = LinkedList()
JobOrderOut = []
CurrentTime = 0.0
TimeUntilArrival = 0.0
AvgNumJobs = 0
PrevTime = 0
PrevNumJobs = 0
StopSim = False
ServiceStartTimes = [None] * NUM_SERVERS # Start times of job in each server
ProcessingJobs = [None] * NUM_SERVERS # Array of current job in each server
ServersBusy = [False] * NUM_SERVERS # Array of whether each server is busy
def __init__(self, master):
self.master = master
MachineClass.Queue.clear()
LinkedList.Size = 0
MachineClass.CurrentTime = 0.0
MachineClass.TimeUntilArrival = 0.0
MachineClass.StopSim = False
MachineClass.ServiceStartTimes = [None] * NUM_SERVERS
MachineClass.ProcessingJobs = [None] * NUM_SERVERS
MachineClass.ServersBusy = [False] * NUM_SERVERS
MachineClass.AvgNumJobs = 0
MachineClass.PrevTime = 0
MachineClass.PrevNumJobs = 0
NumJobs[:] = []
AvgNumJobs[:] = []
NumJobsTime[:] = []
self.ctr = 0
# Dictionary of arrival distributions
def setArrivalDist(self, arrRate, arrDist):
ArrivalDistributions = {
'Poisson': random.expovariate(1.0/arrRate),
'Exponential': random.expovariate(arrRate)
#'Normal': Rnd.normalvariate(self.inputInstance.valuesList[0])
#'Custom':
}
return ArrivalDistributions[arrDist]
#update data
def updateJobs(self):
for serverID in range(NUM_SERVERS):
if(MachineClass.ProcessingJobs[serverID] != None):
serviceTime = MachineClass.CurrentTime - MachineClass.ServiceStartTimes[serverID]
MachineClass.ProcessingJobs[serverID].RPT -= serviceTime
MachineClass.ProcessingJobs[serverID].ERPT -= serviceTime
MachineClass.ServiceStartTimes[serverID] = MachineClass.CurrentTime
def calcNumJobs(self, jobID, load):
self.currentNumJobs = 0
# First add all jobs that are currently being processed
for serverID in range(NUM_SERVERS):
if(MachineClass.ServersBusy[serverID] == True):
self.currentNumJobs += 1
# Secondly, add all jobs that are waiting in queue
self.currentNumJobs += MachineClass.Queue.Size
#changeInJobs = MachineClass.PrevNumJobs - self.currentNumJobs
self.t = MachineClass.CurrentTime
self.delta_t = self.t - MachineClass.PrevTime
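        # Running time-average of the number of jobs in the system:
        # Nbar(t) = (t_prev/t)*Nbar(t_prev) + (delta_t/t)*N(t_prev)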
# If one job in system
if(jobID == 0):
MachineClass.AvgNumJobs = 1 # First event is always create new job
# UPDATE
else:
MachineClass.AvgNumJobs = (MachineClass.PrevTime/(self.t))*float(MachineClass.AvgNumJobs) + float(MachineClass.PrevNumJobs)*(float(self.delta_t)/self.t)
# PrevTime becomes "old" t
MachineClass.PrevTime = self.t
# PrevNum jobs becomes current num jobs
MachineClass.PrevNumJobs = self.currentNumJobs
#GUI.writeToConsole(self.master, "%.6f | %.6f average num jobs %s"%(MachineClass.CurrentTime, self.t, MachineClass.AvgNumJobs))
NumJobs.append(self.currentNumJobs) # y axis of plot
AvgNumJobs.append(MachineClass.AvgNumJobs) # y axis of plot
NumJobsTime.append(MachineClass.CurrentTime) # x axis of plot
self.saveNumJobs(load, MachineClass.CurrentTime, self.currentNumJobs)
self.saveAvgNumJobs(load, MachineClass.CurrentTime, MachineClass.AvgNumJobs)
    def saveNumJobs(self, load, time, numJobs):
        text = "%f,%f"%(time, numJobs) + "\n"
scaledLoad = int(load * 100)
path = "./MULTI_SERVER_RESULTS/SRPT/SRPT_Num_load=%s_alpha=%s_servers=%s.txt"%(scaledLoad, JobClass.BPArray[0], NUM_SERVERS)
with open(path, "a") as myFile:
myFile.write(text)
myFile.close()
    def saveAvgNumJobs(self, load, time, avgNumJobs):
        text = "%f,%f"%(time, avgNumJobs) + "\n"
scaledLoad = int(load * 100)
path = "./MULTI_SERVER_RESULTS/SRPT/SRPT_Avg_load=%s_alpha=%s_servers=%s.txt"%(scaledLoad, JobClass.BPArray[0], NUM_SERVERS)
with open(path, "a") as myFile:
myFile.write(text)
myFile.close()
# Job arriving
def arrivalEvent(self, load, arrDist, procRate, procDist, percErrorMin, percErrorMax):
J = JobClass(self.master)
J.setJobAttributes(load, procRate, procDist, percErrorMin, percErrorMax)
J.name = "Job%02d"%self.ctr
self.calcNumJobs(self.ctr, load)
GUI.writeToConsole(self.master, "%.6f | %s arrived, ERPT = %.5f"%(MachineClass.CurrentTime, J.name, J.ERPT))
self.updateJobs() # update all processing jobs
MachineClass.Queue.insert(J) # add job to queue
# Find longest RPT of all processing jobs, preempt longest processing job
try:
maxERPT = max(element.ERPT for element in MachineClass.ProcessingJobs if element is not None)
l = [x for x in MachineClass.ProcessingJobs if (x is not None and x.ERPT == maxERPT)]
maxProcJob = l[0]
# Preempt largest job processing if all servers busy
if (maxERPT > J.ERPT)and(all(element == True for element in MachineClass.ServersBusy)):
#GUI.writeToConsole(self.master, "%.6f | %s preempting %s"%(MachineClass.CurrentTime, J.name, maxProcJob.name))
GUI.writeToConsole(self.master, "----------- | %s preempting %s"%(J.name, maxProcJob.name))
#Remove maxProcJob from server
serverID = MachineClass.ProcessingJobs.index(maxProcJob)
MachineClass.ServersBusy[serverID] = False
MachineClass.ProcessingJobs[serverID] = None
MachineClass.ServiceStartTimes[serverID] = None
#add back to queue
MachineClass.Queue.insert(maxProcJob) # add job to queue
#GUI.writeToConsole(self.master, "%.6f | %s added back to queue, ERPT = %.5f"%(MachineClass.CurrentTime, maxProcJob.name, maxProcJob.ERPT))
GUI.writeToConsole(self.master, "----------- | %s added back to queue, ERPT = %.5f"%(maxProcJob.name, maxProcJob.ERPT))
        except ValueError:
            # No jobs currently processing; use a sentinel larger than any ERPT
            maxERPT = 10**100
            maxProcJob = None
self.processJobs() # process first job in queue
# Generate next arrival
MachineClass.TimeUntilArrival = self.setArrivalDist(J.arrivalRate, arrDist)
self.ctr += 1
# Processing first job in queue
def processJobs(self):
for serverID in range(NUM_SERVERS):
#Server not busy and queue is not empty
if (MachineClass.ServersBusy[serverID] == False) and (MachineClass.Queue.Size > 0):
currentJob = MachineClass.Queue.head.job
MachineClass.ServiceStartTimes[serverID] = MachineClass.CurrentTime
MachineClass.ProcessingJobs[serverID] = currentJob
MachineClass.ServersBusy[serverID] = True
#GUI.writeToConsole(self.master, "%.6f | %s processing on server %s"%(MachineClass.CurrentTime, currentJob.name, index))
GUI.writeToConsole(self.master, "----------- | %s processing on server %s, ERPT=%s"%(currentJob.name, serverID, currentJob.ERPT))
MachineClass.Queue.removeHead() # remove first job from queue
# Job completed
def completionEvent(self, completingJob, load, percErrorMin, percErrorMax):
completingJob.completionTime = MachineClass.CurrentTime
self.calcNumJobs(self.ctr, load)
# Server no longer busy
serverID = MachineClass.ProcessingJobs.index(completingJob)
MachineClass.ServersBusy[serverID] = False
MachineClass.ProcessingJobs[serverID] = None
MachineClass.ServiceStartTimes[serverID] = None
GUI.writeToConsole(self.master, "%.6f | %s COMPLTED at server %s"%(MachineClass.CurrentTime, completingJob.name, serverID))
#Update other processing jobs (in case next event should be completion)
self.updateJobs()
#If there is a job waiting for this server, process it
if(MachineClass.Queue.Size > 0):
self.processJobs()
def run(self, load, arrDist, procRate, procDist, percErrorMin, percErrorMax, simLength):
        counter = 1
while 1:
# Generate time of first job arrival
if(self.ctr == 0):
                arrRate = float(load) * procRate  # lambda = load * mu, consistent with setArrProcRates
MachineClass.TimeUntilArrival = self.setArrivalDist(arrRate, arrDist) # generate next arrival
# Find shortest RPT of all processing jobs
try:
minRPT = min(element.RPT for element in MachineClass.ProcessingJobs if element is not None)
l = [x for x in MachineClass.ProcessingJobs if (x is not None and x.RPT == minRPT)]
minProcJob = l[0]
except ValueError:
minRPT = -1
minProcJob = None
# If all servers are idle, or next arrival is before completion of shortest job processing next event is ARRIVAL
if (all(element == False for element in MachineClass.ServersBusy)) or (MachineClass.TimeUntilArrival < minRPT):
MachineClass.CurrentTime += MachineClass.TimeUntilArrival
self.arrivalEvent(load, arrDist, procRate, procDist, percErrorMin, percErrorMax)
#next event is job finishing (job with shortest RPT)
else:
completingJob = minProcJob
MachineClass.CurrentTime += completingJob.RPT
self.completionEvent(completingJob, load, percErrorMin, percErrorMax)
# If current time is greater than the simulation length, end program
if (MachineClass.CurrentTime > simLength) or (MachineClass.StopSim == True):
break
#----------------------------------------------------------------------#
def main():
window = GUI(None) # instantiate the class with no parent (None)
window.title('Multi-Server SRPT with Errors') # title the window
# Global variables used in JobClass
main.timesClicked = 0
main.customEquation = ""
#window.geometry("500x600") # set window size
window.mainloop() # loop indefinitely, wait for events
if __name__ == '__main__': main()
|
{
"content_hash": "6e51bbd83d0ca67642928885c9077fff",
"timestamp": "",
"source": "github",
"line_count": 1050,
"max_line_length": 290,
"avg_line_length": 34.404761904761905,
"alnum_prop": 0.6647474048442906,
"repo_name": "rsmailach/MultiServerSRPT",
"id": "5fbe2bc240593de606437af8d5d147d7b0881136",
"size": "36544",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SRPTE_Multi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "257147"
}
],
"symlink_target": ""
}
|