| text (string, lengths 4 to 1.02M) | meta (dict) |
|---|---|
'''
This is a very simple example referenced in the beginner's tutorial:
https://enigmampc.github.io/catalyst/beginner-tutorial.html
Run this example by executing the following from your terminal:
catalyst ingest-exchange -x bitfinex -f daily -i btc_usdt
catalyst run -f buy_btc_simple.py -x bitfinex --start 2016-1-1 \
--end 2017-9-30 -o buy_btc_simple_out.pickle
If you want to run this code using another exchange, make sure that
the asset is available on that exchange. For example, if you were to run
it for exchange Poloniex, you would need to edit the following line:
context.asset = symbol('btc_usdt') # note 'usdt' instead of 'usd'
and specify exchange poloniex as follows:
catalyst ingest-exchange -x poloniex -f daily -i btc_usdt
catalyst run -f buy_btc_simple.py -x poloniex --start 2016-1-1 \
--end 2017-9-30 -o buy_btc_simple_out.pickle
To see which assets are available on each exchange, visit:
https://www.enigma.co/catalyst/status
'''
from catalyst import run_algorithm
from catalyst.api import order_target, record, symbol, order
import pandas as pd
def initialize(context):
# context.asset = symbol('btc_usdt')
context.asset = symbol('xrp_btc')
context.i = 0
# context.set_commission(maker=0.4,taker=0.3)
# def handle_data(context, data):
# context.i += 1
# if context.i == 1:
# order_target(context.asset, 3, limit_price=0.00231)
# if context.i == 2:
# order(context.asset, -1, limit_price=0.0023145)
# order(context.asset, -1, limit_price=0.0023146)
# order(context.asset, 3, limit_price=0.0023146)
# record(btc=data.current(context.asset, 'price'))
def handle_data(context, data):
if not context.blotter.open_orders:
if(context.portfolio.positions and
context.portfolio.positions[context.asset].amount >= 2):
order(context.asset, -2, limit_price=(
data.current(context.asset, 'price') - 0.00000002))
else:
order_target(context.asset, 3, limit_price=(
data.current(context.asset, 'price') + 0.00000002))
record(btc=data.current(context.asset, 'price'))
# def handle_data(context, data):
# context.i += 1
# if context.i % 2 == 1:# if not context.blotter.open_orders:
# order_target(
# context.asset,
# 1,
# limit_price=data.current(context.asset, 'price'))
# record(btc=data.current(context.asset, 'price'))
if __name__ == '__main__':
live = True
if live:
run_algorithm(
capital_base=0.02,
data_frequency='daily',
initialize=initialize,
handle_data=handle_data,
exchange_name='poloniex',
algo_namespace='buy_btc_simple',
quote_currency='btc',
live=True,
simulate_orders=False,
# start=pd.to_datetime('2018-05-01 17:18', utc=True),
# end=pd.to_datetime('2018-05-14 08:28', utc=True),
)
else:
run_algorithm(
capital_base=100000,
data_frequency='daily',
initialize=initialize,
handle_data=handle_data,
exchange_name='poloniex',
algo_namespace='buy_btc_simple',
quote_currency='usdt',
start=pd.to_datetime('2016-01-01', utc=True),
end=pd.to_datetime('2016-01-03', utc=True),
)
|
{
"content_hash": "8410e6623fb11986a54ae6512dcdc92f",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 77,
"avg_line_length": 36.302083333333336,
"alnum_prop": 0.612051649928264,
"repo_name": "enigmampc/catalyst",
"id": "5cd6a1a5d20695f4717d0d5528f971c46e5b7456",
"size": "3485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catalyst/support/buy_and_sell_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7014"
},
{
"name": "Dockerfile",
"bytes": "2510"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Jupyter Notebook",
"bytes": "229701"
},
{
"name": "PowerShell",
"bytes": "3269"
},
{
"name": "Python",
"bytes": "4279642"
},
{
"name": "Shell",
"bytes": "7469"
}
],
"symlink_target": ""
}
|
"""Config flow for Spotify."""
from __future__ import annotations
from collections.abc import Mapping
import logging
from typing import Any
from spotipy import Spotify
from homeassistant.config_entries import ConfigEntry
from homeassistant.data_entry_flow import FlowResult
from homeassistant.helpers import config_entry_oauth2_flow
from .const import DOMAIN, SPOTIFY_SCOPES
class SpotifyFlowHandler(
config_entry_oauth2_flow.AbstractOAuth2FlowHandler, domain=DOMAIN
):
"""Config flow to handle Spotify OAuth2 authentication."""
DOMAIN = DOMAIN
VERSION = 1
reauth_entry: ConfigEntry | None = None
@property
def logger(self) -> logging.Logger:
"""Return logger."""
return logging.getLogger(__name__)
@property
def extra_authorize_data(self) -> dict[str, Any]:
"""Extra data that needs to be appended to the authorize url."""
return {"scope": ",".join(SPOTIFY_SCOPES)}
async def async_oauth_create_entry(self, data: dict[str, Any]) -> FlowResult:
"""Create an entry for Spotify."""
spotify = Spotify(auth=data["token"]["access_token"])
try:
current_user = await self.hass.async_add_executor_job(spotify.current_user)
except Exception: # pylint: disable=broad-except
return self.async_abort(reason="connection_error")
name = data["id"] = current_user["id"]
if self.reauth_entry and self.reauth_entry.data["id"] != current_user["id"]:
return self.async_abort(reason="reauth_account_mismatch")
if current_user.get("display_name"):
name = current_user["display_name"]
data["name"] = name
await self.async_set_unique_id(current_user["id"])
return self.async_create_entry(title=name, data=data)
async def async_step_reauth(self, entry_data: Mapping[str, Any]) -> FlowResult:
"""Perform reauth upon migration of old entries."""
self.reauth_entry = self.hass.config_entries.async_get_entry(
self.context["entry_id"]
)
return await self.async_step_reauth_confirm()
async def async_step_reauth_confirm(
self, user_input: dict[str, Any] | None = None
) -> FlowResult:
"""Confirm reauth dialog."""
if self.reauth_entry is None:
return self.async_abort(reason="reauth_account_mismatch")
if user_input is None and self.reauth_entry:
return self.async_show_form(
step_id="reauth_confirm",
description_placeholders={"account": self.reauth_entry.data["id"]},
errors={},
)
return await self.async_step_pick_implementation(
user_input={"implementation": self.reauth_entry.data["auth_implementation"]}
)
|
{
"content_hash": "94ab435b326897e44d51b35d627bfac4",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 88,
"avg_line_length": 33.795180722891565,
"alnum_prop": 0.6442067736185383,
"repo_name": "nkgilley/home-assistant",
"id": "bbfb92db091306645719397d4b24dafd907775bd",
"size": "2805",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/spotify/config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
"""
The screen to be used when a screen transition happens
"""
from Microsoft.Xna.Framework import Color, Rectangle
from base import BaseScreen
class Transition:
speed = 1.5 # seconds
color = Color.Black
is_finished = False
def __init__(self, to_opaque=True):
self.to_opaque = to_opaque
self.alpha = 0
if not self.to_opaque:
self.alpha = 255
def Update(self, gametime, debug=False):
if self.to_opaque:
self.alpha += gametime.ElapsedGameTime.TotalMilliseconds / (self.speed * 3.3333)
if self.alpha >= 255:
self.is_finished = True
else:
self.alpha -= gametime.ElapsedGameTime.TotalMilliseconds / (self.speed * 3.3333)
if self.alpha <= 0:
self.is_finished = True
if debug:
print(self.alpha)
self.color = Color(self.color, self.alpha)
class TransitionScreen (BaseScreen):
transition1 = Transition()
transition2 = Transition(False)
def __init__(self, transition_from, transition_to, color1=None, color2=None):
self.old_screen = transition_from
self.next_screen = transition_to
self.bgcolor = transition_from.bgcolor
if color1 is None:
color1 = Color.Black
if color2 is None:
color2 = Color.Black
self.transition1.color = color1
self.transition2.color = color2
def OnEnter(self, engine):
BaseScreen.OnEnter(self, engine)
screen = self.engine.GetScreenSize()
self.rect = Rectangle(0, 0, screen.X, screen.Y)
def Update(self, gametime):
if not self.transition1.is_finished:
self.transition1.Update(gametime)
elif not self.transition2.is_finished:
self.transition2.Update(gametime, True)
self.bgcolor = self.next_screen.bgcolor
else:
return self.next_screen
return None
def Draw(self, spritebatch):
transition = None
if not self.transition1.is_finished:
transition = self.transition1
self.old_screen.Draw(spritebatch)
else:
transition = self.transition2
self.next_screen.Draw(spritebatch)
spritebatch.Begin()
spritebatch.Draw(self.engine.DummyTexture,
destinationRectangle=self.rect,
color=self.transition1.color)
spritebatch.End()
|
{
"content_hash": "7a5b82266effa7e7ef518674ec07a936",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 92,
"avg_line_length": 27.263736263736263,
"alnum_prop": 0.6082224909310762,
"repo_name": "Eshumkv/WeaponWizard",
"id": "ea957ac27c5a829946fad530b17ae0bf0b17fcbe",
"size": "2483",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PythonTest/scripts/screens/transition.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "128565"
},
{
"name": "Python",
"bytes": "6386"
}
],
"symlink_target": ""
}
|
"""The tab switching measurement.
This measurement opens pages in different tabs. After all the tabs have opened,
it cycles through each tab in sequence, and records a histogram of the time
between when a tab was first requested to be shown, and when it was painted.
Power usage is also measured.
"""
import time
from metrics import histogram_util
from metrics import power
from telemetry.core import util
from telemetry.page import page_measurement
# TODO: Revisit this test once multitab support is finalized.
class TabSwitching(page_measurement.PageMeasurement):
def __init__(self):
super(TabSwitching, self).__init__()
self._first_page_in_pageset = True
self._power_metric = power.PowerMetric()
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs([
'--enable-stats-collection-bindings'
])
power.PowerMetric.CustomizeBrowserOptions(options)
def DidStartBrowser(self, browser):
self._first_page_in_pageset = True
def TabForPage(self, page, browser):
if self._first_page_in_pageset:
# The initial browser window contains a single tab, navigate that tab
# rather than creating a new one.
self._first_page_in_pageset = False
return browser.tabs[0]
return browser.tabs.New()
def StopBrowserAfterPage(self, browser, page):
# Restart the browser after the last page in the pageset.
return len(browser.tabs) >= len(page.page_set.pages)
def MeasurePage(self, page, tab, results):
"""On the last tab, cycle through each tab that was opened and then record
a single histogram for the tab switching metric."""
if len(tab.browser.tabs) != len(page.page_set.pages):
return
# Measure power usage of tabs after quiescence.
util.WaitFor(tab.HasReachedQuiescence, 60)
self._power_metric.Start(page, tab)
time.sleep(5)
self._power_metric.Stop(page, tab)
self._power_metric.AddResults(tab, results,)
histogram_name = 'MPArch.RWH_TabSwitchPaintDuration'
histogram_type = histogram_util.BROWSER_HISTOGRAM
display_name = 'MPArch_RWH_TabSwitchPaintDuration'
first_histogram = histogram_util.GetHistogram(
histogram_type, histogram_name, tab)
prev_histogram = first_histogram
for i in xrange(len(tab.browser.tabs)):
t = tab.browser.tabs[i]
t.Activate()
def _IsDone():
cur_histogram = histogram_util.GetHistogram(
histogram_type, histogram_name, tab)
diff_histogram = histogram_util.SubtractHistogram(
cur_histogram, prev_histogram)
return diff_histogram
util.WaitFor(_IsDone, 30)
prev_histogram = histogram_util.GetHistogram(
histogram_type, histogram_name, tab)
last_histogram = histogram_util.GetHistogram(
histogram_type, histogram_name, tab)
diff_histogram = histogram_util.SubtractHistogram(last_histogram,
first_histogram)
results.AddSummary(display_name, '', diff_histogram,
data_type='unimportant-histogram')
|
{
"content_hash": "69e7738fec64838260779b81cc4ad486",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 79,
"avg_line_length": 35.127906976744185,
"alnum_prop": 0.7096987752399868,
"repo_name": "anirudhSK/chromium",
"id": "6a78493b725c7369256645c2a2a99de9b6fbdf58",
"size": "3184",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/perf/measurements/tab_switching.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "853"
},
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "52960"
},
{
"name": "Awk",
"bytes": "8660"
},
{
"name": "C",
"bytes": "42502191"
},
{
"name": "C#",
"bytes": "1132"
},
{
"name": "C++",
"bytes": "201859263"
},
{
"name": "CSS",
"bytes": "946557"
},
{
"name": "DOT",
"bytes": "2984"
},
{
"name": "Java",
"bytes": "5687122"
},
{
"name": "JavaScript",
"bytes": "22163714"
},
{
"name": "M",
"bytes": "2190"
},
{
"name": "Matlab",
"bytes": "2496"
},
{
"name": "Objective-C",
"bytes": "7670589"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "Perl",
"bytes": "672770"
},
{
"name": "Python",
"bytes": "10873885"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Shell",
"bytes": "1315894"
},
{
"name": "Tcl",
"bytes": "277091"
},
{
"name": "TypeScript",
"bytes": "1560024"
},
{
"name": "XSLT",
"bytes": "13493"
},
{
"name": "nesC",
"bytes": "15206"
}
],
"symlink_target": ""
}
|
import time
import config
import busses
def advance_station():
index = busses.status_bus["station"]
if busses.status_bus["line"] == "S41":
index += 1
if busses.status_bus["line"] == "S42":
index -= 1
index = index % len(config.stations)
busses.status_bus["station"] = index
busses.status_bus["set_at_time"] = time.time()
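# Illustrative worked example (hypothetical station list), showing the modulo wrap-around:
# with config.stations == ["A", "B", "C"] and status_bus == {"line": "S42", "station": 0},
# advance_station() decrements the index to -1, which wraps to 2 ("C") because -1 % 3 == 2.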
def set_station(station_name):
busses.status_bus["station"] = config.stations.index(station_name)
busses.status_bus["set_at_time"] = time.time()
|
{
"content_hash": "882fc4751548e0b80493d1fec02937a4",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 70,
"avg_line_length": 28.72222222222222,
"alnum_prop": 0.6421663442940039,
"repo_name": "Pixdigit/Saufbot",
"id": "3b46c19ad1a4f3dcbb7944f0596f0c015264ef95",
"size": "541",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mangament_units.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20283"
}
],
"symlink_target": ""
}
|
from lxml import etree
from openerp import tools
from openerp.tools.translate import _
from openerp.osv import fields, osv
class project_task_delegate(osv.osv_memory):
_name = 'project.task.delegate'
_description = 'Task Delegate'
_columns = {
'name': fields.char('Delegated Title', required=True, help="New title of the task delegated to the user"),
'prefix': fields.char('Your Task Title', help="Title for your validation task"),
        'project_id': fields.many2one('project.project', 'Project', help="Project in which the delegated task will be created"),
'user_id': fields.many2one('res.users', 'Assign To', required=True, help="User you want to delegate this task to"),
'new_task_description': fields.text('New Task Description', help="Reinclude the description of the task in the task of the user"),
'planned_hours': fields.float('Planned Hours', help="Estimated time to close this task by the delegated user"),
'planned_hours_me': fields.float('Hours to Validate', help="Estimated time for you to validate the work done by the user to whom you delegate this task"),
'state': fields.selection([('pending','Pending'), ('done','Done'), ], 'Validation State', help="New state of your own task. Pending will be reopened automatically when the delegated task is closed")
}
def onchange_project_id(self, cr, uid, ids, project_id=False, context=None):
project_project = self.pool.get('project.project')
if not project_id:
return {'value':{'user_id': False}}
project = project_project.browse(cr, uid, project_id, context=context)
return {'value': {'user_id': project.user_id and project.user_id.id or False}}
def default_get(self, cr, uid, fields, context=None):
"""
This function gets default values
"""
res = super(project_task_delegate, self).default_get(cr, uid, fields, context=context)
if context is None:
context = {}
record_id = context and context.get('active_id', False) or False
if not record_id:
return res
task_pool = self.pool.get('project.task')
task = task_pool.browse(cr, uid, record_id, context=context)
task_name =tools.ustr(task.name)
if 'project_id' in fields:
res['project_id'] = int(task.project_id.id) if task.project_id else False
if 'name' in fields:
if task_name.startswith(_('CHECK: ')):
newname = tools.ustr(task_name).replace(_('CHECK: '), '')
else:
newname = tools.ustr(task_name or '')
res['name'] = newname
if 'planned_hours' in fields:
res['planned_hours'] = task.remaining_hours or 0.0
if 'prefix' in fields:
if task_name.startswith(_('CHECK: ')):
newname = tools.ustr(task_name).replace(_('CHECK: '), '')
else:
newname = tools.ustr(task_name or '')
prefix = _('CHECK: %s') % newname
res['prefix'] = prefix
if 'new_task_description' in fields:
res['new_task_description'] = task.description
return res
_defaults = {
'planned_hours_me': 1.0,
'state': 'pending',
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
res = super(project_task_delegate, self).fields_view_get(cr, uid, view_id, view_type, context, toolbar, submenu=submenu)
users_pool = self.pool.get('res.users')
obj_tm = users_pool.browse(cr, uid, uid, context=context).company_id.project_time_mode_id
tm = obj_tm and obj_tm.name or 'Hours'
if tm in ['Hours','Hour']:
return res
eview = etree.fromstring(res['arch'])
def _check_rec(eview):
if eview.attrib.get('widget','') == 'float_time':
eview.set('widget','float')
for child in eview:
_check_rec(child)
return True
_check_rec(eview)
res['arch'] = etree.tostring(eview)
for field in res['fields']:
if 'Hours' in res['fields'][field]['string']:
res['fields'][field]['string'] = res['fields'][field]['string'].replace('Hours',tm)
return res
def delegate(self, cr, uid, ids, context=None):
if context is None:
context = {}
task_id = context.get('active_id', False)
task_pool = self.pool.get('project.task')
delegate_data = self.read(cr, uid, ids, context=context)[0]
delegated_tasks = task_pool.do_delegate(cr, uid, [task_id], delegate_data, context=context)
models_data = self.pool.get('ir.model.data')
action_model, action_id = models_data.get_object_reference(cr, uid, 'project', 'action_view_task')
view_model, task_view_form_id = models_data.get_object_reference(cr, uid, 'project', 'view_task_form2')
view_model, task_view_tree_id = models_data.get_object_reference(cr, uid, 'project', 'view_task_tree2')
action = self.pool[action_model].read(cr, uid, [action_id], context=context)[0]
action['res_id'] = delegated_tasks[task_id]
action['view_id'] = False
action['views'] = [(task_view_form_id, 'form'), (task_view_tree_id, 'tree')]
action['help'] = False
return action
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
{
"content_hash": "0d623a7a78e55476382b33d51855d9c8",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 206,
"avg_line_length": 47.68695652173913,
"alnum_prop": 0.6057622173595916,
"repo_name": "diogocs1/comps",
"id": "e3c935d4f5dd7ac53b6691fb71c6129f76ca96a1",
"size": "6463",
"binary": false,
"copies": "194",
"ref": "refs/heads/master",
"path": "web/addons/project/wizard/project_task_delegate.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "701"
},
{
"name": "CSS",
"bytes": "856533"
},
{
"name": "HTML",
"bytes": "299671"
},
{
"name": "Java",
"bytes": "620166"
},
{
"name": "JavaScript",
"bytes": "5844302"
},
{
"name": "Makefile",
"bytes": "21002"
},
{
"name": "PHP",
"bytes": "14259"
},
{
"name": "Python",
"bytes": "10647376"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "17746"
},
{
"name": "XSLT",
"bytes": "120278"
}
],
"symlink_target": ""
}
|
import oslo_config.cfg
# there are 3 ways to access the configuration.
#
# a. ryu.cfg.CONF (used to register cli options)
# b. RyuApp.CONF (preferred way for ryu applications)
# c. oslo.config.cfg.CONF
#
# Currently all of above shares a single ConfigOpts instance.
# We will unshare c. (and stop using it) as soon as ofagent neutron agent
# is updated.
# We want to avoid using c. for our options as a python program which embeds
# ryu applications (eg. neutron agent) might want to put its own set of cli
# options into it, which can conflict with ours. (Currently there seems
# no conflict for the neutron agent. But who knows?)
# At some point later we might want to unshare a. and b. as well, in order
# to allow app-specific options.
CONF = oslo_config.cfg.CONF
# re-export for convenience
from oslo_config.cfg import ConfigOpts
from oslo_config.cfg import BoolOpt
from oslo_config.cfg import IntOpt
from oslo_config.cfg import ListOpt
from oslo_config.cfg import MultiStrOpt
from oslo_config.cfg import StrOpt
from oslo_config.cfg import FloatOpt
from oslo_config.cfg import RequiredOptError
from oslo_config.cfg import ConfigFilesNotFoundError
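# Illustrative sketch (not part of this module) of the two supported access paths
# described in the comment above; the option name 'example-opt' and its default
# are hypothetical.
#
#   # (a) register a CLI option at import time via ryu.cfg.CONF
#   from ryu import cfg
#   from ryu.base import app_manager
#   cfg.CONF.register_cli_opts([cfg.StrOpt('example-opt', default='value')])
#
#   # (b) read it back inside an application via RyuApp.CONF
#   class ExampleApp(app_manager.RyuApp):
#       def __init__(self, *args, **kwargs):
#           super(ExampleApp, self).__init__(*args, **kwargs)
#           self.logger.info(self.CONF.example_opt)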
|
{
"content_hash": "71613bd1b2d5740cf7804595d309943f",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 76,
"avg_line_length": 35.484848484848484,
"alnum_prop": 0.7677198975234842,
"repo_name": "lzppp/mylearning",
"id": "3c580aee6a40afca3fe7de2d42262433c3cf4d76",
"size": "1850",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ryu/cfg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "410"
},
{
"name": "CSS",
"bytes": "7182"
},
{
"name": "Erlang",
"bytes": "872692"
},
{
"name": "HTML",
"bytes": "4612"
},
{
"name": "JavaScript",
"bytes": "46535"
},
{
"name": "Makefile",
"bytes": "88"
},
{
"name": "Python",
"bytes": "5260593"
},
{
"name": "Shell",
"bytes": "15461"
}
],
"symlink_target": ""
}
|
def predict_next_board(game):
"""Returns a list of pos2sible board positions each for left, down, right, up"""
orig = game.board
left = game.move_left()
down = game.move_down()
right = game.move_right()
up = game.move_up()
return [orig, left, down, right, up]
def flatten(board):
return [item for sublist in board for item in sublist]
def score_board(orig, new):
if orig == new:
return 999
print("orig", orig)
print("new", new)
flat_orig = flatten(orig)
flat_new = flatten(new)
    # penalize boards whose highest tile is not in a corner
    # (indices 0, 3, 12, 15 of the flattened 4x4 board)
    if flat_new.index(max(flat_new)) not in [0, 3, 12, 15]:
return 100
if len([f for f in flat_orig if f is not None]) > len([g for g in flat_new if g is not None]):
return 1
return 10
def weight_boards(orig, boards):
"""Returns a list of weights associated with each board"""
b = boards
assert len(b) == 4, b
return [w for w in map(lambda x: score_board(orig, x), b)]
def choose(scores):
    # pick the direction whose board scored highest
    best = scores.index(max(scores))
    if best == 0:
        return "left"
    if best == 1:
        return "down"
    if best == 2:
        return "right"
    return "up"
|
{
"content_hash": "493d21cc84e6009b347a314d01146607",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 98,
"avg_line_length": 27.29268292682927,
"alnum_prop": 0.5924932975871313,
"repo_name": "jdowns/play2048",
"id": "69cb6a8c8a082cb770fab5ae835bc1667381d04a",
"size": "1119",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "brain.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7030"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('event', '0008_auto_20180208_1256'),
]
operations = [
migrations.AlterModelOptions(
name='room',
options={'ordering': ['order', 'name']},
),
migrations.AddField(
model_name='dancefloorevent',
name='notification_send',
field=models.BooleanField(default=False),
),
]
|
{
"content_hash": "65bd8937a85a4f4b15ff6a74fdb4ac20",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 53,
"avg_line_length": 24.15,
"alnum_prop": 0.5590062111801242,
"repo_name": "bruecksen/notifhain",
"id": "7bb2f61dbbb3cfd50a57a56f53714d4e7cdb4cc2",
"size": "532",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "notifhain/event/migrations/0009_auto_20180208_1452.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "584"
},
{
"name": "Python",
"bytes": "61554"
},
{
"name": "Shell",
"bytes": "94"
}
],
"symlink_target": ""
}
|
import logging
import os, sys, time, socket
import docker
def setupLogging():
log = logging.getLogger('maestro')
if not len(log.handlers):
log.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(levelname)-10s %(message)s")
filehandler = logging.FileHandler('maestro.log')
filehandler.setLevel(logging.DEBUG)
filehandler.setFormatter(formatter)
log.addHandler(filehandler)
return log
quiet=False
def setQuiet(state=True):
global quiet
quiet = state
# Display the status
def status(string):
global quiet
log = logging.getLogger('maestro')
log.info(string)
if not quiet:
print string
def order(raw_list):
def _process(wait_list):
new_wait = []
for item in wait_list:
match = False
for dependency in raw_list[item]['require']:
if dependency in ordered_list:
match = True
else:
match = False
break
if match:
ordered_list.append(item)
else:
new_wait.append(item)
if len(new_wait) > 0:
# Guard against circular dependencies
if len(new_wait) == len(wait_list):
raise Exception("Unable to satisfy the require for: " + item)
# Do it again for any remaining items
_process(new_wait)
ordered_list = []
wait_list = []
# Start by building up the list of items that do not have any dependencies
for item in raw_list:
if 'require' not in raw_list[item]:
ordered_list.append(item)
else:
wait_list.append(item)
# Then recursively order the items that do define dependencies
_process(wait_list)
return ordered_list
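# Illustrative example of order() (hypothetical container names): items without a
# 'require' key are emitted first, dependent items follow once every requirement is
# already in the result.
#
#   order({'db': {}, 'cache': {}, 'web': {'require': ['db', 'cache']}})
#   -> ['db', 'cache', 'web']   (relative order of independent items may vary)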
def waitForService(ip, port, retries=60):
while retries >= 0:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(1)
s.connect((ip, port))
s.close()
break
except:
time.sleep(0.5)
retries = retries - 1
continue
return retries
def findImage(name, tag="latest"):
result = docker.Client().images(name=name)
for image in result:
if image['Tag'] == tag:
return image['Id']
return None
|
{
"content_hash": "06428de94b1562f08ea49b043e25a086",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 77,
"avg_line_length": 23.692307692307693,
"alnum_prop": 0.6321892393320965,
"repo_name": "Wiredcraft/maestro",
"id": "0d5c3765521de6017bbf8c85a6f05f0b8eab6f8f",
"size": "2156",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "maestro/utils.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('proposals', '0013_auto_20160919_0021'),
]
operations = [
migrations.AlterField(
model_name='score',
name='note',
field=models.CharField(blank=True, default='', help_text='Only you can see this', max_length=255),
),
migrations.AlterField(
model_name='score',
name='value',
field=models.PositiveSmallIntegerField(help_text='4 = Must have!, 3 = Interesting talk, 2 = Meh, 1 = Definitely not!', validators=[django.core.validators.MinValueValidator(1), django.core.validators.MaxValueValidator(4)]),
),
]
|
{
"content_hash": "5dc460eac709550c7cb800854935c5da",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 234,
"avg_line_length": 33.416666666666664,
"alnum_prop": 0.6309226932668329,
"repo_name": "pyvec/cz.pycon.org-2016",
"id": "d56d62f4a67ee95a0068a88a767c232e44bade57",
"size": "874",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "pyconcz_2016/proposals/migrations/0014_auto_20160919_0116.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "847394"
},
{
"name": "HTML",
"bytes": "88430"
},
{
"name": "JavaScript",
"bytes": "362108"
},
{
"name": "Python",
"bytes": "96339"
}
],
"symlink_target": ""
}
|
import os
import time
import base64
import urllib
from urlparse import urlparse
import cPickle as pickle
import json
import etcd
from configdb import exceptions
from configdb.db.interface import base
from configdb.db.interface import inmemory_interface
import logging
log = logging.getLogger(__name__)
class EtcdSession(inmemory_interface.InMemorySession):
"""A EtcdInterface session."""
def __init__(self,db):
self.db = db
self.revisions = {}
def _escape(self,s):
return s.encode('hex')
def _unescape(self, s):
return s.decode('hex')
def _mkpath(self, entity_name, obj_name=None):
path = os.path.join(self.db.root, self._escape(entity_name))
if obj_name:
path = os.path.join(path, self._escape(obj_name))
return path
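    # Illustrative example (hypothetical entity/object names): with the default root
    # '/configdb', _mkpath('host', 'web01') yields '/configdb/686f7374/7765623031',
    # i.e. both path components are hex-escaped.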
def add(self, obj, create=False):
path = self._mkpath(obj._entity_name, obj.name)
        # If we don't have a revision, require that the key does not exist yet
        # (fresh create); otherwise compare-and-swap against the revision we last saw.
rev = self.revisions.get(path, None)
log.debug("Path %s, rev %s", path, rev)
if rev is None:
opts = {'prevExist': False}
else:
opts = {'prevIndex': rev}
if create:
opts['prevExist'] = False
# Will raise ValueError if the test fails.
try:
r = self.db.conn.write(path, self.db._serialize(obj), **opts)
self.revisions[path] = r.modifiedIndex
except (ValueError, KeyError):
raise exceptions.IntegrityError('Bad revision')
def delete(self, obj):
        self._delete_by_name(obj._entity_name, obj.name)
def _delete_by_name(self, entity_name, obj_name):
path = self._mkpath(entity_name, obj_name)
try:
rev = self.revisions.pop(path, 0)
# etcd has no way to atomically delete objects depending on their index. Meh!
# we simulate (coarsely) the correct behaviour
self.db.conn.write(path, '__to_delete',prevIndex = rev)
self.db.conn.delete(path)
except KeyError:
pass
except ValueError:
# CAS has failed
raise exceptions.IntegrityError('Bad revision')
def _deserialize_if_not_none(self, data):
if data:
return self.db._deserialize(data)
else:
return None
def _get(self, entity_name, obj_name):
path = self._mkpath(entity_name, obj_name)
try:
# Again, reads are not atomic in etcd and watchIndex is not useful.
data = self.db.conn.read(path)
self.revisions[path] = data.modifiedIndex
return self._deserialize_if_not_none(data.value)
except KeyError:
pass
def _find(self, entity_name):
path = self._mkpath(entity_name)
for r in self.db.conn.read(path, recursive = True).children:
if not r.dir:
curpath = r.key.replace(self.db.conn.key_endpoint,'')
self.revisions[curpath] = r.modifiedIndex
yield self._deserialize_if_not_none(r.value)
def commit(self):
pass
def rollback(self):
pass
class EtcdInterface(base.DbInterface):
"""Database interface for an Etcd backend.
This needs the 'python-etcd' library, available at:
https://github.com/lavagetto/python-etcd
"""
AUDIT_SUPPORT = True
AUDIT_LOG_LENGTH = 100
def __init__(self, url, schema, root='/configdb', timeout=30):
self.root = root
self.schema = schema
try:
p = urlparse(url)
host, port = p.netloc.split(':')
except ValueError:
raise ValueError(
'Url {} is not in the host:port format'.format(p.netloc))
#TODO: find a way to allow use of SSL client certificates.
self.conn = etcd.Client(
host=host, port=int(port), protocol = p.scheme, allow_reconnect = True)
def _serialize(self, obj):
return base64.b64encode(
pickle.dumps(obj, protocol=pickle.HIGHEST_PROTOCOL))
def _deserialize(self, data):
return pickle.loads(base64.b64decode(data))
def session(self):
return base.session_context_manager(EtcdSession(self))
def get_by_name(self, entity_name, object_name, session):
return session._get(entity_name, object_name)
def find(self, entity_name, query, session):
entity = self.schema.get_entity(entity_name)
return self._run_query(entity, query,
session._find(entity_name))
def create(self, entity_name, attrs, session):
entity = self.schema.get_entity(entity_name)
obj = inmemory_interface.InMemoryObject(entity, attrs)
session.add(obj, create=True)
return obj
def delete(self, entity_name, obj_name, session):
session._delete_by_name(entity_name, obj_name)
def close(self):
self.conn.http.clear()
def _get_audit_slot(self):
path = os.path.join(self.root, '_audit', '_slots')
retries = 10
while retries > 0:
try:
res = self.conn.read(path)
except:
# we do not check for existence, on purpose
self.conn.write(path, 0)
return "0"
slot = (int(res.value) + 1) % self.AUDIT_LOG_LENGTH
try:
self.conn.write(path, slot, prevIndex = res.modifiedIndex)
return str(slot)
except:
retries -= 1
#we could not apply for a slot, it seems; just give up writing
return None
def add_audit(self, entity_name, obj_name, operation,
data, auth_ctx, session):
"""Add an entry in the audit log."""
if data is not None:
data = self.schema.get_entity(entity_name).to_net(data)
slot = self._get_audit_slot()
if slot is None:
return
path = os.path.join(self.root, '_audit', slot)
audit = {
'entity': entity_name,
'object': obj_name,
'op': operation,
'user': auth_ctx.get_username(),
'data': base64.b64encode(json.dumps(data)) if data else None,
'ts': time.time()
}
        try:
            self.conn.write(path, json.dumps(audit))
except ValueError:
pass
def get_audit(self, query, session):
"""Query the audit log."""
# This is actually very expensive and this is why we have a limited number of slots
path = os.path.join(self.root, '_audit')
try:
data = self.conn.read(path, recursive=True)
except KeyError:
# special case: no audit log present!
return []
log = []
for result in data.children:
obj = json.loads(result.value)
if obj['data']:
obj['data'] = base64.b64decode(obj['data'])
matches = True
for (k,v) in query.iteritems():
if k not in obj:
matches = False
break
if obj[k] != v:
matches = False
break
if matches:
log.append(obj)
return sorted(log, key=lambda k: k['ts'])
|
{
"content_hash": "d14b756a053293fa2909214e1a54c692",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 91,
"avg_line_length": 30.95798319327731,
"alnum_prop": 0.5636536373507057,
"repo_name": "lavagetto/configdb",
"id": "e1d2a46015836461ea03024c24a0780a5e0dfddc",
"size": "7472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "configdb/db/interface/etcd_interface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "171465"
},
{
"name": "Shell",
"bytes": "1066"
}
],
"symlink_target": ""
}
|
"""
sentry.models.organizationmember
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
from bitfield import BitField
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from hashlib import md5
from sentry.db.models import (
Model, BoundedPositiveIntegerField, FlexibleForeignKey, sane_repr
)
from sentry.utils.http import absolute_uri
# TODO(dcramer): pull in enum library
class OrganizationMemberType(object):
OWNER = 0
ADMIN = 25
MEMBER = 50
BOT = 100
class OrganizationMember(Model):
"""
Identifies relationships between teams and users.
Users listed as team members are considered to have access to all projects
    and could be thought of as team owners (though their access level may not
    be set to ownership).
"""
organization = FlexibleForeignKey('sentry.Organization', related_name="member_set")
user = FlexibleForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True,
related_name="sentry_orgmember_set")
email = models.EmailField(null=True, blank=True)
type = BoundedPositiveIntegerField(choices=(
(OrganizationMemberType.BOT, _('Bot')),
(OrganizationMemberType.MEMBER, _('Member')),
(OrganizationMemberType.ADMIN, _('Admin')),
(OrganizationMemberType.OWNER, _('Owner')),
), default=OrganizationMemberType.MEMBER)
flags = BitField(flags=(
('sso:linked', 'sso:linked'),
('sso:invalid', 'sso:invalid'),
), default=0)
date_added = models.DateTimeField(default=timezone.now)
has_global_access = models.BooleanField(default=True)
teams = models.ManyToManyField('sentry.Team', blank=True)
class Meta:
app_label = 'sentry'
db_table = 'sentry_organizationmember'
unique_together = (('organization', 'user'), ('organization', 'email'))
__repr__ = sane_repr('organization_id', 'user_id', 'type')
def save(self, *args, **kwargs):
assert self.user_id or self.email, \
'Must set user or email'
return super(OrganizationMember, self).save(*args, **kwargs)
@property
def is_pending(self):
return self.user_id is None
@property
def token(self):
checksum = md5()
for x in (str(self.organization_id), self.get_email(), settings.SECRET_KEY):
checksum.update(x)
return checksum.hexdigest()
@property
def scopes(self):
scopes = []
if self.type <= OrganizationMemberType.MEMBER:
scopes.extend([
'event:read', 'event:write', 'event:delete',
'org:read', 'project:read', 'team:read',
])
if self.type <= OrganizationMemberType.ADMIN:
scopes.extend(['project:write', 'team:write'])
if self.type <= OrganizationMemberType.OWNER:
scopes.extend(['project:delete', 'team:delete'])
if self.has_global_access:
if self.type <= OrganizationMemberType.ADMIN:
scopes.extend(['org:write'])
if self.type <= OrganizationMemberType.OWNER:
scopes.extend(['org:delete'])
return scopes
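    # Illustrative trace (lower type values are more privileged): an ADMIN (25) with
    # has_global_access=True gets
    #   ['event:read', 'event:write', 'event:delete', 'org:read', 'project:read',
    #    'team:read', 'project:write', 'team:write', 'org:write']
    # while only an OWNER (0) additionally gets 'project:delete', 'team:delete'
    # and 'org:delete'.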
def send_invite_email(self):
from sentry.utils.email import MessageBuilder
context = {
'email': self.email,
'organization': self.organization,
'url': absolute_uri(reverse('sentry-accept-invite', kwargs={
'member_id': self.id,
'token': self.token,
})),
}
msg = MessageBuilder(
subject='Invite to join organization: %s' % (self.organization.name,),
template='sentry/emails/member_invite.txt',
context=context,
)
try:
msg.send([self.get_email()])
except Exception as e:
logger = logging.getLogger('sentry.mail.errors')
logger.exception(e)
def send_sso_link_email(self):
from sentry.utils.email import MessageBuilder
context = {
'email': self.email,
'organization_name': self.organization.name,
'url': absolute_uri(reverse('sentry-auth-link-identity', kwargs={
'organization_slug': self.organization.slug,
})),
}
msg = MessageBuilder(
subject='Action Required for %s' % (self.organization.name,),
template='sentry/emails/auth-link-identity.txt',
html_template='sentry/emails/auth-link-identity.html',
context=context,
)
try:
msg.send([self.get_email()])
except Exception as e:
logger = logging.getLogger('sentry.mail.errors')
logger.exception(e)
def get_display_name(self):
if self.user_id:
return self.user.get_display_name()
return self.email
def get_email(self):
if self.user_id:
return self.user.email
return self.email
def get_audit_log_data(self):
return {
'email': self.email,
'user': self.user_id,
'teams': [t.id for t in self.teams.all()],
'has_global_access': self.has_global_access,
}
OrganizationMemberTeams = OrganizationMember.teams.through
|
{
"content_hash": "061534153ff5b904dedcee956b4af835",
"timestamp": "",
"source": "github",
"line_count": 171,
"max_line_length": 87,
"avg_line_length": 32.64912280701754,
"alnum_prop": 0.6089915815869604,
"repo_name": "jokey2k/sentry",
"id": "2073613a9070d666ff8f46e34375f8c98e1ff615",
"size": "5583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sentry/models/organizationmember.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "580459"
},
{
"name": "Gettext Catalog",
"bytes": "2933595"
},
{
"name": "HTML",
"bytes": "292821"
},
{
"name": "JavaScript",
"bytes": "608760"
},
{
"name": "Makefile",
"bytes": "2710"
},
{
"name": "Python",
"bytes": "5105385"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.urls import reverse
from django.utils.encoding import python_2_unicode_compatible
from model_utils import Choices
from minipub.models import MinipubModel
@python_2_unicode_compatible
class Article(MinipubModel):
STATUS = Choices('draft', 'published', 'archived')
title = models.CharField(unique=True, max_length=50)
slug = models.SlugField()
body = models.TextField()
def __str__(self):
return self.title
def get_absolute_url(self):
if self.status == self.STATUS.archived:
return reverse('news_with_archive:article_archived_detail', kwargs={'slug': self.slug})
else:
return reverse('news_with_archive:article_detail', kwargs={'slug': self.slug})
|
{
"content_hash": "aca2b9c5d9b1b8e7d699888895adee3d",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 99,
"avg_line_length": 30.64,
"alnum_prop": 0.6971279373368147,
"repo_name": "richardbarran/django-mininews",
"id": "694faf1808a5aca591e20a6e41286cd1b81f1ec2",
"size": "766",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "example_project/news_with_archive/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "191"
},
{
"name": "HTML",
"bytes": "6752"
},
{
"name": "JavaScript",
"bytes": "471"
},
{
"name": "Python",
"bytes": "41739"
}
],
"symlink_target": ""
}
|
'''
A script to check that the (Linux) executables produced by gitian only contain
allowed gcc, glibc and libstdc++ version symbols. This makes sure they are
still compatible with the minimum supported Linux distribution versions.
Example usage:
find ../gitian-builder/build -type f -executable | xargs python contrib/devtools/symbol-check.py
'''
from __future__ import division, print_function
import subprocess
import re
import sys
# Debian 6.0.9 (Squeeze) has:
#
# - g++ version 4.4.5 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=g%2B%2B)
# - libc version 2.11.3 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=libc6)
# - libstdc++ version 4.4.5 (https://packages.debian.org/search?suite=default&section=all&arch=any&searchon=names&keywords=libstdc%2B%2B6)
#
# Ubuntu 10.04.4 (Lucid Lynx) has:
#
# - g++ version 4.4.3 (http://packages.ubuntu.com/search?keywords=g%2B%2B&searchon=names&suite=lucid&section=all)
# - libc version 2.11.1 (http://packages.ubuntu.com/search?keywords=libc6&searchon=names&suite=lucid&section=all)
# - libstdc++ version 4.4.3 (http://packages.ubuntu.com/search?suite=lucid&section=all&arch=any&keywords=libstdc%2B%2B&searchon=names)
#
# Taking the minimum of these as our target.
#
# According to GNU ABI document (http://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html) this corresponds to:
# GCC 4.4.0: GCC_4.4.0
# GCC 4.4.2: GLIBCXX_3.4.13, CXXABI_1.3.3
# (glibc) GLIBC_2_11
#
MAX_VERSIONS = {
'GCC': (4,4,0),
'CXXABI': (1,3,3),
'GLIBCXX': (3,4,13),
'GLIBC': (2,11)
}
READELF_CMD = '/usr/bin/readelf'
CPPFILT_CMD = '/usr/bin/c++filt'
class CPPFilt(object):
'''
Demangle C++ symbol names.
Use a pipe to the 'c++filt' command.
'''
def __init__(self):
self.proc = subprocess.Popen(CPPFILT_CMD, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
def __call__(self, mangled):
self.proc.stdin.write(mangled + '\n')
return self.proc.stdout.readline().rstrip()
def close(self):
self.proc.stdin.close()
self.proc.stdout.close()
self.proc.wait()
def read_symbols(executable, imports=True):
'''
Parse an ELF executable and return a list of (symbol,version) tuples
for dynamic, imported symbols.
'''
p = subprocess.Popen([READELF_CMD, '--dyn-syms', '-W', executable], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if p.returncode:
raise IOError('Could not read symbols for %s: %s' % (executable, stderr.strip()))
syms = []
for line in stdout.split('\n'):
line = line.split()
if len(line)>7 and re.match('[0-9]+:$', line[0]):
(sym, _, version) = line[7].partition('@')
is_import = line[6] == 'UND'
if version.startswith('@'):
version = version[1:]
if is_import == imports:
syms.append((sym, version))
return syms
def check_version(max_versions, version):
if '_' in version:
(lib, _, ver) = version.rpartition('_')
else:
lib = version
ver = '0'
ver = tuple([int(x) for x in ver.split('.')])
if not lib in max_versions:
return False
return ver <= max_versions[lib]
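# Illustrative examples of check_version against MAX_VERSIONS above:
#   check_version(MAX_VERSIONS, 'GLIBC_2.11')      -> True   ((2, 11) <= (2, 11))
#   check_version(MAX_VERSIONS, 'GLIBC_2.14')      -> False  ((2, 14) >  (2, 11))
#   check_version(MAX_VERSIONS, 'GLIBCXX_3.4.13')  -> True
#   check_version(MAX_VERSIONS, 'FOO_1.0')         -> False  (unknown library)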
if __name__ == '__main__':
cppfilt = CPPFilt()
retval = 0
for filename in sys.argv[1:]:
for sym,version in read_symbols(filename, True):
if version and not check_version(MAX_VERSIONS, version):
print('%s: symbol %s from unsupported version %s' % (filename, cppfilt(sym), version))
retval = 1
exit(retval)
|
{
"content_hash": "d6191ce6c96fd7d41c4fc4426356154a",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 142,
"avg_line_length": 35.72115384615385,
"alnum_prop": 0.6409152086137281,
"repo_name": "mzng/minacoin",
"id": "4f6a18da2d39e3b586702e41261b877104e35944",
"size": "3920",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "contrib/devtools/symbol-check.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "167663"
},
{
"name": "C++",
"bytes": "2911884"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "IDL",
"bytes": "755"
},
{
"name": "Objective-C++",
"bytes": "6262"
},
{
"name": "Python",
"bytes": "107754"
},
{
"name": "Shell",
"bytes": "41889"
},
{
"name": "TypeScript",
"bytes": "10312659"
}
],
"symlink_target": ""
}
|
import re
import math
import multiprocessing
from time import time
from .helpers import parse_date, scrape, headers, dubizzle_request
from .regions import uae
from bs4 import BeautifulSoup
class SearchException(BaseException):
pass
class Search(object):
"""
Simple class that organizes search parameters into a dictionary and allows for a search request to be made.
Works only with Dubizzle UAE.
Arguments:
A keyword (string) and any number of kwargs. Details of possible arguments are provided in the docs or in
`regions.py`.
Returns:
A `Results` object.
"""
def __init__(self, **kwargs):
# General parameters
keyword = kwargs.get('keyword', '')
city = kwargs.get('city', 'all')
section = kwargs.get('section', 'all')
category = kwargs.get('category', 'all')
min_price = kwargs.get('min_price', '')
max_price = kwargs.get('max_price', '')
added_days = kwargs.get('added_days', 30)
# Motors only
make = kwargs.get('make', 'all')
min_year = kwargs.get('min_year', '')
max_year = kwargs.get('max_year', '')
min_kms = kwargs.get('min_kms', '')
max_kms = kwargs.get('max_kms', '')
seller = kwargs.get('seller', 'all')
fuel = kwargs.get('fuel', 'all')
cylinders = kwargs.get('cylinders', 'all')
transmission = kwargs.get('transmission', 'all')
self.params = {
uae['cities']['code']: uae['cities']['options'][city],
uae['sections']['code']: uae['sections']['options'][section],
uae['categories']['code']: uae['categories']['options'][category],
uae['makes']['code']: uae['makes']['options'][make],
'keywords': keyword,
'price__gte': min_price,
'price__lte': max_price,
'added__gte': added_days,
'year__gte': min_year,
'year__lte': max_year,
'kilometers__gte': min_kms,
'kilometers__lte': max_kms,
'seller_type': uae['motors_options']['seller'][seller],
'fuel_type': uae['motors_options']['fuel'][fuel],
'no._of_cylinders': uae['motors_options']['cylinders'][cylinders],
'transmission_type': uae['motors_options']['transmission'][transmission]
}
self.num_results = kwargs.get('num_results', 50)
self.detailed = kwargs.get('detailed', False)
def search(self):
"""Returns a Results object."""
resp = dubizzle_request(uae['base_url'], headers, self.params)
return Results(resp.text, self.num_results, resp.url, self.detailed)
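# Illustrative usage sketch (not part of the module; the keyword and option values
# below are hypothetical examples of the choices defined in regions.py):
#
#   search = Search(keyword='corolla', section='motors', num_results=20)
#   results = search.search()          # returns a Results object
#   for item in results.fetch():
#       print(item['title'], item['price'], item['url'])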
class Results(object):
"""
    Given a base search page in HTML, this fetches (when `fetch` is invoked) the required number of pages in parallel
and then parses the results from each page. The final results are stored in the `results` instance variable.
Arguments:
html (string), num_results (int), url (string)
Returns:
A list of results each in dictionary format.
"""
def __init__(self, html, num_results, url, detailed):
self.html = BeautifulSoup(html)
self.num_results = num_results
self.url = url
self.detailed = detailed
self.results = []
def fetch(self):
# Track time
self.time = time()
items = self.html.select('.listing-item')
if not items:
return []
# Find total pages
try:
num_pages = int(re.match(r'^\?page=(\d+)', self.html.select('.paging_forward > #last_page')[0]['href']).group(1))
except IndexError:
num_pages = 1
# Make sure num_results is less than total results
total_results = len(items) * num_pages
if self.num_results > total_results or self.num_results == 'all':
self.num_results = total_results
# Collect enough page urls to satisfy num_results
needed_pages = int(math.ceil(self.num_results / float(len(items))))
page_urls = [self.url]
search_base = re.match(r'^(.+)\?', self.url).group(1) # Use base provided by Dubizzle's redirect
for el in self.html.select('.pages > .page-links')[1:needed_pages+1]:
page_urls.append(search_base + el['href'])
# Scrape pages in parallel
pool = multiprocessing.Pool(processes=multiprocessing.cpu_count()*2)
raw_results = []
# Iterate through fetched pages and render into BS objects
for page in pool.map(scrape, page_urls):
soup = BeautifulSoup(page)
raw_results += soup.select('.listing-item')
# Parse the raw results and store into self.results; return self.results
return self.parse(raw_results)
def parse(self, raw_results):
for index, result in enumerate(raw_results):
# Stop when requested result count is exceeded
if index+1 > self.num_results:
return self.results
# Skip any featured listings that cause parsing errors
try:
# Don't try to understand the hacks below. I don't even... just hope they don't change the design :P
parsed_result = {
u'title': result.select('.title')[0].text.strip(),
u'date': parse_date(result.select('.date')[0].text.strip()),
u'url': re.match(r'^(.+)\?back', result.select('.title > a')[0]['href']).group(1),
u'location': ' '.join(result.select('.location')[0].text.replace('\n', '').replace(u'\u202a', '')
.split()).replace('Located : ', '').split(' > ')
}
# Get price
try:
parsed_result[u'price'] = int(result.select('.price')[0].text.strip().split(' ')[1].replace(',', ''))
except IndexError:
parsed_result[u'price'] = 0
# Get the category
try:
parsed_result[u'category'] = result.select('.description .breadcrumbs')[0].text.replace(u'\u202a', '') \
.lstrip().split(' > ')
except IndexError:
parsed_result[u'category'] = result.select('.descriptionindented .breadcrumbs')[0].text\
.replace(u'\u202a', '').lstrip().split(' > ')
# Get the image, if available
image = result.select('.has_photo > .thumb > a > div')
if result.select('.has_photo > .thumb > a > div'):
parsed_result[u'image'] = re.findall(r'\((.+)\)', image[0]['style'])[0]
else:
parsed_result[u'image'] = ''
# Get the features
features = {}
for feature in result.select('.features'):
data = feature.select('li')
if data:
for each in data:
pair = each.text.split(': ')
feature_name, feature_value = pair[0].lower(), pair[1].lower()
if feature_name in ['kilometers', 'year']:
if feature_value == 'none':
feature_value = 0
feature_value = int(feature_value)
elif feature_name == 'doors':
feature_value = int(feature_value.split(' ')[0].rstrip('+'))
features[feature_name] = feature_value
parsed_result[u'features'] = features
# Add dict to results list
self.results.append(parsed_result)
except:
continue
return self.results
class Listing(object):
"""Represents a single Dubizzle UAE listing."""
def __init__(self, url):
self.url = url
def fetch(self):
# Listing dict
listing = {}
# Track time
start = time()
# Get listing html
resp = dubizzle_request(self.url, headers)
soup = BeautifulSoup(resp.text)
# URL
listing[u'url'] = self.url
# Title
listing[u'title'] = soup.select('.title')[0].text.strip()
# Photos, if found
photos = []
if soup.select('#photo-count'):
# Find photo count
num_photos = int(soup.select('#photo-count')[0].text.strip().split(' Photo')[0])
# Iterate through photo thumbs
for i in range(1, num_photos+1):
photo = soup.select('#thumb%d > a' % i)[0]
photos.append(photo['href'])
listing[u'photos'] = photos
# Location; too experimental to explain
raw_location = soup.select('.location')[0].text.replace('\n', '').replace('\t', '') \
.replace(u'\u202a', '').replace(u'\xa0', '').strip().split(';')
location = [each.strip() for each in raw_location[0].split(' > ')]
near_to = [raw_location[1].strip()] if raw_location[1] else []
listing[u'location'] = location + near_to
# Google Maps URL
map_js = soup.select('.map-wrapper > script')[0].text
coordinates = re.findall(r'\.setCenter\((\S+)\);', map_js)[0]
listing[u'map'] = u'http://maps.google.com/?q=%s' % coordinates
# Phone number
listing[u'phone'] = soup.find('div', {'class' : 'phone-content'}).contents[0].strip()[1:-1]
# Post date
raw_date = soup.select('.listing-details-header > span')[0].text.split(': ')[1]
listing[u'date'] = parse_date(raw_date)
# Description
listing[u'description'] = soup.select('.trans_toggle_box')[0].text.strip()
# Details
raw_details = soup.select('#listing-details-list li')
parsed_details = {}
for detail in raw_details:
detail_text = detail.text.replace('\n', '').replace(u'\xa0', '').strip()
# For multi-part information
if ',' in detail_text:
split_detail = detail_text.split(':')
title, info = split_detail[0].lower(), [each.strip().lower() for each in split_detail[1].split(', ')]
else:
title, info = [each.strip().lower() for each in detail_text.split(':')]
# Try to convert to integer
try:
info = int(info)
except:
pass
# Store each detail
parsed_details[title] = info
listing[u'details'] = parsed_details
listing[u'time'] = time() - start
return listing
|
{
"content_hash": "09717f65f27eb049fe26817e66d4bd96",
"timestamp": "",
"source": "github",
"line_count": 295,
"max_line_length": 125,
"avg_line_length": 36.70169491525424,
"alnum_prop": 0.5272928789138266,
"repo_name": "b4oshany/dubizzle",
"id": "ea156d7ce7570cc8f188ba511d25425e29a25f76",
"size": "10878",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "dubizzle/uae.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19237"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from twisted.web.template import flattenString
from quoting_element import ExampleElement
def renderDone(output):
print(output)
flattenString(None, ExampleElement()).addCallback(renderDone)
|
{
"content_hash": "152ab2b38428df02d31c6e60818bbc1c",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 61,
"avg_line_length": 33.285714285714285,
"alnum_prop": 0.8154506437768241,
"repo_name": "EricMuller/mywebmarks-backend",
"id": "51e82aa9062f50b8e9d384c41eebec41101886db",
"size": "233",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "requirements/twisted/Twisted-17.1.0/docs/web/howto/listings/render_quoting.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "23736"
},
{
"name": "Batchfile",
"bytes": "3516"
},
{
"name": "C",
"bytes": "37168"
},
{
"name": "CSS",
"bytes": "66211"
},
{
"name": "DIGITAL Command Language",
"bytes": "1032"
},
{
"name": "GAP",
"bytes": "36244"
},
{
"name": "HTML",
"bytes": "1087560"
},
{
"name": "Makefile",
"bytes": "6766"
},
{
"name": "Nginx",
"bytes": "998"
},
{
"name": "Objective-C",
"bytes": "2584"
},
{
"name": "Python",
"bytes": "23014526"
},
{
"name": "Roff",
"bytes": "160293"
},
{
"name": "Shell",
"bytes": "15482"
},
{
"name": "Smarty",
"bytes": "1366"
}
],
"symlink_target": ""
}
|
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the KZCash Core data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/KZCash/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "KZCash")
return os.path.expanduser("~/.kzcash")
def read_bitcoin_config(dbdir):
"""Read the kzcash.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "kzcash.conf"))))
return dict(config_parser.items("all"))
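# Illustrative example (hypothetical kzcash.conf contents):
#
#   rpcuser=alice
#   rpcpassword=secret  # inline comments are stripped by FakeSecHead
#   testnet=1
#
# read_bitcoin_config(dbdir) would then return
#   {'rpcuser': 'alice', 'rpcpassword': 'secret', 'testnet': '1'}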
def connect_JSON(config):
"""Connect to a KZCash Core JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 18276 if testnet else 8276
connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the kzcashd we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(kzcashd):
info = kzcashd.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
kzcashd.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = kzcashd.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(kzcashd):
address_summary = dict()
address_to_account = dict()
for info in kzcashd.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = kzcashd.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = kzcashd.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-kzcash-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
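# Worked example for select_coins() above (illustrative, not part of the original
# script): with needed = Decimal("1.5") and two unspent inputs worth 1.0 and 0.8,
# the greedy loop selects both outputs and returns change of Decimal("0.3");
# create_tx() below then routes that change back to the last --from address
# whenever it exceeds BASE_FEE.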
def create_tx(kzcashd, fromaddresses, toaddress, amount, fee):
all_coins = list_available(kzcashd)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to kzcashd.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = kzcashd.createrawtransaction(inputs, outputs)
signed_rawtx = kzcashd.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(kzcashd, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = kzcashd.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(kzcashd, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = kzcashd.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(kzcashd, txinfo)
        total_out = compute_amount_out(txinfo)
        fee = total_in - total_out  # actual fee paid by this transaction
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
if kb > 1 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
if total_in < 0.01 and fee < BASE_FEE:
raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get kzcashs from")
parser.add_option("--to", dest="to", default=None,
help="address to get send kzcashs to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of kzcash.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
kzcashd = connect_JSON(config)
if options.amount is None:
address_summary = list_available(kzcashd)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(kzcashd) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(kzcashd, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(kzcashd, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = kzcashd.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
{
"content_hash": "4fc36109bce7ce58a82b10da71900575",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 111,
"avg_line_length": 38.25,
"alnum_prop": 0.6148978109762423,
"repo_name": "kzcashteam/kzcash",
"id": "0e403406a9f5fa55d2f52f0a5107cd0904a3ad9a",
"size": "10014",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/spendfrom/spendfrom.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1290743"
},
{
"name": "C++",
"bytes": "5314204"
},
{
"name": "CSS",
"bytes": "124335"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "147899"
},
{
"name": "Makefile",
"bytes": "97350"
},
{
"name": "Objective-C",
"bytes": "3228"
},
{
"name": "Objective-C++",
"bytes": "7228"
},
{
"name": "Python",
"bytes": "706407"
},
{
"name": "QMake",
"bytes": "2057"
},
{
"name": "Roff",
"bytes": "3754"
},
{
"name": "Shell",
"bytes": "35722"
}
],
"symlink_target": ""
}
|
"""
lxml custom element classes for slide-related XML elements, including all
masters.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from . import parse_from_template, parse_xml
from .ns import nsdecls
from .simpletypes import XsdString
from .xmlchemy import (
BaseOxmlElement, OneAndOnlyOne, OptionalAttribute, RequiredAttribute,
ZeroOrMore, ZeroOrOne
)
class _BaseSlideElement(BaseOxmlElement):
"""
Base class for the six slide types, providing common methods.
"""
@property
def spTree(self):
"""
Return required `p:cSld/p:spTree` grandchild.
"""
return self.cSld.spTree
class CT_CommonSlideData(BaseOxmlElement):
"""
``<p:cSld>`` element.
"""
_tag_seq = (
'p:bg', 'p:spTree', 'p:custDataLst', 'p:controls', 'p:extLst'
)
spTree = OneAndOnlyOne('p:spTree')
del _tag_seq
name = OptionalAttribute('name', XsdString, default='')
class CT_NotesMaster(_BaseSlideElement):
"""
``<p:notesMaster>`` element, root of a notes master part
"""
_tag_seq = ('p:cSld', 'p:clrMap', 'p:hf', 'p:notesStyle', 'p:extLst')
cSld = OneAndOnlyOne('p:cSld')
del _tag_seq
@classmethod
def new_default(cls):
"""
Return a new ``<p:notesMaster>`` element based on the built-in
default template.
"""
return parse_from_template('notesMaster')
class CT_NotesSlide(_BaseSlideElement):
"""
``<p:notes>`` element, root of a notes slide part
"""
_tag_seq = ('p:cSld', 'p:clrMapOvr', 'p:extLst')
cSld = OneAndOnlyOne('p:cSld')
del _tag_seq
@classmethod
def new(cls):
"""
Return a new ``<p:notes>`` element based on the default template.
Note that the template does not include placeholders, which must be
subsequently cloned from the notes master.
"""
return parse_from_template('notes')
class CT_Slide(_BaseSlideElement):
"""
``<p:sld>`` element, root of a slide part
"""
_tag_seq = (
'p:cSld', 'p:clrMapOvr', 'p:transition', 'p:timing', 'p:extLst'
)
cSld = OneAndOnlyOne('p:cSld')
clrMapOvr = ZeroOrOne('p:clrMapOvr', successors=_tag_seq[2:])
del _tag_seq
@classmethod
def new(cls):
"""
Return a new ``<p:sld>`` element configured as a base slide shape.
"""
return parse_xml(cls._sld_xml())
@staticmethod
def _sld_xml():
return (
'<p:sld %s>\n'
' <p:cSld>\n'
' <p:spTree>\n'
' <p:nvGrpSpPr>\n'
' <p:cNvPr id="1" name=""/>\n'
' <p:cNvGrpSpPr/>\n'
' <p:nvPr/>\n'
' </p:nvGrpSpPr>\n'
' <p:grpSpPr/>\n'
' </p:spTree>\n'
' </p:cSld>\n'
' <p:clrMapOvr>\n'
' <a:masterClrMapping/>\n'
' </p:clrMapOvr>\n'
'</p:sld>' % nsdecls('a', 'p', 'r')
)
class CT_SlideLayout(_BaseSlideElement):
"""
``<p:sldLayout>`` element, root of a slide layout part
"""
_tag_seq = (
'p:cSld', 'p:clrMapOvr', 'p:transition', 'p:timing', 'p:hf',
'p:extLst'
)
cSld = OneAndOnlyOne('p:cSld')
del _tag_seq
class CT_SlideLayoutIdList(BaseOxmlElement):
"""
``<p:sldLayoutIdLst>`` element, child of ``<p:sldMaster>`` containing
references to the slide layouts that inherit from the slide master.
"""
sldLayoutId = ZeroOrMore('p:sldLayoutId')
class CT_SlideLayoutIdListEntry(BaseOxmlElement):
"""
``<p:sldLayoutId>`` element, child of ``<p:sldLayoutIdLst>`` containing
a reference to a slide layout.
"""
rId = RequiredAttribute('r:id', XsdString)
class CT_SlideMaster(_BaseSlideElement):
"""
``<p:sldMaster>`` element, root of a slide master part
"""
_tag_seq = (
'p:cSld', 'p:clrMap', 'p:sldLayoutIdLst', 'p:transition', 'p:timing',
'p:hf', 'p:txStyles', 'p:extLst'
)
cSld = OneAndOnlyOne('p:cSld')
sldLayoutIdLst = ZeroOrOne('p:sldLayoutIdLst', successors=_tag_seq[3:])
del _tag_seq
|
{
"content_hash": "79e8c9c5a1fc3c5ee49393a83fc1536d",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 77,
"avg_line_length": 26.935897435897434,
"alnum_prop": 0.5680628272251309,
"repo_name": "biggihs/python-pptx",
"id": "dc5ff02b9f1dbf829569f2112fe33aed33f69c76",
"size": "4221",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pptx/oxml/slide.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "101192"
},
{
"name": "Makefile",
"bytes": "2091"
},
{
"name": "PLpgSQL",
"bytes": "48599"
},
{
"name": "Python",
"bytes": "1877645"
}
],
"symlink_target": ""
}
|
from datetime import datetime
import pytest
from pydantic import ValidationError
from datahub.configuration.time_window_config import BucketDuration, get_time_bucket
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.ingestion.api.workunit import MetadataWorkUnit
from datahub.ingestion.source.usage.usage_common import (
BaseUsageConfig,
GenericAggregatedDataset,
)
from datahub.metadata.schema_classes import DatasetUsageStatisticsClass
_TestTableRef = str
_TestAggregatedDataset = GenericAggregatedDataset[_TestTableRef]
def test_add_one_query_without_columns():
test_email = "test_email@test.com"
test_query = "select * from test"
event_time = datetime(2020, 1, 1)
floored_ts = get_time_bucket(event_time, BucketDuration.DAY)
resource = "test_db.test_schema.test_table"
ta = _TestAggregatedDataset(bucket_start_time=floored_ts, resource=resource)
ta.add_read_entry(
test_email,
test_query,
[],
)
assert ta.queryCount == 1
assert ta.queryFreq[test_query] == 1
assert ta.userFreq[test_email] == 1
assert len(ta.columnFreq) == 0
def test_multiple_query_without_columns():
test_email = "test_email@test.com"
test_email2 = "test_email2@test.com"
test_query = "select * from test"
test_query2 = "select * from test2"
event_time = datetime(2020, 1, 1)
floored_ts = get_time_bucket(event_time, BucketDuration.DAY)
resource = "test_db.test_schema.test_table"
ta = _TestAggregatedDataset(bucket_start_time=floored_ts, resource=resource)
ta.add_read_entry(
test_email,
test_query,
[],
)
ta.add_read_entry(
test_email,
test_query,
[],
)
ta.add_read_entry(
test_email2,
test_query2,
[],
)
assert ta.queryCount == 3
assert ta.queryFreq[test_query] == 2
assert ta.userFreq[test_email] == 2
assert ta.queryFreq[test_query2] == 1
assert ta.userFreq[test_email2] == 1
assert len(ta.columnFreq) == 0
def test_make_usage_workunit():
test_email = "test_email@test.com"
test_query = "select * from test"
event_time = datetime(2020, 1, 1)
floored_ts = get_time_bucket(event_time, BucketDuration.DAY)
resource = "test_db.test_schema.test_table"
ta = _TestAggregatedDataset(bucket_start_time=floored_ts, resource=resource)
ta.add_read_entry(
test_email,
test_query,
[],
)
wu: MetadataWorkUnit = ta.make_usage_workunit(
bucket_duration=BucketDuration.DAY, urn_builder=lambda x: x, top_n_queries=10
)
assert wu.id == "2020-01-01T00:00:00-test_db.test_schema.test_table"
assert isinstance(wu.get_metadata()["metadata"], MetadataChangeProposalWrapper)
du: DatasetUsageStatisticsClass = wu.get_metadata()["metadata"].aspect
assert du.totalSqlQueries == 1
assert du.topSqlQueries
assert du.topSqlQueries.pop() == test_query
def test_query_trimming():
test_email: str = "test_email@test.com"
test_query: str = "select * from test where a > 10 and b > 20 order by a asc"
top_n_queries: int = 10
total_budget_for_query_list: int = 200
event_time = datetime(2020, 1, 1)
floored_ts = get_time_bucket(event_time, BucketDuration.DAY)
resource = "test_db.test_schema.test_table"
ta = _TestAggregatedDataset(bucket_start_time=floored_ts, resource=resource)
ta.total_budget_for_query_list = total_budget_for_query_list
ta.add_read_entry(
test_email,
test_query,
[],
)
wu: MetadataWorkUnit = ta.make_usage_workunit(
bucket_duration=BucketDuration.DAY,
urn_builder=lambda x: x,
top_n_queries=top_n_queries,
)
assert wu.id == "2020-01-01T00:00:00-test_db.test_schema.test_table"
assert isinstance(wu.get_metadata()["metadata"], MetadataChangeProposalWrapper)
du: DatasetUsageStatisticsClass = wu.get_metadata()["metadata"].aspect
assert du.totalSqlQueries == 1
assert du.topSqlQueries
assert du.topSqlQueries.pop() == "select * f ..."
def test_top_n_queries_validator_fails():
with pytest.raises(ValidationError) as excinfo:
GenericAggregatedDataset.total_budget_for_query_list = 20
BaseUsageConfig(top_n_queries=2)
assert "top_n_queries is set to 2 but it can be maximum 1" in str(excinfo.value)
|
{
"content_hash": "81580853b626418e184497505627e696",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 85,
"avg_line_length": 30.282758620689656,
"alnum_prop": 0.6770667273969483,
"repo_name": "linkedin/WhereHows",
"id": "021a988422688a214f9e09a70a6689946dfb6d27",
"size": "4391",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "metadata-ingestion/tests/unit/test_usage_common.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "110129"
},
{
"name": "Dockerfile",
"bytes": "2521"
},
{
"name": "HTML",
"bytes": "131513"
},
{
"name": "Java",
"bytes": "1307442"
},
{
"name": "JavaScript",
"bytes": "148450"
},
{
"name": "Nearley",
"bytes": "2837"
},
{
"name": "Python",
"bytes": "1419332"
},
{
"name": "Shell",
"bytes": "2564"
},
{
"name": "TSQL",
"bytes": "42644"
},
{
"name": "TypeScript",
"bytes": "641014"
}
],
"symlink_target": ""
}
|
from keras.layers import Highway as KerasHighway
class Highway(KerasHighway):
"""
Keras' `Highway` layer does not support masking, but it easily could, just by returning the
mask. This `Layer` makes this possible.
"""
def __init__(self, **kwargs):
super(Highway, self).__init__(**kwargs)
self.supports_masking = True
|
{
"content_hash": "c6121946b2fd7939c7d8b7a7cd679c61",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 95,
"avg_line_length": 35.5,
"alnum_prop": 0.6591549295774648,
"repo_name": "matt-gardner/deep_qa",
"id": "117843ce5833515b50d8b3cf6d05d6712df232b9",
"size": "355",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "deep_qa/layers/highway.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1236066"
},
{
"name": "Shell",
"bytes": "5494"
}
],
"symlink_target": ""
}
|
from keystone.common.ldap.core import *
|
{
"content_hash": "b9668f7faa2bc2bc322177b5be775a81",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 39,
"avg_line_length": 40,
"alnum_prop": 0.8,
"repo_name": "sanket4373/keystone",
"id": "5cfd58385a1b4bb7d70fd1eb02b7f17c97591ec7",
"size": "642",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "keystone/common/ldap/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3009738"
},
{
"name": "Shell",
"bytes": "4619"
}
],
"symlink_target": ""
}
|
"""
Meteogram
=========
Plots time series data as a meteogram.
"""
import datetime as dt
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from metpy.calc import dewpoint_from_relative_humidity
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo
from metpy.units import units
def calc_mslp(t, p, h):
return p * (1 - (0.0065 * h) / (t + 0.0065 * h + 273.15)) ** (-5.257)
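# Quick sanity check for calc_mslp() above (illustrative only): at the 292 m station
# height used further below, calc_mslp(20., 980., 292.) reduces a 980 hPa surface
# pressure to roughly 1014 hPa at mean sea level, i.e. a correction of about +34 hPa.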
# Make meteogram plot
class Meteogram:
""" Plot a time series of meteorological data from a particular station as a
meteogram with standard variables to visualize, including thermodynamic,
kinematic, and pressure. The functions below control the plotting of each
variable.
TO DO: Make the subplot creation dynamic so the number of rows is not
static as it is currently. """
def __init__(self, fig, dates, probeid, time=None, axis=0):
"""
Required input:
fig: figure object
dates: array of dates corresponding to the data
probeid: ID of the station
Optional Input:
time: Time the data is to be plotted
axis: number that controls the new axis to be plotted (FOR FUTURE)
"""
if not time:
time = dt.datetime.utcnow()
self.start = dates[0]
self.fig = fig
self.end = dates[-1]
self.axis_num = 0
self.dates = mpl.dates.date2num(dates)
self.time = time.strftime('%Y-%m-%d %H:%M UTC')
self.title = f'Latest Ob Time: {self.time}\nProbe ID: {probeid}'
def plot_winds(self, ws, wd, wsmax, plot_range=None):
"""
Required input:
ws: Wind speeds (knots)
wd: Wind direction (degrees)
wsmax: Wind gust (knots)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
"""
# PLOT WIND SPEED AND WIND DIRECTION
        self.ax1 = self.fig.add_subplot(4, 1, 1)
ln1 = self.ax1.plot(self.dates, ws, label='Wind Speed')
self.ax1.fill_between(self.dates, ws, 0)
self.ax1.set_xlim(self.start, self.end)
if not plot_range:
plot_range = [0, 20, 1]
self.ax1.set_ylabel('Wind Speed (knots)', multialignment='center')
self.ax1.set_ylim(plot_range[0], plot_range[1], plot_range[2])
self.ax1.grid(b=True, which='major', axis='y', color='k', linestyle='--',
linewidth=0.5)
ln2 = self.ax1.plot(self.dates, wsmax, '.r', label='3-sec Wind Speed Max')
ax7 = self.ax1.twinx()
ln3 = ax7.plot(self.dates, wd, '.k', linewidth=0.5, label='Wind Direction')
ax7.set_ylabel('Wind\nDirection\n(degrees)', multialignment='center')
ax7.set_ylim(0, 360)
ax7.set_yticks(np.arange(45, 405, 90))
ax7.set_yticklabels(['NE', 'SE', 'SW', 'NW'])
lines = ln1 + ln2 + ln3
labs = [line.get_label() for line in lines]
ax7.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
ax7.legend(lines, labs, loc='upper center',
bbox_to_anchor=(0.5, 1.2), ncol=3, prop={'size': 12})
def plot_thermo(self, t, td, plot_range=None):
"""
Required input:
T: Temperature (deg F)
TD: Dewpoint (deg F)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
"""
# PLOT TEMPERATURE AND DEWPOINT
if not plot_range:
plot_range = [10, 90, 2]
        self.ax2 = self.fig.add_subplot(4, 1, 2, sharex=self.ax1)
ln4 = self.ax2.plot(self.dates, t, 'r-', label='Temperature')
self.ax2.fill_between(self.dates, t, td, color='r')
self.ax2.set_ylabel('Temperature\n(F)', multialignment='center')
self.ax2.grid(b=True, which='major', axis='y', color='k', linestyle='--',
linewidth=0.5)
self.ax2.set_ylim(plot_range[0], plot_range[1], plot_range[2])
ln5 = self.ax2.plot(self.dates, td, 'g-', label='Dewpoint')
self.ax2.fill_between(self.dates, td, self.ax2.get_ylim()[0], color='g')
ax_twin = self.ax2.twinx()
ax_twin.set_ylim(plot_range[0], plot_range[1], plot_range[2])
lines = ln4 + ln5
labs = [line.get_label() for line in lines]
ax_twin.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
self.ax2.legend(lines, labs, loc='upper center',
bbox_to_anchor=(0.5, 1.2), ncol=2, prop={'size': 12})
def plot_rh(self, rh, plot_range=None):
"""
Required input:
RH: Relative humidity (%)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
"""
# PLOT RELATIVE HUMIDITY
if not plot_range:
plot_range = [0, 100, 4]
        self.ax3 = self.fig.add_subplot(4, 1, 3, sharex=self.ax1)
self.ax3.plot(self.dates, rh, 'g-', label='Relative Humidity')
self.ax3.legend(loc='upper center', bbox_to_anchor=(0.5, 1.22), prop={'size': 12})
self.ax3.grid(b=True, which='major', axis='y', color='k', linestyle='--',
linewidth=0.5)
self.ax3.set_ylim(plot_range[0], plot_range[1], plot_range[2])
self.ax3.fill_between(self.dates, rh, self.ax3.get_ylim()[0], color='g')
self.ax3.set_ylabel('Relative Humidity\n(%)', multialignment='center')
self.ax3.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
axtwin = self.ax3.twinx()
axtwin.set_ylim(plot_range[0], plot_range[1], plot_range[2])
def plot_pressure(self, p, plot_range=None):
"""
Required input:
P: Mean Sea Level Pressure (hPa)
Optional Input:
plot_range: Data range for making figure (list of (min,max,step))
"""
# PLOT PRESSURE
if not plot_range:
plot_range = [970, 1030, 2]
        self.ax4 = self.fig.add_subplot(4, 1, 4, sharex=self.ax1)
self.ax4.plot(self.dates, p, 'm', label='Mean Sea Level Pressure')
self.ax4.set_ylabel('Mean Sea\nLevel Pressure\n(mb)', multialignment='center')
self.ax4.set_ylim(plot_range[0], plot_range[1], plot_range[2])
axtwin = self.ax4.twinx()
axtwin.set_ylim(plot_range[0], plot_range[1], plot_range[2])
axtwin.fill_between(self.dates, p, axtwin.get_ylim()[0], color='m')
axtwin.xaxis.set_major_formatter(mpl.dates.DateFormatter('%d/%H UTC'))
self.ax4.legend(loc='upper center', bbox_to_anchor=(0.5, 1.2), prop={'size': 12})
self.ax4.grid(b=True, which='major', axis='y', color='k', linestyle='--',
linewidth=0.5)
# OTHER OPTIONAL AXES TO PLOT
# plot_irradiance
# plot_precipitation
# set the starttime and endtime for plotting, 24 hour range
endtime = dt.datetime(2016, 3, 31, 22, 0, 0, 0)
starttime = endtime - dt.timedelta(hours=24)
# Height of the station to calculate MSLP
hgt_example = 292.
# Parse dates from .csv file, knowing their format as a string and convert to datetime
def parse_date(date):
return dt.datetime.strptime(date.decode('ascii'), '%Y-%m-%d %H:%M:%S')
testdata = np.genfromtxt(get_test_data('timeseries.csv', False), names=True, dtype=None,
usecols=list(range(1, 8)),
converters={'DATE': parse_date}, delimiter=',')
# Temporary variables for ease
temp = testdata['T']
pres = testdata['P']
rh = testdata['RH']
ws = testdata['WS']
wsmax = testdata['WSMAX']
wd = testdata['WD']
date = testdata['DATE']
# ID For Plotting on Meteogram
probe_id = '0102A'
data = {'wind_speed': (np.array(ws) * units('m/s')).to(units('knots')),
'wind_speed_max': (np.array(wsmax) * units('m/s')).to(units('knots')),
'wind_direction': np.array(wd) * units('degrees'),
'dewpoint': dewpoint_from_relative_humidity((np.array(temp) * units.degC).to(units.K),
np.array(rh) / 100.).to(units('degF')),
'air_temperature': (np.array(temp) * units('degC')).to(units('degF')),
'mean_slp': calc_mslp(np.array(temp), np.array(pres), hgt_example) * units('hPa'),
'relative_humidity': np.array(rh), 'times': np.array(date)}
fig = plt.figure(figsize=(20, 16))
add_metpy_logo(fig, 250, 180)
meteogram = Meteogram(fig, data['times'], probe_id)
meteogram.plot_winds(data['wind_speed'], data['wind_direction'], data['wind_speed_max'])
meteogram.plot_thermo(data['air_temperature'], data['dewpoint'])
meteogram.plot_rh(data['relative_humidity'])
meteogram.plot_pressure(data['mean_slp'])
fig.subplots_adjust(hspace=0.5)
plt.show()
|
{
"content_hash": "823d78bdd0048b0ae5a14bd0d758e586",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 94,
"avg_line_length": 40.34722222222222,
"alnum_prop": 0.5932300631095812,
"repo_name": "metpy/MetPy",
"id": "fbb78df2b3e679ff597a4ae53f06b0cba8439283",
"size": "8853",
"binary": false,
"copies": "2",
"ref": "refs/heads/gh-pages",
"path": "v1.0/_downloads/133a14b679bd8a5b58cfff249184243a/meteogram_metpy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "989941"
},
{
"name": "Python",
"bytes": "551868"
}
],
"symlink_target": ""
}
|
"""This module has configurations for flask app."""
import logging
import os
CONFIG = {
"development": "flask_app.config.DevelopmentConfig",
"testing": "flask_app.config.TestingConfig",
"production": "flask_app.config.ProductionConfig",
"default": "flask_app.config.ProductionConfig"
}
class BaseConfig(object):
"""Base class for default set of configs."""
DEBUG = False
TESTING = False
SECURITY_PASSWORD_HASH = 'pbkdf2_sha512'
SECURITY_TRACKABLE = True
LOGGING_FORMAT = "[%(asctime)s] [%(funcName)-30s] +\
[%(levelname)-6s] %(message)s"
LOGGING_LOCATION = 'web.log'
LOGGING_LEVEL = logging.DEBUG
SECURITY_TOKEN_MAX_AGE = 60 * 30
SECURITY_CONFIRMABLE = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
CACHE_TYPE = 'simple'
SECURITY_PASSWORD_SALT = 'super-secret-stuff-here'
COMPRESS_MIMETYPES = ['text/html', 'text/css', 'text/xml',
'application/json', 'application/javascript']
WTF_CSRF_ENABLED = False
COMPRESS_LEVEL = 6
COMPRESS_MIN_SIZE = 500
# Change it based on your admin user
ADMIN_USER = 'admin'
ADMIN_PASSWORD = 'admin'
class DevelopmentConfig(BaseConfig):
"""Default set of configurations for development mode."""
DEBUG = True
TESTING = False
BASEDIR = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASEDIR, 'app.db')
SECRET_KEY = 'not-so-super-secret'
class ProductionConfig(BaseConfig):
"""Default set of configurations for prod mode."""
DEBUG = False
TESTING = False
BASEDIR = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(BASEDIR, 'app.db')
SECRET_KEY = 'Super-awesome-secret-stuff'
class TestingConfig(BaseConfig):
"""Default set of configurations for test mode."""
DEBUG = False
TESTING = True
SQLALCHEMY_DATABASE_URI = 'sqlite://'
SECRET_KEY = '792842bc-c4df-4de1-9177-d5207bd9faa6'
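# Illustrative usage (assumes an application factory elsewhere in the project, which
# is not part of this module; the environment variable name below is also illustrative):
#
#     import os
#     from flask import Flask
#
#     app = Flask(__name__)
#     config_name = os.getenv('FLASK_CONFIGURATION', 'default')
#     app.config.from_object(CONFIG[config_name])
#
# i.e. the CONFIG mapping above resolves an environment name to the dotted path of
# the matching config class.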
|
{
"content_hash": "87382eaabae20a39d809b4f1b7d2ee04",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 76,
"avg_line_length": 30.029411764705884,
"alnum_prop": 0.6523016650342801,
"repo_name": "stevenaubertin/angular2-flask",
"id": "177021a2324758d5244f999b33f4e28311e96b14",
"size": "2042",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "backend/flask_app/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20250"
},
{
"name": "HTML",
"bytes": "17608"
},
{
"name": "JavaScript",
"bytes": "54107"
},
{
"name": "Nginx",
"bytes": "616"
},
{
"name": "Python",
"bytes": "12500"
},
{
"name": "TypeScript",
"bytes": "24614"
}
],
"symlink_target": ""
}
|
import sys
import os.path as op
def main():
sys.path.insert(0, op.abspath('.'))
from niworkflows.__about__ import __version__
print(__version__)
if __name__ == '__main__':
main()
|
{
"content_hash": "fbf7e4370c07ec7d427ab3ebab242d0a",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 49,
"avg_line_length": 16.583333333333332,
"alnum_prop": 0.5728643216080402,
"repo_name": "oesteban/niworkflows",
"id": "7f287990193416f3c5abc840886e20e7bfc830e4",
"size": "410",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "get_version.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "11035"
},
{
"name": "HTML",
"bytes": "500"
},
{
"name": "Makefile",
"bytes": "413"
},
{
"name": "Python",
"bytes": "791805"
},
{
"name": "Shell",
"bytes": "1717"
},
{
"name": "Smarty",
"bytes": "5875"
}
],
"symlink_target": ""
}
|
"""This module tests the brillo sdk command."""
from __future__ import print_function
import mock
import os
from chromite.cbuildbot import constants
from chromite.cbuildbot import repository
from chromite.cli import command_unittest
from chromite.cli.brillo import brillo_sdk
from chromite.lib import bootstrap_lib
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import cros_build_lib_unittest
from chromite.lib import cros_test_lib
from chromite.lib import gs
from chromite.lib import gs_unittest
from chromite.lib import osutils
from chromite.lib import project_sdk
from chromite.lib import workspace_lib
# Unittests often access internals.
# pylint: disable=protected-access
class MockSdkCommand(command_unittest.MockCommand):
"""Mock out the `brillo sdk` command."""
TARGET = 'chromite.cli.brillo.brillo_sdk.SdkCommand'
TARGET_CLASS = brillo_sdk.SdkCommand
COMMAND = 'sdk'
class BrilloSdkTest(cros_test_lib.MockTempDirTestCase):
"""Test class for brillo_sdk module functions."""
def setUp(self):
# Avoid self-updating for most tests.
os.environ[brillo_sdk._BRILLO_SDK_NO_UPDATE] = '1'
self.bootstrap_path = os.path.join(self.tempdir, 'bootstrap')
self.sdk_path = os.path.join(self.tempdir, 'sdk')
self.workspace_path = os.path.join(self.tempdir, 'workspace')
osutils.SafeMakedirs(self.workspace_path)
def fakeRepoRoot(d):
bootstrap_checkouts = os.path.join(self.bootstrap_path,
bootstrap_lib.SDK_CHECKOUTS)
if d.startswith(self.sdk_path) or d.startswith(bootstrap_checkouts):
return d
return None
self.PatchObject(project_sdk, 'FindRepoRoot', side_effect=fakeRepoRoot)
# Prevent actually downloading a repository.
self.mock_repo = self.PatchObject(repository, 'RepoRepository')
# Prevent actual GS interaction.
self.gs_mock = self.StartPatcher(gs_unittest.GSContextMock())
self.gs_mock.SetDefaultCmdResult()
# Looking up the 'latest' version from GS will return 4.5.6
self.latest_version = '4.5.6'
self.gs_mock.AddCmdResult(
['cat', constants.BRILLO_LATEST_RELEASE_URL],
output=self.latest_version)
def testResolveLatest(self):
"""Tests _ResolveLatest()."""
result = brillo_sdk._ResolveLatest(gs.GSContext())
self.assertEqual(self.latest_version, result)
def testUpdateWorkspaceSdk(self):
"""Tests _UpdateWorkspaceSdk() with a numeric version."""
brillo_sdk._UpdateWorkspaceSdk(
gs.GSContext(), self.bootstrap_path, self.workspace_path, '1.2.3')
# Given the explicit path and version, sync what we expect, and where.
expected = [mock.call(mock.ANY,
os.path.join(self.tempdir,
'bootstrap/sdk_checkouts/1.2.3'),
depth=1,
repo_cmd=mock.ANY),
mock.call().Sync()]
self.assertEqual(expected, self.mock_repo.mock_calls)
# Update a second time, to ensure it does nothing the second time.
brillo_sdk._UpdateWorkspaceSdk(
gs.GSContext(), self.bootstrap_path, self.workspace_path, '1.2.3')
self.assertEqual(expected, self.mock_repo.mock_calls)
def testUpdateWorkspaceSdkLatest(self):
"""Tests _UpdateWorkspaceSdk() with 'latest'."""
brillo_sdk._UpdateWorkspaceSdk(
gs.GSContext(), self.bootstrap_path, self.workspace_path, 'latest')
# Given the explicit path and version, sync what we expect, and where.
expected = [mock.call(mock.ANY,
os.path.join(self.tempdir,
'bootstrap/sdk_checkouts',
self.latest_version),
depth=1,
repo_cmd=mock.ANY),
mock.call().Sync()]
self.assertEqual(expected, self.mock_repo.mock_calls)
def testUpdateWorkspaceSdkTot(self):
"""Tests _UpdateWorkspaceSdk() with 'tot'."""
brillo_sdk._UpdateWorkspaceSdk(
gs.GSContext(), self.bootstrap_path, self.workspace_path, 'tot')
# Given the explicit path and version, sync what we expect, and where.
expected = [mock.call(constants.MANIFEST_URL,
os.path.join(self.tempdir,
'bootstrap/sdk_checkouts/tot'),
groups='project_sdk',
repo_cmd=mock.ANY),
mock.call().Sync()]
self.assertEqual(expected, self.mock_repo.mock_calls)
# Update a second time, to ensure it DOES update.
brillo_sdk._UpdateWorkspaceSdk(
gs.GSContext(), self.bootstrap_path, self.workspace_path, 'tot')
self.assertEqual(2 * expected, self.mock_repo.mock_calls)
def testDownloadSdk(self):
"""Tests DownloadSdk() with a numeric version."""
brillo_sdk._DownloadSdk(gs.GSContext(), self.sdk_path, '1.2.3')
# Given the explicit path and version, sync what we expect, and where.
expected = [mock.call(mock.ANY,
self.sdk_path,
depth=1,
repo_cmd=mock.ANY),
mock.call().Sync()]
self.assertEqual(expected, self.mock_repo.mock_calls)
# Verify that the right version number was written out.
sdk_version_file = project_sdk.VersionFile(self.sdk_path)
self.assertEqual('1.2.3', osutils.ReadFile(sdk_version_file))
def testDownloadSdkFailureCleanup(self):
"""Tests DownloadSdk() with a numeric version."""
# Create sdk_path, and put something in it so we confirm it's gone.
osutils.Touch(os.path.join(self.sdk_path, 'contents'), makedirs=True)
# Prep for failure via the repo mock.
class FakeException(Exception):
"""Raised to simulate a failure."""
self.mock_repo.side_effect = FakeException('Testing a failure.')
# Run, and fail.
with self.assertRaises(FakeException):
brillo_sdk._DownloadSdk(gs.GSContext(), self.sdk_path, '1.2.3')
# Make sure the SDK dir was cleaned up.
self.assertFalse(os.path.exists(self.sdk_path))
class SdkVersionExistsTest(cros_test_lib.WorkspaceTestCase):
"""Tests for _SdkVersionExists()."""
VALID_LOCAL_SDK_VERSION = '1.2.3'
VALID_GS_SDK_VERSION = '4.5.6'
VALID_GS_SDK_URL = brillo_sdk._GetSdkManifestUrl(VALID_GS_SDK_VERSION)
class GSContextFake(object):
"""Fake GSContext class for Exists() functionality."""
def Exists(self, url, **_kwargs):
"""Only VALID_GS_SDK_URL returns True."""
return url == SdkVersionExistsTest.VALID_GS_SDK_URL
def _VerifySdkVersionExists(self, version, expected):
"""Verifies _SdkVersionExists().
Args:
version: Version to pass in.
expected: Expected result.
"""
result = brillo_sdk._SdkVersionExists(self.GSContextFake(),
self.bootstrap_path, version)
self.assertEqual(expected, result)
def testSdkVersionExists(self):
"""Tests the expected behavior."""
self.CreateBootstrap(sdk_version=self.VALID_LOCAL_SDK_VERSION)
self._VerifySdkVersionExists('tot', True)
self._VerifySdkVersionExists('latest', True)
self._VerifySdkVersionExists(self.VALID_LOCAL_SDK_VERSION, True)
self._VerifySdkVersionExists(self.VALID_GS_SDK_VERSION, True)
self._VerifySdkVersionExists('tot2', False)
self._VerifySdkVersionExists('', False)
class BrilloSdkTestUpdateBootstrap(cros_test_lib.MockTempDirTestCase):
"""Test the bootstrap update functionality of brillo_sdk.
This is a new class, to avoid mocks interfering with each other.
"""
def setUp(self):
if brillo_sdk._BRILLO_SDK_NO_UPDATE in os.environ:
del os.environ[brillo_sdk._BRILLO_SDK_NO_UPDATE]
self.bootstrap_path = os.path.join(self.tempdir, 'bootstrap')
self.rc_mock = self.StartPatcher(cros_build_lib_unittest.RunCommandMock())
self.rc_mock.SetDefaultCmdResult()
def testUpdateBootstrap(self):
"""Tests _UpdateBootstrap()."""
with self.assertRaises(commandline.ExecRequiredError):
brillo_sdk._UpdateBootstrap(self.bootstrap_path)
# Test we did the git pull before raising....
self.rc_mock.assertCommandContains(
['git', 'pull'], cwd=self.bootstrap_path)
# Test we updated our env before raising....
self.assertIn(brillo_sdk._BRILLO_SDK_NO_UPDATE, os.environ)
def testBootstrapAlreadyUpdated(self):
"""Tests _UpdateBootstrap() doesn't run if already updated."""
# Mark that we already updated.
os.environ[brillo_sdk._BRILLO_SDK_NO_UPDATE] = '1'
# Try to update again (no exception raised).
brillo_sdk._UpdateBootstrap(self.bootstrap_path)
# Test we didn't run a git pull.
self.assertEquals(0, self.rc_mock.call_count)
class BrilloSdkCommandTest(cros_test_lib.OutputTestCase,
cros_test_lib.WorkspaceTestCase):
"""Test class for our SdkCommand class."""
def setUp(self):
if brillo_sdk._BRILLO_SDK_NO_UPDATE in os.environ:
del os.environ[brillo_sdk._BRILLO_SDK_NO_UPDATE]
self.cmd_mock = None
# Workspace is supposed to exist in advance.
self.CreateBootstrap()
self.CreateWorkspace()
# Pretend we are outside the chroot, since this command only runs there.
self.mock_inside = self.PatchObject(cros_build_lib, 'IsInsideChroot',
return_value=False)
# Need to mock this; since RunCommand() is mocked out, all the environment
# checks would fail.
self.verify_environment_mock = self.PatchObject(
project_sdk, 'VerifyEnvironment', return_value=True)
# Prevent repo operations.
self.PatchObject(repository, 'PrepManifestForRepo')
self.PatchObject(repository, 'RepoRepository')
# Default to valid SDK version.
self.sdk_version_exists_mock = self.PatchObject(
brillo_sdk, '_SdkVersionExists', return_value=True)
def SetupCommandMock(self, cmd_args):
"""Sets up the command mock."""
self.cmd_mock = MockSdkCommand(cmd_args)
self.StartPatcher(self.cmd_mock)
def testHandleSelfUpdateAndRestart(self):
"""Tests that --update causes a re-exec."""
self.SetupCommandMock(['--update', 'latest'])
with self.assertRaises(commandline.ExecRequiredError):
self.cmd_mock.inst.Run()
def testHandleVersionOnlyNoUpdate(self):
"""Tests that `cros sdk` logs a version and doesn't re-exec."""
self.SetupCommandMock([])
workspace_lib.SetActiveSdkVersion(self.workspace_path, '1.2.3')
with self.OutputCapturer():
self.cmd_mock.inst.Run()
self.AssertOutputContainsLine('1.2.3', check_stderr=True)
def testHandleSelfUpdateAfterRestart(self):
"""Tests that --update doesn't re-exec a second time."""
os.environ[brillo_sdk._BRILLO_SDK_NO_UPDATE] = '1'
self.SetupCommandMock(['--update', 'latest'])
self.cmd_mock.inst.Run()
def testVerifyEnvironmentAfterSelfUpdate(self):
"""Tests that environment verification happens after self-update."""
update_bootstrap_mock = self.PatchObject(brillo_sdk, '_UpdateBootstrap')
self.verify_environment_mock.side_effect = (
lambda: update_bootstrap_mock.called)
self.SetupCommandMock(['--update', 'latest'])
self.cmd_mock.inst.Run()
def testInvalidUpdateVersion(self):
"""Tests --update with an invalid SDK version."""
update_bootstrap_mock = self.PatchObject(brillo_sdk, '_UpdateBootstrap')
update_workspace_sdk_mock = self.PatchObject(brillo_sdk,
'_UpdateWorkspaceSdk')
self.sdk_version_exists_mock.return_value = False
self.SetupCommandMock(['--update', 'bad_version'])
with self.assertRaises(cros_build_lib.DieSystemExit):
self.cmd_mock.inst.Run()
# None of the update functions should have been called.
self.assertFalse(update_bootstrap_mock.called)
self.assertFalse(self.verify_environment_mock.called)
self.assertFalse(update_workspace_sdk_mock.called)
def testVersionOption(self):
"""Tests that --version prints to stdout."""
self.SetupCommandMock(['--version'])
workspace_lib.SetActiveSdkVersion(self.workspace_path, 'foo')
with self.OutputCapturer():
self.cmd_mock.inst.Run()
self.AssertOutputContainsLine('foo')
def testVersionOptionSdkNotFound(self):
"""Tests that --version errors out if a version can't be found."""
self.SetupCommandMock(['--version'])
with self.OutputCapturer():
with self.assertRaises(cros_build_lib.DieSystemExit):
self.cmd_mock.inst.Run()
self.AssertOutputContainsLine('This workspace does not have an SDK.',
check_stderr=True, check_stdout=False)
|
{
"content_hash": "aac5ae69c988f3c39f55e4e23892198c",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 78,
"avg_line_length": 37.228070175438596,
"alnum_prop": 0.6714577442664154,
"repo_name": "guorendong/iridium-browser-ubuntu",
"id": "3439347fe2cd8937d2dc3550ad7e4fe36fc24140",
"size": "12898",
"binary": false,
"copies": "1",
"ref": "refs/heads/ubuntu/precise",
"path": "third_party/chromite/cli/brillo/brillo_sdk_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "8402"
},
{
"name": "Assembly",
"bytes": "256197"
},
{
"name": "Batchfile",
"bytes": "34966"
},
{
"name": "C",
"bytes": "15445429"
},
{
"name": "C++",
"bytes": "276628399"
},
{
"name": "CMake",
"bytes": "27829"
},
{
"name": "CSS",
"bytes": "867238"
},
{
"name": "Emacs Lisp",
"bytes": "3348"
},
{
"name": "Go",
"bytes": "13628"
},
{
"name": "Groff",
"bytes": "7777"
},
{
"name": "HTML",
"bytes": "20250399"
},
{
"name": "Java",
"bytes": "9950308"
},
{
"name": "JavaScript",
"bytes": "13873772"
},
{
"name": "LLVM",
"bytes": "1169"
},
{
"name": "Logos",
"bytes": "6893"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "179129"
},
{
"name": "Objective-C",
"bytes": "1871766"
},
{
"name": "Objective-C++",
"bytes": "9674498"
},
{
"name": "PHP",
"bytes": "42038"
},
{
"name": "PLpgSQL",
"bytes": "163248"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "474121"
},
{
"name": "Python",
"bytes": "11646662"
},
{
"name": "Ragel in Ruby Host",
"bytes": "104923"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "1151673"
},
{
"name": "Standard ML",
"bytes": "5034"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
from classytags.arguments import Argument
from classytags.core import Options
from classytags.helpers import AsTag
from django import template
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.db.models import Count
from taggit_templatetags2 import settings
from taggit_templatetags2.compat import get_model
T_MAX = getattr(settings, 'TAGCLOUD_MAX', 6.0)
T_MIN = getattr(settings, 'TAGCLOUD_MIN', 1.0)
register = template.Library()
def get_queryset(forvar, taggeditem_model, tag_model):
through_opts = taggeditem_model._meta
count_field = (
"%s_%s_items" % (
through_opts.app_label,
through_opts.object_name)).lower()
if forvar is None:
# get all tags
queryset = tag_model.objects.all()
else:
# extract app label and model name
beginning, applabel, model = None, None, None
try:
beginning, applabel, model = forvar.rsplit('.', 2)
except ValueError:
try:
applabel, model = forvar.rsplit('.', 1)
except ValueError:
applabel = forvar
applabel = applabel.lower()
# filter tagged items
if model is None:
# Get tags for a whole app
queryset = taggeditem_model.objects.filter(
content_type__app_label=applabel)
tag_ids = queryset.values_list('tag_id', flat=True)
queryset = tag_model.objects.filter(id__in=tag_ids)
else:
# Get tags for a model
model = model.lower()
if ":" in model:
model, manager_attr = model.split(":", 1)
else:
manager_attr = "tags"
model_class = get_model(applabel, model)
if not model_class:
raise Exception(
'Not found such a model "%s" in the application "%s"' %
(model, applabel))
manager = getattr(model_class, manager_attr)
queryset = manager.all()
through_opts = manager.through._meta
count_field = ("%s_%s_items" % (through_opts.app_label,
through_opts.object_name)).lower()
if count_field is None:
        # Retain compatibility with older versions of django-taggit.
        # A version check (e.g. taggit.VERSION <= (0, 8, 0)) does NOT work here,
        # because the current development version of django-taggit also reports
        # version (0, 8, 0).
try:
return queryset.annotate(
num_times=Count(settings.TAG_FIELD_RELATED_NAME))
except FieldError:
return queryset.annotate(
num_times=Count('taggit_taggeditem_items'))
else:
return queryset.annotate(num_times=Count(count_field))
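# Usage note for get_queryset() above (illustrative): ``forvar`` accepts an app label
# ("blog"), an "app.model" pair ("blog.post"), or "app.model:manager" when the tags
# live on a non-default manager. For example, {% get_taglist as tags for 'blog.post' %}
# with the tag defined below limits the list to tags used on blog.Post objects.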
def get_weight_fun(t_min, t_max, f_min, f_max):
def weight_fun(f_i, t_min=t_min, t_max=t_max, f_min=f_min, f_max=f_max):
# Prevent a division by zero here, found to occur under some
# pathological but nevertheless actually occurring circumstances.
if f_max == f_min:
mult_fac = 1.0
else:
mult_fac = float(t_max - t_min) / float(f_max - f_min)
return t_max - (f_max - f_i) * mult_fac
return weight_fun
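# Worked example for get_weight_fun() above (illustrative): with the module defaults
# T_MIN=1.0 and T_MAX=6.0 and tag counts ranging from 2 to 12,
# weight = get_weight_fun(1.0, 6.0, 2, 12) maps counts linearly onto weights:
# weight(2) == 1.0, weight(7) == 3.5 and weight(12) == 6.0, which the tag cloud
# templates typically use as relative font sizes.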
@register.tag
class GetTagForObject(AsTag):
name = 'get_tags_for_object'
options = Options(
Argument('source_object', resolve=True, required=True),
'as',
Argument('varname', resolve=False, required=False),
)
def get_value(self, context, source_object, varname=''):
"""
Args:
source_object - <django model object>
Return:
queryset tags
"""
tag_model = settings.TAG_MODEL
app_label = source_object._meta.app_label
try:
model = source_object._meta.model_name
except AttributeError:
model = source_object._meta.module_name.lower()
content_type = ContentType.objects.get(app_label=app_label,
model=model)
try:
tags = tag_model.objects.filter(
taggit_taggeditem_items__object_id=source_object,
taggit_taggeditem_items__content_type=content_type)
except:
tags = tag_model.objects.filter(
taggit_taggeditem_items__object_id=source_object.pk,
taggit_taggeditem_items__content_type=content_type)
if varname:
            context[varname] = tags
return ''
else:
return tags
class TaggitBaseTag(AsTag):
options = Options(
'as',
Argument('varname', resolve=False, required=False),
'for',
Argument('forvar', required=False),
'limit',
Argument('limit', required=False, default=5, resolve=True),
)
@register.tag
class GetTagList(TaggitBaseTag):
name = 'get_taglist'
def get_value(self, context, varname, forvar, limit=settings.LIMIT, order_by=settings.TAG_LIST_ORDER_BY):
# TODO: remove default value for limit, report a bug in the application
# django-classy-tags, the default value does not work
queryset = get_queryset(
forvar,
settings.TAGGED_ITEM_MODEL,
settings.TAG_MODEL)
        queryset = queryset.order_by(order_by)
        if limit:
            queryset = queryset[:limit]
        context[varname] = queryset
return ''
@register.tag
class GetTagCloud(TaggitBaseTag):
name = 'get_tagcloud'
def get_value(self, context, varname, forvar, limit=settings.LIMIT, order_by=settings.TAG_CLOUD_ORDER_BY):
queryset = get_queryset(
forvar,
settings.TAGGED_ITEM_MODEL,
settings.TAG_MODEL)
num_times = queryset.values_list('num_times', flat=True)
if(len(num_times) == 0):
context[varname] = queryset
return ''
weight_fun = get_weight_fun(
T_MIN, T_MAX, min(num_times), max(num_times))
queryset = queryset.order_by(order_by)
if limit:
queryset = queryset[:limit]
for tag in queryset:
tag.weight = weight_fun(tag.num_times)
context[varname] = queryset
return ''
@register.inclusion_tag('taggit_templatetags2/tagcloud_include.html')
def include_tagcloud(forvar=None):
return {'forvar': forvar}
@register.inclusion_tag('taggit_templatetags2/taglist_include.html')
def include_taglist(forvar=None):
return {'forvar': forvar}
@register.inclusion_tag('taggit_templatetags2/tagcanvas_include.html')
def include_tagcanvas(element_id, width, height, url_name='tagcanvas-list',
forvar=None, limit=3):
"""
Args:
element_id - str - html id
width - int - pixels width
height - int - pixels height
url_name - if url_name=='' then no links. Default: tagcanvas-list
"""
if url_name == 'default':
url_name = 'tagcanvas-list'
return {
'forvar': forvar,
'element_id': element_id,
'width': width,
'height': height,
'url_name': url_name,
'limit': limit}
|
{
"content_hash": "168431c92b5b39eec30bd0568713f469",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 110,
"avg_line_length": 32.486607142857146,
"alnum_prop": 0.5858183317301086,
"repo_name": "fizista/django-taggit-templatetags2",
"id": "a9dd2d83685958d198f72d924d78ad459057e45d",
"size": "7277",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taggit_templatetags2/templatetags/taggit_templatetags2_tags.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2650"
},
{
"name": "JavaScript",
"bytes": "90112"
},
{
"name": "Python",
"bytes": "28517"
}
],
"symlink_target": ""
}
|
import os
import hashlib
import tempfile
import unittest
import vivisect
from vivisect.const import *
def add_events(vw):
# Call some APIs to populate the event list.
# Coherency is not the objective. Just get some events in there.
filehash = hashlib.md5(b'testfile').hexdigest()
vw.addFile('testfile', 0x1000, filehash)
vw.setMeta('Architecture', 'i386')
vw.setMeta('Format', 'pe')
vw.addMemoryMap(0x1000, 7, 'testfile', b'\x00' * 0x9000)
vw.addLocation(0x2000, 4, 4, tinfo='fakeptr')
vw.addLocation(0x3000, 16, 6, tinfo='oogieboogie')
vw.addLocation(0x4000, 3, 5)
vw.addLocation(0x5000, 3, 5)
vw.addLocation(0x6000, 3, 5)
vw.delLocation(0x4000)
vw.addXref(0x4000, 0x3000, 1)
vw.addXref(0x5000, 0x3000, 1)
vw.delXref((0x5000, 0x3000, 1, 0))
vw.setMeta('foo', 'bar')
vw.setFileMeta('testfile', 'neato', 'burrito')
vw.addVaSet('EmptySet', (('va', VASET_ADDRESS),))
vw.delVaSet('FuncWrappers')
vw.setComment(0x2000, 'test comment')
vw.addExport(0x7000, EXP_FUNCTION, 'kernel32.YoThisExportFake', 'testfile')
class StorageTests(unittest.TestCase):
def setUp(self):
'''
So on windows, you can't double open a temporary file (results in a fun "Permission Denied"
exception). So instead, we setup a temporary file here and delete it in tearDown so that
we don't maintain an open file descriptor to the temporary file
'''
self.tmpf = tempfile.NamedTemporaryFile(delete=False)
def tearDown(self):
self.tmpf.close()
os.unlink(self.tmpf.name)
def test_msgpack_idempotent(self):
# test that what we put in, we can get out
vw = vivisect.VivWorkspace()
vw.setMeta('StorageName', self.tmpf.name)
vw.setMeta('StorageModule', 'vivisect.storage.mpfile')
add_events(vw)
vw.saveWorkspace()
self.tmpf.flush()
ovw = vivisect.VivWorkspace()
ovw.setMeta('StorageModule', 'vivisect.storage.mpfile')
# So this is a bit naughty, but just the act of creating a workspace
# induces some events in to the workspace. Nothing crazy, just some va sets
# so delete those so we can have a clean comparison
ovw._event_list = []
ovw.loadWorkspace(self.tmpf.name)
old = list(vw.exportWorkspace())
new = list(ovw.exportWorkspace())
self.assertEqual(len(old), 38)
self.assertEqual(len(new), 39) # the last event is a setMeta made by loadWorkspace
self.assertEqual(new[-1], (VWE_SETMETA, ('StorageName', self.tmpf.name)))
for idx in range(len(old)):
self.assertEqual(old[idx], new[idx])
def test_msgpack_to_basicfile(self):
# make sure we're on par with what the OG storage mechanism can do
mpfile = tempfile.NamedTemporaryFile(delete=False)
basicfile = tempfile.NamedTemporaryFile(delete=False)
try:
ogvw = vivisect.VivWorkspace()
add_events(ogvw)
ogvw.setMeta('StorageName', mpfile.name)
ogvw.setMeta('StorageModule', 'vivisect.storage.mpfile')
ogvw.saveWorkspace()
# Get rid of those last two meta sets so that the two new workspaces should be
# the same save for the last meta set
ogvw._event_list.pop()
ogvw._event_list.pop()
ogvw.setMeta('StorageName', basicfile.name)
ogvw.setMeta('StorageModule', 'vivisect.storage.basicfile')
ogvw.saveWorkspace()
ogevt = list(ogvw.exportWorkspace())
mvw = vivisect.VivWorkspace()
mvw.setMeta('StorageModule', 'vivisect.storage.mpfile')
mvw._event_list = []
mvw.loadWorkspace(mpfile.name)
mevt = list(mvw.exportWorkspace())
self.assertEqual(len(mevt), 39)
bvw = vivisect.VivWorkspace()
bvw.setMeta('StorageModule', 'vivisect.storage.basicfile')
bvw._event_list = []
bvw.loadWorkspace(basicfile.name)
bevt = list(bvw.exportWorkspace())
self.assertEqual(len(bevt), 39)
# the last three events are specific to the different storage modules
for idx in range(len(mevt) - 3):
self.assertEqual(mevt[idx], bevt[idx])
self.assertEqual(ogevt[idx], bevt[idx])
finally:
mpfile.close()
basicfile.close()
os.unlink(mpfile.name)
os.unlink(basicfile.name)
def test_bad_event(self):
vw = vivisect.VivWorkspace()
with self.assertLogs() as logcap:
vw.importWorkspace([(VWE_MAX + 1, (0xabad1dea, 4, 3, 'nope')),
(VWE_ADDFILE, ('VivisectFile', 0x1000, '3bfdad02b9a6522c84e356cf8f69135b'))])
files = vw.getFiles()
self.assertIn("IndexError: list index out of range", ''.join(logcap.output))
self.assertEqual(1, len(files))
self.assertEqual('VivisectFile', files[0])
|
{
"content_hash": "48cee4c7d0d636b7f4db8ca578334b4d",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 109,
"avg_line_length": 40.023809523809526,
"alnum_prop": 0.6210588935157644,
"repo_name": "atlas0fd00m/vivisect",
"id": "b191525e29ae4ba79c09725ee09bd1dc5d0837b5",
"size": "5043",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "vivisect/tests/teststorage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "167795"
},
{
"name": "CSS",
"bytes": "15980"
},
{
"name": "Makefile",
"bytes": "355"
},
{
"name": "Python",
"bytes": "17710506"
},
{
"name": "Shell",
"bytes": "476"
}
],
"symlink_target": ""
}
|
import argparse
import numpy as np
import sys
def generate():
parser = argparse.ArgumentParser()
parser.add_argument('--vocab_file', default='vocab.txt', type=str)
parser.add_argument('--vectors_file', default='vectors.txt', type=str)
args = parser.parse_args()
with open(args.vocab_file, 'r') as f:
words = [x.rstrip().split(' ')[0] for x in f.readlines()]
with open(args.vectors_file, 'r') as f:
vectors = {}
for line in f:
vals = line.rstrip().split(' ')
vectors[vals[0]] = [float(x) for x in vals[1:]]
vocab_size = len(words)
vocab = {w: idx for idx, w in enumerate(words)}
ivocab = {idx: w for idx, w in enumerate(words)}
vector_dim = len(vectors[ivocab[0]])
W = np.zeros((vocab_size, vector_dim))
for word, v in vectors.items():
if word == '<unk>':
continue
W[vocab[word], :] = v
# normalize each word vector to unit variance
W_norm = np.zeros(W.shape)
d = (np.sum(W ** 2, 1) ** (0.5))
W_norm = (W.T / d).T
return (W_norm, vocab, ivocab)
def distance(W, vocab, ivocab, input_term):
vecs = {}
if len(input_term.split(' ')) < 3:
print("Only %i words were entered.. three words are needed at the input to perform the calculation\n" % len(input_term.split(' ')))
return
else:
for idx, term in enumerate(input_term.split(' ')):
if term in vocab:
print('Word: %s Position in vocabulary: %i' % (term, vocab[term]))
vecs[idx] = W[vocab[term], :]
else:
print('Word: %s Out of dictionary!\n' % term)
return
vec_result = vecs[1] - vecs[0] + vecs[2]
vec_norm = np.zeros(vec_result.shape)
d = (np.sum(vec_result ** 2,) ** (0.5))
vec_norm = (vec_result.T / d).T
dist = np.dot(W, vec_norm.T)
for term in input_term.split(' '):
index = vocab[term]
dist[index] = -np.Inf
a = np.argsort(-dist)[:N]
print("\n Word Cosine distance\n")
print("---------------------------------------------------------\n")
for x in a:
print("%35s\t\t%f\n" % (ivocab[x], dist[x]))
if __name__ == "__main__":
    N = 100  # number of closest words that will be shown
W, vocab, ivocab = generate()
while True:
input_term = raw_input("\nEnter three words (EXIT to break): ")
if input_term == 'EXIT':
break
else:
distance(W, vocab, ivocab, input_term)
|
{
"content_hash": "9de3bf1bc93a60a3a53b8ff63ea74970",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 139,
"avg_line_length": 32.8375,
"alnum_prop": 0.5135135135135135,
"repo_name": "Ignotus/word2vec_theano",
"id": "bed075e782eb6290aef2c9dc80b393dbe949ffc2",
"size": "2627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evaluation/GloVe/eval/python/word_analogy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "94503"
},
{
"name": "Python",
"bytes": "23760"
}
],
"symlink_target": ""
}
|
import os
import sys
# Add the third_party/ dir to our search path so that we can find the
# modules in there automatically. This isn't normal, so don't replicate
# this pattern elsewhere.
_chromite_dir = os.path.normpath(os.path.dirname(os.path.realpath(__file__)))
_containing_dir = os.path.dirname(_chromite_dir)
_third_party_dirs = [os.path.join(_chromite_dir, 'third_party')]
# If chromite is living inside the Chrome checkout under
# <chrome_root>/src/third_party/chromite, its dependencies will be checked out
# to <chrome_root>/src/third_party instead of the normal chromite/third_party
# location due to git-submodule limitations (a submodule cannot be contained
# inside another submodule's workspace), so we want to add that to the
# search path.
if os.path.basename(_containing_dir) == 'third_party':
_third_party_dirs.append(_containing_dir)
# List of third_party packages that might need subpaths added to search.
_paths = [
'pyelftools',
]
for _path in _paths:
for _third_party in _third_party_dirs[:]:
_component = os.path.join(_third_party, _path)
if os.path.isdir(_component):
_third_party_dirs.append(_component)
sys.path = _third_party_dirs + sys.path
|
{
"content_hash": "d949e95488ae57318b9bb317662eaf28",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 78,
"avg_line_length": 41.275862068965516,
"alnum_prop": 0.7343358395989975,
"repo_name": "zhang0137/chromite",
"id": "43226417d34b3a1bae6adaa1e64e596cd0797567",
"size": "1367",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85"
},
{
"name": "HTML",
"bytes": "2189"
},
{
"name": "Python",
"bytes": "2015328"
},
{
"name": "Shell",
"bytes": "14875"
}
],
"symlink_target": ""
}
|
from typing import Union
import copy
import numpy as np
from collections import OrderedDict
from rl_coach.agents.agent import Agent
from rl_coach.agents.policy_optimization_agent import PolicyOptimizationAgent
from rl_coach.architectures.head_parameters import SACQHeadParameters,SACPolicyHeadParameters,VHeadParameters
from rl_coach.architectures.middleware_parameters import FCMiddlewareParameters
from rl_coach.base_parameters import AlgorithmParameters, NetworkParameters, AgentParameters, EmbedderScheme, MiddlewareScheme
from rl_coach.core_types import ActionInfo, EnvironmentSteps, RunPhase
from rl_coach.exploration_policies.additive_noise import AdditiveNoiseParameters
from rl_coach.memories.non_episodic.experience_replay import ExperienceReplayParameters
from rl_coach.architectures.embedder_parameters import InputEmbedderParameters
from rl_coach.spaces import BoxActionSpace
# There are 3 networks in the SAC implementation. All have the same topology but parameters are not shared.
# The networks are:
# 1. State Value Network - SACValueNetwork
# 2. Soft Q Value Network - SACCriticNetwork
# 3. Policy Network - SACPolicyNetwork - currently supporting only Gaussian Policy
# 1. State Value Network - SACValueNetwork
# this is the state value network in SAC.
# The network is trained to predict (regression) the state value in the max-entropy settings
# The objective to be minimized is given in equation (5) in the paper:
#
# J(psi)= E_(s~D)[0.5*(V_psi(s)-y(s))^2]
# where y(s) = E_(a~pi)[Q_theta(s,a)-log(pi(a|s))]
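# A minimal, illustrative sketch (not part of the original agent code): a single-sample
# estimate of the regression target y(s) above, where `q_values` holds Q_theta(s,a) and
# `log_probs` holds log(pi(a|s)) for actions sampled from the current policy.
def _illustrative_value_target(q_values, log_probs):
    return q_values - log_probs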
# Default parameters for value network:
# topology :
# input embedder : EmbedderScheme.Medium (Dense(256)) , relu activation
# middleware : EmbedderScheme.Medium (Dense(256)) , relu activation
class SACValueNetworkParameters(NetworkParameters):
def __init__(self):
super().__init__()
self.input_embedders_parameters = {'observation': InputEmbedderParameters(activation_function='relu')}
self.middleware_parameters = FCMiddlewareParameters(activation_function='relu')
self.heads_parameters = [VHeadParameters(initializer='xavier')]
self.rescale_gradient_from_head_by_factor = [1]
self.optimizer_type = 'Adam'
self.batch_size = 256
self.async_training = False
self.learning_rate = 0.0003 # 3e-4 see appendix D in the paper
self.create_target_network = True # tau is set in SoftActorCriticAlgorithmParameters.rate_for_copying_weights_to_target
# 2. Soft Q Value Network - SACCriticNetwork
# the whole network is built in the SACQHeadParameters. we use empty input embedder and middleware
class SACCriticNetworkParameters(NetworkParameters):
def __init__(self):
super().__init__()
self.input_embedders_parameters = {'observation': InputEmbedderParameters(scheme=EmbedderScheme.Empty)}
self.middleware_parameters = FCMiddlewareParameters(scheme=MiddlewareScheme.Empty)
self.heads_parameters = [SACQHeadParameters()] # SACQHeadParameters includes the topology of the head
self.rescale_gradient_from_head_by_factor = [1]
self.optimizer_type = 'Adam'
self.batch_size = 256
self.async_training = False
self.learning_rate = 0.0003
self.create_target_network = False
# 3. Policy Network
# Default parameters for policy network:
# topology :
# input embedder : EmbedderScheme.Medium (Dense(256)) , relu activation
# middleware : EmbedderScheme = [Dense(256)] , relu activation --> scheme should be overridden in preset
class SACPolicyNetworkParameters(NetworkParameters):
def __init__(self):
super().__init__()
self.input_embedders_parameters = {'observation': InputEmbedderParameters(activation_function='relu')}
self.middleware_parameters = FCMiddlewareParameters(activation_function='relu')
self.heads_parameters = [SACPolicyHeadParameters()]
self.rescale_gradient_from_head_by_factor = [1]
self.optimizer_type = 'Adam'
self.batch_size = 256
self.async_training = False
self.learning_rate = 0.0003
self.create_target_network = False
self.l2_regularization = 0 # weight decay regularization. not used in the original paper
# Algorithm Parameters
class SoftActorCriticAlgorithmParameters(AlgorithmParameters):
"""
:param num_steps_between_copying_online_weights_to_target: (StepMethod)
The number of steps between copying the online network weights to the target network weights.
:param rate_for_copying_weights_to_target: (float)
        When copying the online network weights to the target network weights, a soft update will be used, which
        weights the new online network weights by rate_for_copying_weights_to_target. (Tau as defined in the paper)
:param use_deterministic_for_evaluation: (bool)
        If True, during the evaluation phase, actions are chosen deterministically according to the policy mean
and not sampled from the policy distribution.
"""
def __init__(self):
super().__init__()
self.num_steps_between_copying_online_weights_to_target = EnvironmentSteps(1)
self.rate_for_copying_weights_to_target = 0.005
self.use_deterministic_for_evaluation = True # evaluate agent using deterministic policy (i.e. take the mean value)
class SoftActorCriticAgentParameters(AgentParameters):
def __init__(self):
super().__init__(algorithm=SoftActorCriticAlgorithmParameters(),
exploration=AdditiveNoiseParameters(),
                         memory=ExperienceReplayParameters(), # SAC doesn't use episode-related data
# network wrappers:
networks=OrderedDict([("policy", SACPolicyNetworkParameters()),
("q", SACCriticNetworkParameters()),
("v", SACValueNetworkParameters())]))
@property
def path(self):
return 'rl_coach.agents.soft_actor_critic_agent:SoftActorCriticAgent'
# Soft Actor Critic - https://arxiv.org/abs/1801.01290
class SoftActorCriticAgent(PolicyOptimizationAgent):
def __init__(self, agent_parameters, parent: Union['LevelManager', 'CompositeAgent']=None):
super().__init__(agent_parameters, parent)
self.last_gradient_update_step_idx = 0
# register signals to track (in learn_from_batch)
self.policy_means = self.register_signal('Policy_mu_avg')
self.policy_logsig = self.register_signal('Policy_logsig')
self.policy_logprob_sampled = self.register_signal('Policy_logp_sampled')
self.policy_grads = self.register_signal('Policy_grads_sumabs')
self.q1_values = self.register_signal("Q1")
self.TD_err1 = self.register_signal("TD err1")
self.q2_values = self.register_signal("Q2")
self.TD_err2 = self.register_signal("TD err2")
self.v_tgt_ns = self.register_signal('V_tgt_ns')
self.v_onl_ys = self.register_signal('V_onl_ys')
self.action_signal = self.register_signal("actions")
@property
def is_on_policy(self) -> bool:
return False
def learn_from_batch(self, batch):
#########################################
# need to update the following networks:
# 1. actor (policy)
# 2. state value (v)
# 3. critic (q1 and q2)
# 4. target network - probably already handled by V
#########################################
# define the networks to be used
# State Value Network
value_network = self.networks['v']
value_network_keys = self.ap.network_wrappers['v'].input_embedders_parameters.keys()
# Critic Network
q_network = self.networks['q'].online_network
q_head = q_network.output_heads[0]
q_network_keys = self.ap.network_wrappers['q'].input_embedders_parameters.keys()
# Actor (policy) Network
policy_network = self.networks['policy'].online_network
policy_network_keys = self.ap.network_wrappers['policy'].input_embedders_parameters.keys()
##########################################
# 1. updating the actor - according to (13) in the paper
policy_inputs = copy.copy(batch.states(policy_network_keys))
policy_results = policy_network.predict(policy_inputs)
policy_mu, policy_std, sampled_raw_actions, sampled_actions, sampled_actions_logprob, \
sampled_actions_logprob_mean = policy_results
self.policy_means.add_sample(policy_mu)
self.policy_logsig.add_sample(policy_std)
self.policy_logprob_sampled.add_sample(sampled_actions_logprob_mean)
# get the state-action values for the replayed states and their corresponding actions from the policy
q_inputs = copy.copy(batch.states(q_network_keys))
q_inputs['output_0_0'] = sampled_actions
log_target = q_network.predict(q_inputs)[0].squeeze()
# log internal q values
q1_vals, q2_vals = q_network.predict(q_inputs, outputs=[q_head.q1_output, q_head.q2_output])
self.q1_values.add_sample(q1_vals)
self.q2_values.add_sample(q2_vals)
# calculate the gradients according to (13)
# get the gradients of log_prob w.r.t the weights (parameters) - indicated as phi in the paper
initial_feed_dict = {policy_network.gradients_weights_ph[5]: np.array(1.0)}
dlogp_dphi = policy_network.predict(policy_inputs,
outputs=policy_network.weighted_gradients[5],
initial_feed_dict=initial_feed_dict)
# calculate dq_da
dq_da = q_network.predict(q_inputs,
outputs=q_network.gradients_wrt_inputs[1]['output_0_0'])
# calculate da_dphi
initial_feed_dict = {policy_network.gradients_weights_ph[3]: dq_da}
dq_dphi = policy_network.predict(policy_inputs,
outputs=policy_network.weighted_gradients[3],
initial_feed_dict=initial_feed_dict)
# now given dlogp_dphi, dq_dphi we need to calculate the policy gradients according to (13)
policy_grads = [dlogp_dphi[l] - dq_dphi[l] for l in range(len(dlogp_dphi))]
# apply the gradients to policy networks
policy_network.apply_gradients(policy_grads)
grads_sumabs = np.sum([np.sum(np.abs(policy_grads[l])) for l in range(len(policy_grads))])
self.policy_grads.add_sample(grads_sumabs)
##########################################
# 2. updating the state value online network weights
# done by calculating the targets for the v head according to (5) in the paper
# value_targets = log_targets-sampled_actions_logprob
value_inputs = copy.copy(batch.states(value_network_keys))
value_targets = log_target - sampled_actions_logprob
self.v_onl_ys.add_sample(value_targets)
# call value_network apply gradients with this target
value_loss = value_network.online_network.train_on_batch(value_inputs, value_targets[:,None])[0]
##########################################
# 3. updating the critic (q networks)
# updating q networks according to (7) in the paper
# define the input to the q network: state has been already updated previously, but now we need
# the actions from the batch (and not those sampled by the policy)
q_inputs['output_0_0'] = batch.actions(len(batch.actions().shape) == 1)
# define the targets : scale_reward * reward + (1-terminal)*discount*v_target_next_state
# define v_target_next_state
value_inputs = copy.copy(batch.next_states(value_network_keys))
v_target_next_state = value_network.target_network.predict(value_inputs)
self.v_tgt_ns.add_sample(v_target_next_state)
# Note: reward is assumed to be rescaled by RewardRescaleFilter in the preset parameters
TD_targets = batch.rewards(expand_dims=True) + \
(1.0 - batch.game_overs(expand_dims=True)) * self.ap.algorithm.discount * v_target_next_state
# call critic network update
result = q_network.train_on_batch(q_inputs, TD_targets, additional_fetches=[q_head.q1_loss, q_head.q2_loss])
total_loss, losses, unclipped_grads = result[:3]
q1_loss, q2_loss = result[3]
self.TD_err1.add_sample(q1_loss)
self.TD_err2.add_sample(q2_loss)
##########################################
# 4. updating the value target network
        # handled by setting rate_for_copying_weights_to_target in the agent parameters to 1-tau,
        # where tau is the hyper-parameter defined in the original SAC implementation
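        # (soft update: target_weights <- rate * online_weights + (1 - rate) * target_weights, applied by the framework)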
return total_loss, losses, unclipped_grads
def get_prediction(self, states):
"""
get the mean and stdev of the policy distribution given 'states'
:param states: the states for which we need to sample actions from the policy
:return: mean and stdev
"""
tf_input_state = self.prepare_batch_for_inference(states, 'policy')
return self.networks['policy'].online_network.predict(tf_input_state)
def train(self):
# since the algorithm works with experience replay buffer (non-episodic),
        # we can't use the policy optimization train method; we need Agent.train
        # note that since in Agent.train there is no apply_gradients, we need to do it in learn_from_batch
return Agent.train(self)
def choose_action(self, curr_state):
"""
choose_action - chooses the most likely action
if 'deterministic' - take the mean of the policy which is the prediction of the policy network.
else - use the exploration policy
:param curr_state:
:return: action wrapped in ActionInfo
"""
if not isinstance(self.spaces.action, BoxActionSpace):
raise ValueError("SAC works only for continuous control problems")
# convert to batch so we can run it through the network
tf_input_state = self.prepare_batch_for_inference(curr_state, 'policy')
# use the online network for prediction
policy_network = self.networks['policy'].online_network
policy_head = policy_network.output_heads[0]
result = policy_network.predict(tf_input_state,
outputs=[policy_head.policy_mean, policy_head.actions])
action_mean, action_sample = result
# if using deterministic policy, take the mean values. else, use exploration policy to sample from the pdf
if self.phase == RunPhase.TEST and self.ap.algorithm.use_deterministic_for_evaluation:
action = action_mean[0]
else:
action = action_sample[0]
self.action_signal.add_sample(action)
action_info = ActionInfo(action=action)
return action_info
|
{
"content_hash": "a37395207175a3853db71f7bc567eb00",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 129,
"avg_line_length": 48.799352750809064,
"alnum_prop": 0.6609854764904834,
"repo_name": "NervanaSystems/coach",
"id": "39d38803d29ea77099b41855746f71b2a922f478",
"size": "15670",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rl_coach/agents/soft_actor_critic_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "791"
},
{
"name": "CSS",
"bytes": "6493"
},
{
"name": "Dockerfile",
"bytes": "1118"
},
{
"name": "HTML",
"bytes": "161"
},
{
"name": "Jupyter Notebook",
"bytes": "91174"
},
{
"name": "Makefile",
"bytes": "5036"
},
{
"name": "Python",
"bytes": "1926733"
},
{
"name": "Shell",
"bytes": "428"
}
],
"symlink_target": ""
}
|
import unittest
from avro import schema
from avro import io
from avro import datafile
class TestAvro(unittest.TestCase):
def test_container(self):
writer = open('data.avro', 'wb')
datum_writer = io.DatumWriter()
schema_object = schema.parse("""\
{ "type": "record",
"name": "StringPair",
"doc": "A pair of strings.",
"fields": [
{"name": "left", "type": "string"},
{"name": "right", "type": "string"}
]
}""")
dfw = datafile.DataFileWriter(writer, datum_writer, schema_object)
datum = {'left':'L', 'right':'R'}
dfw.append(datum)
dfw.close()
reader = open('data.avro', 'rb')
datum_reader = io.DatumReader()
dfr = datafile.DataFileReader(reader, datum_reader)
data = []
for datum in dfr:
data.append(datum)
        self.assertEqual(1, len(data))
        self.assertEqual(datum, data[0])
def test_write_data(self):
writer = open('pairs.avro', 'wb')
datum_writer = io.DatumWriter()
schema_object = schema.parse(open('Pair.avsc').read())
dfw = datafile.DataFileWriter(writer, datum_writer, schema_object)
dfw.append({'left':'a', 'right':'1'})
dfw.append({'left':'c', 'right':'2'})
dfw.append({'left':'b', 'right':'3'})
dfw.append({'left':'b', 'right':'2'})
dfw.close()
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "c23cbd727c1482234ba388d3430c92f1",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 74,
"avg_line_length": 32.51063829787234,
"alnum_prop": 0.5235602094240838,
"repo_name": "qrsforever/workspace",
"id": "6e6cff2e2f2feeeee54811b26fb8fdc744782558",
"size": "1574",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "java/learn/hadoop/avro/src/main/python/test_avro.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "208"
},
{
"name": "C",
"bytes": "591303"
},
{
"name": "C++",
"bytes": "98511"
},
{
"name": "CLIPS",
"bytes": "52178"
},
{
"name": "HTML",
"bytes": "1780"
},
{
"name": "HiveQL",
"bytes": "13"
},
{
"name": "Java",
"bytes": "381448"
},
{
"name": "Jupyter Notebook",
"bytes": "3148168"
},
{
"name": "Makefile",
"bytes": "108609"
},
{
"name": "Python",
"bytes": "991124"
},
{
"name": "R",
"bytes": "22072"
},
{
"name": "Ruby",
"bytes": "7046"
},
{
"name": "Shell",
"bytes": "119856"
},
{
"name": "TSQL",
"bytes": "5817"
}
],
"symlink_target": ""
}
|
import sys
try:
from setuptools import setup
except ImportError:
    from distutils.core import setup
config = {
'description': 'test', 'author': 'danbordeanu', 'version': '0.1'
}
setup(**config)
|
{
"content_hash": "ed10f28dea2740064bd6ce1b780d3128",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 68,
"avg_line_length": 16.833333333333332,
"alnum_prop": 0.6732673267326733,
"repo_name": "danbordeanu/my_hello_world",
"id": "a8b87aced87ec2aff27db1bb7be92515c086e072",
"size": "202",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "360"
}
],
"symlink_target": ""
}
|
"""Manifest unit tests."""
import firebase_functions.private.manifest as _manifest
import firebase_functions.params as _params
full_endpoint = _manifest.ManifestEndpoint(
platform="gcfv2",
region=["us-west1"],
availableMemoryMb=512,
timeoutSeconds=60,
minInstances=1,
maxInstances=3,
concurrency=20,
vpc={
"connector": "aConnector",
"egressSettings": "ALL_TRAFFIC",
},
serviceAccountEmail="root@",
ingressSettings="ALLOW_ALL",
labels={
"hello": "world",
},
secretEnvironmentVariables=[{
"key": "MY_SECRET"
}],
)
full_endpoint_dict = {
"platform": "gcfv2",
"region": ["us-west1"],
"availableMemoryMb": 512,
"timeoutSeconds": 60,
"minInstances": 1,
"maxInstances": 3,
"concurrency": 20,
"vpc": {
"connector": "aConnector",
"egressSettings": "ALL_TRAFFIC",
},
"serviceAccountEmail": "root@",
"ingressSettings": "ALLOW_ALL",
"labels": {
"hello": "world",
},
"secretEnvironmentVariables": [{
"key": "MY_SECRET"
}],
}
full_stack = _manifest.ManifestStack(
endpoints={"test": full_endpoint},
params=[
_params.BoolParam("bool_test", default=False),
_params.IntParam("int_test", description="int_description"),
_params.FloatParam("float_test", immutable=True),
_params.SecretParam("secret_test"),
_params.StringParam("string_test"),
_params.ListParam("list_test", default=["1", "2", "3"]),
],
requiredAPIs=[{
"api": "test_api",
"reason": "testing"
}])
full_stack_dict = {
"specVersion": "v1alpha1",
"endpoints": {
"test": full_endpoint_dict
},
"params": [{
"name": "bool_test",
"type": "boolean",
"default": False,
}, {
"name": "int_test",
"type": "int",
"description": "int_description"
}, {
"name": "float_test",
"type": "float",
"immutable": True,
}, {
"name": "secret_test",
"type": "secret"
}, {
"name": "string_test",
"type": "string"
}, {
"default": "1,2,3",
"name": "list_test",
"type": "list"
}],
"requiredAPIs": [{
"api": "test_api",
"reason": "testing"
}]
}
class TestManifestStack:
"""Stack unit tests."""
def test_stack_to_dict(self):
"""Generic check that all ManifestStack values convert to dict."""
stack_dict = _manifest.manifest_to_spec_dict(full_stack)
assert (stack_dict == full_stack_dict
), "Generated manifest spec dict does not match expected dict."
class TestManifestEndpoint:
"""Manifest unit tests."""
def test_endpoint_to_dict(self):
"""Generic check that all ManifestEndpoint values convert to dict."""
# pylint: disable=protected-access
endpoint_dict = _manifest._dataclass_to_spec(full_endpoint)
assert (endpoint_dict == full_endpoint_dict
), "Generated endpoint spec dict does not match expected dict."
def test_endpoint_expressions(self):
"""Check Expression values convert to CEL strings."""
expressions_test = _manifest.ManifestEndpoint(
availableMemoryMb=_params.TernaryExpression(
_params.BoolParam("large"), 1024, 256),
minInstances=_params.StringParam("large").equals("yes").then(6, 1),
maxInstances=_params.IntParam("max").compare(">", 6).then(
6, _params.IntParam("max")),
timeoutSeconds=_params.IntParam("world"),
concurrency=_params.IntParam("bar"),
vpc={"connector": _params.SecretParam("secret")})
expressions_expected_dict = {
"platform": "gcfv2",
"region": [],
"secretEnvironmentVariables": [],
"availableMemoryMb": "{{ params.large ? 1024 : 256 }}",
"minInstances": "{{ params.large == \"yes\" ? 6 : 1 }}",
"maxInstances": "{{ params.max > 6 ? 6 : params.max }}",
"timeoutSeconds": "{{ params.world }}",
"concurrency": "{{ params.bar }}",
"vpc": {
"connector": "{{ params.secret }}"
}
}
# pylint: disable=protected-access
expressions_actual_dict = _manifest._dataclass_to_spec(expressions_test)
assert (expressions_actual_dict == expressions_expected_dict
), "Generated endpoint spec dict does not match expected dict."
def test_endpoint_nones(self):
"""Check all None values are removed."""
expressions_test = _manifest.ManifestEndpoint(
timeoutSeconds=None,
minInstances=None,
maxInstances=None,
concurrency=None,
)
expressions_expected_dict = {
"platform": "gcfv2",
"region": [],
"secretEnvironmentVariables": [],
}
# pylint: disable=protected-access
expressions_actual_dict = _manifest._dataclass_to_spec(expressions_test)
assert (expressions_actual_dict == expressions_expected_dict
), "Generated endpoint spec dict does not match expected dict."
|
{
"content_hash": "c90fc1c9172ad210f0740a5c85fc8849",
"timestamp": "",
"source": "github",
"line_count": 165,
"max_line_length": 80,
"avg_line_length": 31.854545454545455,
"alnum_prop": 0.5627853881278538,
"repo_name": "firebase/firebase-functions-python",
"id": "e029e454b3891387e1e30272f8b3929940744d45",
"size": "5833",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/test_manifest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1818"
},
{
"name": "Python",
"bytes": "90101"
}
],
"symlink_target": ""
}
|
"""Training helper that checkpoints models and computes summaries."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import os
import time
from tensorflow.core.framework.summary_pb2 import Summary
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import saver as saver_mod
from tensorflow.python.training import session_manager as session_manager_mod
from tensorflow.python.training import summary_io
from tensorflow.python.training import training_util
class Supervisor(object):
"""A training helper that checkpoints models and computes summaries.
The Supervisor is a small wrapper around a `Coordinator`, a `Saver`,
and a `SessionManager` that takes care of common needs of TensorFlow
training programs.
#### Use for a single program
```python
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that will checkpoint the model in '/tmp/mydir'.
sv = Supervisor(logdir='/tmp/mydir')
# Get a TensorFlow session managed by the supervisor.
with sv.managed_session(FLAGS.master) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
Within the `with sv.managed_session()` block all variables in the graph have
been initialized. In addition, a few services have been started to
checkpoint the model and add summaries to the event log.
If the program crashes and is restarted, the managed session automatically
  reinitializes variables from the most recent checkpoint.
The supervisor is notified of any exception raised by one of the services.
After an exception is raised, `should_stop()` returns `True`. In that case
the training loop should also stop. This is why the training loop has to
check for `sv.should_stop()`.
Exceptions that indicate that the training inputs have been exhausted,
`tf.errors.OutOfRangeError`, also cause `sv.should_stop()` to return `True`
but are not re-raised from the `with` block: they indicate a normal
termination.
#### Use for multiple replicas
To train with replicas you deploy the same program in a `Cluster`.
One of the tasks must be identified as the *chief*: the task that handles
initialization, checkpoints, summaries, and recovery. The other tasks
depend on the *chief* for these services.
The only change you have to do to the single program code is to indicate
if the program is running as the *chief*.
```python
# Choose a task as the chief. This could be based on server_def.task_index,
# or job_def.name, or job_def.tasks. It's entirely up to the end user.
# But there can be only one *chief*.
is_chief = (server_def.task_index == 0)
server = tf.train.Server(server_def)
with tf.Graph().as_default():
...add operations to the graph...
# Create a Supervisor that uses log directory on a shared file system.
# Indicate if you are the 'chief'
sv = Supervisor(logdir='/shared_directory/...', is_chief=is_chief)
# Get a Session in a TensorFlow server on the cluster.
with sv.managed_session(server.target) as sess:
# Use the session to train the graph.
while not sv.should_stop():
sess.run(<my_train_op>)
```
In the *chief* task, the `Supervisor` works exactly as in the first example
above. In the other tasks `sv.managed_session()` waits for the Model to have
  been initialized before returning a session to the training code. The
  non-chief tasks depend on the chief task for initializing the model.
If one of the tasks crashes and restarts, `managed_session()`
checks if the Model is initialized. If yes, it just creates a session and
returns it to the training code that proceeds normally. If the model needs
to be initialized, the chief task takes care of reinitializing it; the other
tasks just wait for the model to have been initialized.
NOTE: This modified program still works fine as a single program.
The single program marks itself as the chief.
#### What `master` string to use
Whether you are running on your machine or in the cluster you can use the
following values for the --master flag:
* Specifying `''` requests an in-process session that does not use RPC.
* Specifying `'local'` requests a session that uses the RPC-based
"Master interface" to run TensorFlow programs. See
[`tf.train.Server.create_local_server()`](#Server.create_local_server) for
details.
* Specifying `'grpc://hostname:port'` requests a session that uses
    the RPC interface to a specific host, and also allows the in-process
master to access remote tensorflow workers. Often, it is
appropriate to pass `server.target` (for some `tf.train.Server`
    named `server`).
#### Advanced use
##### Launching additional services
`managed_session()` launches the Checkpoint and Summary services (threads).
If you need more services to run you can simply launch them in the block
controlled by `managed_session()`.
Example: Start a thread to print losses. We want this thread to run
every 60 seconds, so we launch it with `sv.loop()`.
```python
...
sv = Supervisor(logdir='/tmp/mydir')
with sv.managed_session(FLAGS.master) as sess:
    sv.loop(60, print_loss, (sess,))
while not sv.should_stop():
sess.run(my_train_op)
```
##### Launching fewer services
`managed_session()` launches the "summary" and "checkpoint" threads which use
  either the optional `summary_op` and `saver` passed to the constructor, or
default ones created automatically by the supervisor. If you want to run
your own summary and checkpointing logic, disable these services by passing
`None` to the `summary_op` and `saver` parameters.
Example: Create summaries manually every 100 steps in the chief.
```python
# Create a Supervisor with no automatic summaries.
sv = Supervisor(logdir='/tmp/mydir', is_chief=is_chief, summary_op=None)
# As summary_op was None, managed_session() does not start the
# summary thread.
with sv.managed_session(FLAGS.master) as sess:
for step in xrange(1000000):
if sv.should_stop():
break
if is_chief and step % 100 == 0:
# Create the summary every 100 chief steps.
sv.summary_computed(sess, sess.run(my_summary_op))
else:
# Train normally
sess.run(my_train_op)
```
##### Custom model initialization
`managed_session()` only supports initializing the model by running an
`init_op` or restoring from the latest checkpoint. If you have special
initialization needs, see how to specify a `local_init_op` when creating the
supervisor. You can also use the `SessionManager` directly to create a
session and check if it could be initialized automatically.
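  Example: Run custom initialization logic with `init_fn` (a minimal sketch; the
  `load_pretrained_weights` helper is hypothetical):
  ```python
  def my_init_fn(sess):
    # Called after the optional `init_op`, with the session being initialized.
    load_pretrained_weights(sess)
  sv = Supervisor(logdir='/tmp/mydir', init_fn=my_init_fn)
  ```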
@@__init__
@@managed_session
@@prepare_or_wait_for_session
@@start_standard_services
@@start_queue_runners
@@summary_computed
@@stop
@@request_stop
@@should_stop
@@stop_on_exception
@@wait_for_stop
"""
# Value to pass for the 'ready_op', 'init_op', 'summary_op', 'saver',
# and 'global_step' parameters of Supervisor.__init__() to indicate that
# the default behavior should be used.
USE_DEFAULT = 0
def __init__(self, graph=None, ready_op=USE_DEFAULT, is_chief=True,
init_op=USE_DEFAULT, init_feed_dict=None,
local_init_op=USE_DEFAULT, logdir=None,
summary_op=USE_DEFAULT, saver=USE_DEFAULT,
global_step=USE_DEFAULT, save_summaries_secs=120,
save_model_secs=600, recovery_wait_secs=30, stop_grace_secs=120,
checkpoint_basename="model.ckpt", session_manager=None,
summary_writer=USE_DEFAULT, init_fn=None):
"""Create a `Supervisor`.
Args:
graph: A `Graph`. The graph that the model will use. Defaults to the
default `Graph`. The supervisor may add operations to the graph before
creating a session, but the graph should not be modified by the caller
after passing it to the supervisor.
ready_op: 1-D string `Tensor`. This tensor is evaluated by supervisors in
`prepare_or_wait_for_session()` to check if the model is ready to use.
The model is considered ready if it returns an empty array. Defaults to
        the tensor returned from `tf.report_uninitialized_variables()`. If
`None`, the model is not checked for readiness.
is_chief: If True, create a chief supervisor in charge of initializing
and restoring the model. If False, create a supervisor that relies
on a chief supervisor for inits and restore.
init_op: `Operation`. Used by chief supervisors to initialize the model
when it can not be recovered. Defaults to an `Operation` that
initializes all variables. If `None`, no initialization is done
automatically unless you pass a value for `init_fn`, see below.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
local_init_op: `Operation`. Used by all supervisors to run initializations
that should run for every new supervisor instance. By default these
are table initializers and initializers for local variables.
If `None`, no further per supervisor-instance initialization is
done automatically.
logdir: A string. Optional path to a directory where to checkpoint the
model and log events for the visualizer. Used by chief supervisors.
The directory will be created if it does not exist.
summary_op: An `Operation` that returns a Summary for the event logs.
Used by chief supervisors if a `logdir` was specified. Defaults to the
operation returned from merge_all_summaries(). If `None`, summaries are
not computed automatically.
saver: A Saver object. Used by chief supervisors if a `logdir` was
        specified. Defaults to the saver returned by Saver().
If `None`, the model is not saved automatically.
global_step: An integer Tensor of size 1 that counts steps. The value
from 'global_step' is used in summaries and checkpoint filenames.
        Defaults to the op named 'global_step' in the graph if it exists, is of
        rank 1, size 1, and of type tf.int32 or tf.int64. If `None` the global
step is not recorded in summaries and checkpoint files. Used by chief
supervisors if a `logdir` was specified.
save_summaries_secs: Number of seconds between the computation of
summaries for the event log. Defaults to 120 seconds. Pass 0 to
disable summaries.
save_model_secs: Number of seconds between the creation of model
checkpoints. Defaults to 600 seconds. Pass 0 to disable checkpoints.
recovery_wait_secs: Number of seconds between checks that the model
is ready. Used by supervisors when waiting for a chief supervisor
to initialize or restore the model. Defaults to 30 seconds.
stop_grace_secs: Grace period, in seconds, given to running threads to
stop when `stop()` is called. Defaults to 120 seconds.
checkpoint_basename: The basename for checkpoint saving.
session_manager: `SessionManager`, which manages Session creation and
recovery. If it is `None`, a default `SessionManager` will be created
with the set of arguments passed in for backwards compatibility.
summary_writer: `SummaryWriter` to use or `USE_DEFAULT`. Can be `None`
to indicate that no summaries should be written.
init_fn: Optional callable used to initialize the model. Called
after the optional `init_op` is called. The callable must accept one
argument, the session being initialized.
Returns:
A `Supervisor`.
"""
# Set default values of arguments.
if graph is None:
graph = ops.get_default_graph()
with graph.as_default():
self._init_ready_op(ready_op=ready_op)
self._init_init_op(init_op=init_op, init_feed_dict=init_feed_dict)
self._init_local_init_op(local_init_op=local_init_op)
self._init_saver(saver=saver)
self._init_summary_op(summary_op=summary_op)
self._init_global_step(global_step=global_step)
self._graph = graph
self._is_chief = is_chief
self._coord = coordinator.Coordinator()
self._recovery_wait_secs = recovery_wait_secs
self._stop_grace_secs = stop_grace_secs
self._init_fn = init_fn
# Set all attributes related to checkpointing and writing events to None.
# Afterwards, set them appropriately for chief supervisors, as these are
# the only supervisors that can write checkpoints and events.
self._logdir = None
self._save_summaries_secs = None
self._save_model_secs = None
self._save_path = None
self._summary_writer = None
if self._is_chief:
self._logdir = logdir
self._save_summaries_secs = save_summaries_secs
self._save_model_secs = save_model_secs
if self._logdir:
self._save_path = os.path.join(self._logdir, checkpoint_basename)
if summary_writer is Supervisor.USE_DEFAULT:
if self._logdir:
self._summary_writer = summary_io.SummaryWriter(self._logdir)
else:
self._summary_writer = summary_writer
self._graph_added_to_summary = False
self._init_session_manager(session_manager=session_manager)
self._verify_setup()
# The graph is not allowed to change anymore.
graph.finalize()
def _init_session_manager(self, session_manager=None):
if session_manager is None:
self._session_manager = session_manager_mod.SessionManager(
local_init_op=self._local_init_op,
ready_op=self._ready_op, graph=self._graph,
recovery_wait_secs=self._recovery_wait_secs)
else:
self._session_manager = session_manager
def _get_first_op_from_collection(self, key):
"""Returns the first `Operation` from a collection.
Args:
key: A string collection key.
Returns:
The first Op found in a collection, or `None` if the collection is empty.
"""
try:
op_list = ops.get_collection(key)
if len(op_list) > 1:
logging.info("Found %d %s operations. Returning the first one.",
len(op_list), key)
if op_list:
return op_list[0]
except LookupError:
pass
return None
def _init_ready_op(self, ready_op=USE_DEFAULT):
"""Initializes ready_op.
Args:
ready_op: `Tensor` to check if the model is initialized.
If it's set to USE_DEFAULT, creates an op that checks all
the variables are initialized.
"""
if ready_op is Supervisor.USE_DEFAULT:
ready_op = self._get_first_op_from_collection(ops.GraphKeys.READY_OP)
if ready_op is None:
ready_op = variables.report_uninitialized_variables()
ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
self._ready_op = ready_op
def _init_init_op(self, init_op=USE_DEFAULT, init_feed_dict=None):
"""Initializes init_op.
Args:
init_op: `Operation` to initialize the variables. If set to USE_DEFAULT,
create an op that initializes all variables and tables.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
"""
if init_op is Supervisor.USE_DEFAULT:
init_op = self._get_first_op_from_collection(ops.GraphKeys.INIT_OP)
if init_op is None:
init_op = variables.initialize_all_variables()
ops.add_to_collection(ops.GraphKeys.INIT_OP, init_op)
self._init_op = init_op
self._init_feed_dict = init_feed_dict
def _init_local_init_op(self, local_init_op=USE_DEFAULT):
"""Initializes local_init_op.
Args:
local_init_op: `Operation` run for every new supervisor instance. If set
to USE_DEFAULT, use the first op from the GraphKeys.LOCAL_INIT_OP
collection. If the collection is empty, create an op that initializes
all local variables and all tables.
"""
if local_init_op is Supervisor.USE_DEFAULT:
local_init_op = self._get_first_op_from_collection(
ops.GraphKeys.LOCAL_INIT_OP)
if local_init_op is None:
op_list = [variables.initialize_local_variables(),
data_flow_ops.initialize_all_tables()]
if op_list:
local_init_op = control_flow_ops.group(*op_list)
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
self._local_init_op = local_init_op
def _init_saver(self, saver=USE_DEFAULT):
"""Initializes saver.
Args:
saver: A `Saver` object. If set to USE_DEFAULT, create one that
saves all the variables.
"""
if saver is Supervisor.USE_DEFAULT:
saver = self._get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is None and variables.all_variables():
saver = saver_mod.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
self._saver = saver
  def _init_summary_op(self, summary_op=USE_DEFAULT):
    """Initializes summary_op.
Args:
summary_op: An Operation that returns a Summary for the event logs.
If set to USE_DEFAULT, create an op that merges all the summaries.
"""
if summary_op is Supervisor.USE_DEFAULT:
summary_op = self._get_first_op_from_collection(ops.GraphKeys.SUMMARY_OP)
if summary_op is None:
summary_op = logging_ops.merge_all_summaries()
if summary_op is not None:
ops.add_to_collection(ops.GraphKeys.SUMMARY_OP, summary_op)
self._summary_op = summary_op
def _init_global_step(self, global_step=USE_DEFAULT):
"""Initializes global_step.
Args:
global_step: An integer Tensor of size 1 that counts steps. If
set to USE_DEFAULT, creates global_step tensor.
"""
if global_step is Supervisor.USE_DEFAULT:
global_step = self._get_first_op_from_collection(
ops.GraphKeys.GLOBAL_STEP)
if global_step is None:
global_step = self._default_global_step_tensor()
if global_step is not None:
ops.add_to_collection(ops.GraphKeys.GLOBAL_STEP, global_step)
self._global_step = global_step
@property
def is_chief(self):
"""Return True if this is a chief supervisor.
Returns:
A bool.
"""
return self._is_chief
@property
def session_manager(self):
"""Return the SessionManager used by the Supervisor.
Returns:
A SessionManager object.
"""
return self._session_manager
@property
def coord(self):
"""Return the Coordinator used by the Supervisor.
The Coordinator can be useful if you want to run multiple threads
during your training.
Returns:
A Coordinator object.
"""
return self._coord
@property
def init_op(self):
"""Return the Init Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._init_op
@property
def init_feed_dict(self):
"""Return the feed dictionary used when evaluating the `init_op`.
Returns:
A feed dictionary or `None`.
"""
return self._init_feed_dict
@property
def ready_op(self):
"""Return the Ready Op used by the supervisor.
Returns:
An Op or `None`.
"""
return self._ready_op
@property
def summary_writer(self):
"""Return the SummaryWriter used by the chief supervisor.
Returns:
A SummaryWriter.
"""
return self._summary_writer
@property
def summary_op(self):
"""Return the Summary Tensor used by the chief supervisor.
Returns:
A string Tensor for the summary or `None`.
"""
return self._summary_op
@property
def save_summaries_secs(self):
"""Return the delay between summary computations.
Returns:
      A number of seconds.
"""
return self._save_summaries_secs
@property
def global_step(self):
"""Return the global_step Tensor used by the supervisor.
Returns:
An integer Tensor for the global_step.
"""
return self._global_step
@property
def saver(self):
"""Return the Saver used by the supervisor.
Returns:
A Saver object.
"""
return self._saver
@property
def save_model_secs(self):
"""Return the delay between checkpoints.
Returns:
      A number of seconds.
"""
return self._save_model_secs
@property
def save_path(self):
"""Return the save path used by the supervisor.
Returns:
A string.
"""
return self._save_path
def _write_graph(self):
"""Writes graph_def to `logdir` and adds it to summary if applicable."""
assert self._is_chief
if self._logdir:
training_util.write_graph(self._graph.as_graph_def(add_shapes=True),
self._logdir, "graph.pbtxt")
if self._summary_writer and not self._graph_added_to_summary:
self._summary_writer.add_graph(self._graph)
self._graph_added_to_summary = True
def start_standard_services(self, sess):
"""Start the standard services for 'sess'.
This starts services in the background. The services started depend
on the parameters to the constructor and may include:
- A Summary thread computing summaries every save_summaries_secs.
- A Checkpoint thread saving the model every save_model_secs.
- A StepCounter thread measure step time.
Args:
sess: A Session.
Returns:
A list of threads that are running the standard services. You can use
the Supervisor's Coordinator to join these threads with:
sv.coord.Join(<list of threads>)
Raises:
RuntimeError: If called with a non-chief Supervisor.
      ValueError: If no `logdir` was passed to the constructor, as the
services need a log directory.
"""
if not self._is_chief:
raise RuntimeError("Only chief supervisor can start standard services. "
"Because only chief supervisors can write events.")
if not self._logdir:
logging.warning("Standard services need a 'logdir' "
"passed to the SessionManager")
return
if self._global_step is not None and self._summary_writer:
# Only add the session log if we keep track of global step.
# TensorBoard cannot use START message for purging expired events
# if there is no step value.
current_step = training_util.global_step(sess, self._global_step)
self._summary_writer.add_session_log(
SessionLog(status=SessionLog.START),
current_step)
threads = []
if self._save_summaries_secs and self._summary_writer:
if self._summary_op is not None:
threads.append(SVSummaryThread(self, sess))
if self._global_step is not None:
threads.append(SVStepCounterThread(self, sess))
if self.saver and self._save_model_secs:
threads.append(SVTimerCheckpointThread(self, sess))
for t in threads:
t.start()
return threads
def prepare_or_wait_for_session(self, master="", config=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
start_standard_services=True):
"""Make sure the model is ready to be used.
Create a session on 'master', recovering or initializing the model as
needed, or wait for a session to be ready. If running as the chief
and `start_standard_service` is set to True, also call the session
manager to start the standard services.
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional ConfigProto proto used to configure the session,
which is passed as-is to create the session.
wait_for_checkpoint: Whether we should wait for the availability of a
checkpoint before creating Session. Defaults to False.
max_wait_secs: Maximum time to wait for the session to become available.
start_standard_services: Whether to start the standard services and the
queue runners.
Returns:
A Session object that can be used to drive the model.
"""
# For users who recreate the session with prepare_or_wait_for_session(), we
# need to clear the coordinator's stop_event so that threads managed by the
# coordinator can run.
self._coord.clear_stop()
if self._summary_writer:
self._summary_writer.reopen()
if self._is_chief:
sess = self._session_manager.prepare_session(
master, init_op=self.init_op, saver=self.saver,
checkpoint_dir=self._logdir, wait_for_checkpoint=wait_for_checkpoint,
max_wait_secs=max_wait_secs, config=config,
init_feed_dict=self._init_feed_dict, init_fn=self._init_fn)
self._write_graph()
if start_standard_services:
self.start_standard_services(sess)
else:
sess = self._session_manager.wait_for_session(master,
config=config,
max_wait_secs=max_wait_secs)
if start_standard_services:
self.start_queue_runners(sess)
return sess
def start_queue_runners(self, sess, queue_runners=None):
"""Start threads for `QueueRunners`.
Note that the queue runners collected in the graph key `QUEUE_RUNNERS`
are already started automatically when you create a session with the
supervisor, so unless you have non-collected queue runners to start
    you do not need to call this explicitly.
Args:
sess: A `Session`.
queue_runners: A list of `QueueRunners`. If not specified, we'll use the
list of queue runners gathered in the graph under the key
`GraphKeys.QUEUE_RUNNERS`.
Returns:
The list of threads started for the `QueueRunners`.
"""
if queue_runners is None:
queue_runners = self._graph.get_collection(ops.GraphKeys.QUEUE_RUNNERS)
threads = []
for qr in queue_runners:
threads.extend(qr.create_threads(sess, coord=self._coord, daemon=True,
start=True))
return threads
def loop(self, timer_interval_secs, target, args=None, kwargs=None):
"""Start a LooperThread that calls a function periodically.
If `timer_interval_secs` is None the thread calls `target(*args, **kwargs)`
repeatedly. Otherwise it calls it every `timer_interval_secs`
seconds. The thread terminates when a stop is requested.
The started thread is added to the list of threads managed by the supervisor
so it does not need to be passed to the `stop()` method.
Args:
timer_interval_secs: Number. Time boundaries at which to call `target`.
target: A callable object.
args: Optional arguments to pass to `target` when calling it.
kwargs: Optional keyword arguments to pass to `target` when calling it.
Returns:
The started thread.
"""
looper = coordinator.LooperThread(self._coord, timer_interval_secs,
target=target, args=args, kwargs=kwargs)
looper.start()
return looper
def stop(self, threads=None, close_summary_writer=True):
"""Stop the services and the coordinator.
This does not close the session.
Args:
threads: Optional list of threads to join with the coordinator. If
`None`, defaults to the threads running the standard services, the
threads started for `QueueRunners`, and the threads started by the
`loop()` method. To wait on additional threads, pass the
list in this parameter.
close_summary_writer: Whether to close the `summary_writer`. Defaults to
`True` if the summary writer was created by the supervisor, `False`
otherwise.
"""
self._coord.request_stop()
try:
# coord.join() re-raises the first reported exception; the "finally"
# block ensures that we clean up whether or not an exception was
# reported.
self._coord.join(threads,
stop_grace_period_secs=self._stop_grace_secs)
finally:
# Close the writer last, in case one of the running threads was using it.
if close_summary_writer and self._summary_writer:
# Stop messages are not logged with event.step,
# since the session may have already terminated.
self._summary_writer.add_session_log(SessionLog(status=SessionLog.STOP))
self._summary_writer.close()
self._graph_added_to_summary = False
def request_stop(self, ex=None):
"""Request that the coordinator stop the threads.
See `Coordinator.request_stop()`.
Args:
ex: Optional `Exception`, or Python `exc_info` tuple as returned by
`sys.exc_info()`. If this is the first call to `request_stop()` the
corresponding exception is recorded and re-raised from `join()`.
"""
self._coord.request_stop(ex=ex)
def should_stop(self):
"""Check if the coordinator was told to stop.
See `Coordinator.should_stop()`.
Returns:
True if the coordinator was told to stop, False otherwise.
"""
return self._coord.should_stop()
def stop_on_exception(self):
"""Context handler to stop the supervisor when an exception is raised.
See `Coordinator.stop_on_exception()`.
Returns:
A context handler.
"""
return self._coord.stop_on_exception()
def wait_for_stop(self):
"""Block waiting for the coordinator to stop."""
self._coord.wait_for_stop()
def summary_computed(self, sess, summary, global_step=None):
"""Indicate that a summary was computed.
Args:
sess: A `Session` object.
summary: A Summary proto, or a string holding a serialized summary proto.
global_step: Int. global step this summary is associated with. If `None`,
it will try to fetch the current step.
Raises:
TypeError: if 'summary' is not a Summary proto or a string.
RuntimeError: if the Supervisor was created without a `logdir`.
"""
if not self._summary_writer:
raise RuntimeError("Writing a summary requires a summary writer.")
if global_step is None and self.global_step is not None:
global_step = training_util.global_step(sess, self.global_step)
self._summary_writer.add_summary(summary, global_step)
def _default_global_step_tensor(self):
"""Returns the global_step from the default graph.
Returns:
The global step `Tensor` or `None`.
"""
try:
gs = ops.get_default_graph().get_tensor_by_name("global_step:0")
if gs.dtype.base_dtype in [dtypes.int32, dtypes.int64]:
return gs
else:
logging.warning("Found 'global_step' is not an int type: %s", gs.dtype)
return None
except KeyError:
return None
def _verify_setup(self):
"""Check that all is good.
Raises:
ValueError: If something is not good.
"""
# Not running as chief means that replicas are used.
# In that case all Variables must have their device set.
if not self._is_chief:
for op in self._graph.get_operations():
if op.type == "Variable" and not op.device:
raise ValueError("When using replicas, all Variables must have "
"their device set: %s" % op)
# pylint: disable=g-doc-return-or-yield,broad-except
@contextlib.contextmanager
def managed_session(self, master="", config=None,
start_standard_services=True,
close_summary_writer=True):
"""Returns a context manager for a managed session.
This context manager creates and automatically recovers a session. It
optionally starts the standard services that handle checkpoints and
summaries. It monitors exceptions raised from the `with` block or from the
services and stops the supervisor as needed.
The context manager is typically used as follows:
```python
def train():
sv = tf.train.Supervisor(...)
with sv.managed_session(<master>) as sess:
for step in xrange(..):
if sv.should_stop():
break
sess.run(<my training op>)
...do other things needed at each training step...
```
An exception raised from the `with` block or one of the service threads is
raised again when the block exits. This is done after stopping all threads
and closing the session. For example, an `AbortedError` exception, raised
in case of preemption of one of the workers in a distributed model, is
raised again when the block exits.
If you want to retry the training loop in case of preemption you can do it
as follows:
```python
def main(...):
      while True:
try:
train()
        except tf.errors.AbortedError:
pass
```
As a special case, exceptions used for control flow, such as
`OutOfRangeError` which reports that input queues are exhausted, are not
raised again from the `with` block: they indicate a clean termination of
the training loop and are considered normal termination.
Args:
master: name of the TensorFlow master to use. See the `tf.Session`
constructor for how this is interpreted.
config: Optional `ConfigProto` proto used to configure the session.
Passed as-is to create the session.
start_standard_services: Whether to start the standard services,
such as checkpoint, summary and step counter.
close_summary_writer: Whether to close the summary writer when
closing the session. Defaults to True.
Returns:
A context manager that yields a `Session` restored from the latest
      checkpoint or initialized from scratch if no checkpoint exists. The
session is closed when the `with` block exits.
"""
try:
sess = self.prepare_or_wait_for_session(
master=master, config=config,
start_standard_services=start_standard_services)
yield sess
except Exception as e:
self.request_stop(e)
finally:
try:
# Request all the threads to stop and wait for them to do so. Any
# exception raised by the threads is raised again from stop().
# Passing stop_grace_period_secs is for blocked enqueue/dequeue
# threads which are not checking for `should_stop()`. They
# will be stopped when we close the session further down.
self.stop(close_summary_writer=close_summary_writer)
finally:
# Close the session to finish up all pending calls. We do not care
# about exceptions raised when closing. This takes care of
# blocked enqueue/dequeue calls.
try:
sess.close()
except Exception:
# Silently ignore exceptions raised by close().
pass
# pylint: enable=g-doc-return-or-yield,broad-except
class SVSummaryThread(coordinator.LooperThread):
"""A thread to save summaries on a timer."""
def __init__(self, sv, sess):
"""Create a SVSummaryThread.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVSummaryThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
if self._sv.global_step is not None:
summary_strs, global_step = self._sess.run([self._sv.summary_op,
self._sv.global_step])
else:
summary_strs = self._sess.run(self._sv.summary_op)
global_step = None
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary_strs, global_step)
class SVStepCounterThread(coordinator.LooperThread):
"""Threads to count steps and measure their duration."""
def __init__(self, sv, sess):
"""Create a `SVStepCounterThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVStepCounterThread, self).__init__(sv.coord, sv.save_summaries_secs)
self._sv = sv
self._sess = sess
self._last_time = 0.0
self._last_step = 0
self._summary_tag = "%s/sec" % self._sv.global_step.op.name
def start_loop(self):
self._last_time = time.time()
self._last_step = training_util.global_step(
self._sess, self._sv.global_step)
def run_loop(self):
# Count the steps.
current_step = training_util.global_step(self._sess, self._sv.global_step)
added_steps = current_step - self._last_step
self._last_step = current_step
# Measure the elapsed time.
current_time = time.time()
elapsed_time = current_time - self._last_time
self._last_time = current_time
# Reports the number of steps done per second
steps_per_sec = added_steps / elapsed_time
summary = Summary(value=[Summary.Value(tag=self._summary_tag,
simple_value=steps_per_sec)])
if self._sv.summary_writer:
self._sv.summary_writer.add_summary(summary, current_step)
logging.log_first_n(logging.INFO, "%s: %g", 10,
self._summary_tag, steps_per_sec)
class SVTimerCheckpointThread(coordinator.LooperThread):
"""A thread to checkpoint on a timer."""
def __init__(self, sv, sess):
"""Create a `SVTimerCheckpointThread`.
Args:
sv: A `Supervisor`.
sess: A `Session`.
"""
super(SVTimerCheckpointThread, self).__init__(sv.coord, sv.save_model_secs)
self._sv = sv
self._sess = sess
def run_loop(self):
self._sv.saver.save(self._sess, self._sv.save_path,
global_step=self._sv.global_step)
if self._sv.summary_writer and self._sv.global_step is not None:
current_step = training_util.global_step(self._sess, self._sv.global_step)
self._sv.summary_writer.add_session_log(
SessionLog(status=SessionLog.CHECKPOINT,
checkpoint_path=self._sv.save_path),
current_step)
# TODO(sherrym): All non-PEP8 compliant names will be deprecated shortly.
setattr(Supervisor, "PrepareSession", Supervisor.prepare_or_wait_for_session)
setattr(Supervisor, "StartQueueRunners", Supervisor.start_queue_runners)
setattr(Supervisor, "StartStandardServices", Supervisor.start_standard_services)
setattr(Supervisor, "Stop", Supervisor.stop)
setattr(Supervisor, "RequestStop", Supervisor.request_stop)
setattr(Supervisor, "Loop", Supervisor.loop)
setattr(Supervisor, "ShouldStop", Supervisor.should_stop)
setattr(Supervisor, "StopOnException", Supervisor.stop_on_exception)
setattr(Supervisor, "WaitForStop", Supervisor.wait_for_stop)
setattr(Supervisor, "SummaryComputed", Supervisor.summary_computed)
|
{
"content_hash": "ab5d904d0799c0ab5203cc6acb5bc088",
"timestamp": "",
"source": "github",
"line_count": 1031,
"max_line_length": 80,
"avg_line_length": 38.11833171677983,
"alnum_prop": 0.6714758269720101,
"repo_name": "Lab603/PicEncyclopedias",
"id": "a3ee383758be7553b49bf003dcf7593b7cc7fe7b",
"size": "39989",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "jni-build/jni/include/tensorflow/python/training/supervisor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "361482"
},
{
"name": "C++",
"bytes": "22994090"
},
{
"name": "CMake",
"bytes": "72924"
},
{
"name": "CSS",
"bytes": "1548"
},
{
"name": "HTML",
"bytes": "1040352"
},
{
"name": "Java",
"bytes": "252082"
},
{
"name": "JavaScript",
"bytes": "25902"
},
{
"name": "Jupyter Notebook",
"bytes": "3547008"
},
{
"name": "Makefile",
"bytes": "47206"
},
{
"name": "Objective-C",
"bytes": "10664"
},
{
"name": "Objective-C++",
"bytes": "91354"
},
{
"name": "Python",
"bytes": "19063444"
},
{
"name": "Shell",
"bytes": "476334"
},
{
"name": "TypeScript",
"bytes": "1264488"
}
],
"symlink_target": ""
}
|
"""
Created on Jan 28, 2013
@author: agross
"""
import os as os
import pickle as pickle
from collections import defaultdict
import pandas as pd
import numpy as np
from Helpers.Misc import make_path_dump
from Processing.ProcessClinical import get_clinical
from Data.Annotations import read_in_pathways
def tree():
return defaultdict(tree)
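# Hedged sketch, not in the original source: tree() builds an auto-vivifying
# nested dict, so intermediate levels never have to be created by hand.
# (The keys below are made up for illustration.)
#
#   d = tree()
#   d['BRCA']['mRNASeq']['n_samples'] = 42   # no KeyError at any level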
class Run(object):
"""
Object for storing metadata and functions for dealing with Firehose runs.
Entry point for loading data from pre-processed dumps in the ucsd_analyses
file tree.
"""
def __init__(self, date, version, data_path, result_path, parameters,
cancer_codes, sample_matrix, description=''):
self.date = date
self.data_path = data_path
self.version = version
self.result_path = result_path
self.report_path = result_path
self.parameters = parameters
self.dependency_tree = tree()
self.description = description
self.cancer_codes = cancer_codes
self.sample_matrix = sample_matrix
self.cancers = np.array(self.sample_matrix.index[:-1])
self.data_types = np.array(self.sample_matrix.columns)
if 'pathway_file' in self.parameters:
self._init_gene_sets(parameters['pathway_file'])
else:
self.gene_sets = {}
self.gene_lookup = {}
self.genes = np.array([])
def _init_gene_sets(self, gene_set_file):
self.gene_sets, self.gene_lookup = read_in_pathways(gene_set_file)
self.genes = np.array(self.gene_lookup.keys())
def __repr__(self):
s = 'Run object for TCGA Analysis\n'
s += 'Firehose run date: ' + self.date + '\n'
s += 'Code version: ' + self.version + '\n'
if self.description:
s += 'Comment: ' + self.description + '\n'
return s
def load_cancer(self, cancer):
path = '/'.join([self.report_path, cancer, 'CancerObject.p'])
obj = pickle.load(open(path, 'rb'))
return obj
def save(self):
self.report_path = (self.result_path + 'Run_' +
self.version.replace('.', '_'))
make_path_dump(self, self.report_path + '/RunObject.p')
def get_run(firehose_dir, version='Latest'):
"""
Helper to get a run from the file-system.
"""
path = '{}/ucsd_analyses'.format(firehose_dir)
if version == 'Latest':
version = sorted(os.listdir(path))[-1]
run = pickle.load(open('{}/{}/RunObject.p'.format(path, version), 'rb'))
return run
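# Hedged usage sketch (the path below is hypothetical, not from this repo):
#
#   run = get_run('/data/firehose_2013_01_16')   # picks the latest version
#   brca = run.load_cancer('BRCA')               # -> Cancer object
#
# get_run() expects a '<firehose_dir>/ucsd_analyses/<version>/RunObject.p'
# pickle on disk; with version='Latest' it takes the lexicographically last
# directory name under ucsd_analyses.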
class Cancer(object):
def __init__(self, name, run):
self.name = name
if name in run.cancer_codes:
self.full_name = run.cancer_codes.ix[name]
else:
self.full_name = name
counts = run.sample_matrix.ix[name]
self.samples = counts[counts > 0]
self.data_types = np.array(self.samples.index)
self.run_path = run.report_path
self.path = '/'.join([self.run_path, self.name])
def load_clinical(self):
path = '/'.join([self.path, 'Clinical', 'ClinicalObject.p'])
obj = pickle.load(open(path, 'rb'))
return obj
def load_global_vars(self):
path = '/'.join([self.path, 'Global_Vars.csv'])
df = pd.read_csv(path, index_col=0)
ft = pd.MultiIndex.from_tuples
df.columns = ft(map(lambda s: eval(s, {}, {}), df.columns))
return df
def load_data(self, data_type):
path = '/'.join([self.path, data_type, 'DataObject.p'])
obj = pickle.load(open(path, 'rb'))
return obj
def __repr__(self):
return self.full_name + '(\'' + self.name + '\') cancer object'
def initialize_data(self, run, save=False, get_vars=False):
clinical = Clinical(self, run)
clinical.artificially_censor(5)
# global_vars = IM.get_global_vars(run.data_path, self.name)
# global_vars = global_vars.groupby(level=0).first()
if save is True:
self.save()
clinical.save()
# global_vars.to_csv(self.path + '/Global_Vars.csv')
if get_vars is True:
return clinical
# return clinical, global_vars
def save(self):
self.path = '{}/{}'.format(self.run_path, self.name)
make_path_dump(self, self.path + '/CancerObject.p')
class Clinical(object):
def __init__(self, cancer, run, patients=None):
"""
:param cancer: Cancer object
:param run: Run object
:param patients: list of patients to filter down to (optional)
"""
self.cancer = cancer.name
self.run_path = run.report_path
self.path = '/'.join([self.run_path, cancer.name])
tup = get_clinical(cancer.name, run.data_path, patients)
(self.clinical, self.drugs, self.followup, self.stage,
self.timeline, self.survival) = tup
def __repr__(self):
return 'Clinical Object for ' + self.cancer
def artificially_censor(self, years):
for n, s in self.survival.iteritems():
if n.endswith('y'):
continue
df = s.unstack().copy()
df['event'] = df.event * (df.days < int(365.25 * years))
df['days'] = df.days.clip_upper(int(365.25 * years))
self.survival[n + '_' + str(years) + 'y'] = df.stack()
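# Worked example with made-up numbers: artificially_censor(5) adds a
# '<name>_5y' entry per survival series, truncated at five years. Events are
# zeroed once days >= int(365.25 * 5) == 1826 and days are clipped to that
# same bound:
#
#   days = 2200, event = 1  ->  days = 1826, event = 0  (censored at cutoff)
#   days = 400,  event = 1  ->  days = 400,  event = 1  (unchanged)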
def save(self):
self.path = '{}/{}'.format(self.run_path, self.cancer)
make_path_dump(self, self.path + '/Clinical/ClinicalObject.p')
if self.drugs is not None:
self.drugs.to_csv(self.path + '/Clinical/drugs.csv')
if self.survival is not None:
self.survival.to_csv(self.path + '/Clinical/survival.csv')
self.timeline.to_csv(self.path + '/Clinical/timeline.csv')
self.clinical.to_csv(self.path + '/Clinical/clinical.csv')
def patient_filter(df, can):
if can.patients is not None:
return df[[p for p in df.columns if p in can.patients]]
elif can.filtered_patients is not None:
return df[[p for p in df.columns if p not in can.filtered_patients]]
else:
return df
class Dataset(object):
def __init__(self, cancer_path, data_type, compressed=True):
self.data_type = data_type
self.path = '{}/{}'.format(cancer_path, data_type)
self.compressed = compressed
self.patients = []
self.df = None
self.features = None
return
def compress(self):
assert len(self.df.shape) == 2
self.patients = self.df.columns
self.df = self.df.replace(0, np.nan).stack()
if self.features is not None:
self.features = self.features.replace(0, np.nan).stack()
self.compressed = True
def uncompress(self):
assert len(self.df.shape) == 1
self.df = self.df.unstack().ix[:, self.patients].fillna(0.)
if self.features is not None:
self.features = self.features.unstack().ix[:, self.patients]
self.features = self.features.fillna(0.)
self.compressed = False
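# Hedged note, not in the original source: compress() stores the data as a
# stacked Series with zeros dropped (a compact long format), and uncompress()
# rebuilds the original wide frame (one column per patient) with the dropped
# zeros restored:
#
#   ds.df            # wide DataFrame, columns are patients
#   ds.compress()    # ds.df is now a MultiIndex Series without zeros
#   ds.uncompress()  # wide again, missing entries filled back with 0.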
def save(self):
if self.compressed is False:
self.compress()
make_path_dump(self, self.path + '/DataObject.p')
def __repr__(self):
return self.data_type + ' dataset'
|
{
"content_hash": "992c3f9a182fe4d33122fa20974de23a",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 78,
"avg_line_length": 34.333333333333336,
"alnum_prop": 0.5701556057986434,
"repo_name": "theandygross/CancerData",
"id": "43bf7c1e0a235112e8cd942668f813ea4e3f68ca",
"size": "7519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/Data/Containers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61729"
}
],
"symlink_target": ""
}
|
from webskewer.serve.main import main
if __name__ == '__main__':
#import cProfile
#import pstats
#try:
# cProfile.run('main()', 'mainprof')
#except KeyboardInterrupt:
# pass
#p = pstats.Stats('mainprof')
#p.strip_dirs()
#p.sort_stats('calls', 'time')
#p.print_stats()
#p.print_callers()
main()
|
{
"content_hash": "1b4a0836e0fe1463ddb2870b917c7d4f",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 43,
"avg_line_length": 20.647058823529413,
"alnum_prop": 0.5641025641025641,
"repo_name": "dhain/webskewer",
"id": "2e73f6846d2c1e426f26f835bc673310fd8075af",
"size": "351",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "webskewer.serve/webskewer/serve/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "60675"
}
],
"symlink_target": ""
}
|
"""Linters that warn about common problems with kake rules."""
from __future__ import absolute_import
import inspect
import re
import sys
from shared import ka_root
from kake.lib import compile_rule
from kake.lib import computed_inputs
# captures "'foo' in context", "context['foo']", and
# context.get('foo'). We also get the remainder of the line to catch
# '@Nolint' directives.
_CONTEXT_USE_RE = re.compile(
'(?:'
r'[\'\"](\w+)[\'\"]\s+in\s+context'
r'|context\[[\'\"](\w+)[\'\"]\]'
r'|context\.get\(\s*[\'\"](\w+)[\'\"]'
').*')
def _all_subclasses(cls, seen=None):
"""Return all subclasses of cls, not just direct ones."""
if seen is None:
seen = set()
try:
subclasses = cls.__subclasses__()
except TypeError: # fails only when cls is type
subclasses = cls.__subclasses__(cls)
for subclass in subclasses:
if subclass not in seen:
seen.add(subclass)
yield subclass
for recurse in _all_subclasses(subclass, seen):
yield recurse
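# Hedged example (classes are generic, not from kake): the walk is recursive,
# so indirect subclasses are yielded as well.
#
#   class A(object): pass
#   class B(A): pass
#   class C(B): pass
#
#   list(_all_subclasses(A))   # -> [B, C]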
def lint_missing_used_context_keys(files_to_lint):
"""Attempts to find places the user failed to update used_context_keys().
If you write a compile_rule that uses context['foo'], you're
supposed to advertise that fact by including 'foo' in your
used_context_keys() method. But it's easy to forget to do that.
This rule attempts to remind you by looking at the source code for
your class and trying to find all uses.
This isn't perfect, which is why it's a lint rule and we don't
just automatically extract uses of context['foo'], but it's better
than nothing! If it's claiming a line is a use of context when
it's not, just stick a @Nolint at the end of the line.
"""
# Only files under the kake directory might have compile rules.
relfiles_to_lint = [ka_root.relpath(f) for f in files_to_lint
if ka_root.relpath(f).startswith('kake/')]
if not relfiles_to_lint:
return
# This forces us to import all the kake compile_rules.
from kake import make # @UnusedImport
classes = (list(_all_subclasses(compile_rule.CompileBase)) +
list(_all_subclasses(computed_inputs.ComputedInputsBase)))
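# Hedged illustration with a made-up rule (not from this codebase): the check
# in the loop below just wants the claimed and actual key sets to agree.
#
#   class CompileFoo(compile_rule.CompileBase):
#       def used_context_keys(self):
#           return ['foo']                     # claimed keys
#       # ... and somewhere in the rule's body:
#       #     value = context['foo']           # actual use the regex finds
#
# A context['bar'] use without 'bar' in used_context_keys() produces the
# "must_add" message; a claimed key that never shows up produces the
# "must_remove" message.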
for cls in classes:
class_file = cls.__module__.replace('.', '/') + '.py'
if class_file not in relfiles_to_lint:
continue
claimed_used_context_keys = set(cls.used_context_keys())
actual_used_context_keys = {} # map from key to linenum where used
class_source = inspect.getsource(cls)
module_source = inspect.getsource(sys.modules[cls.__module__])
# Find what line-number the class we're linting starts on.
class_source_pos = module_source.find(class_source)
class_startline = module_source.count('\n', 0, class_source_pos) + 1
# Find what line-number class.used_context_keys() starts on.
used_context_keys_pos = class_source.find('def used_context_keys')
if used_context_keys_pos == -1:
used_context_keys_line = 1
else:
used_context_keys_line = (class_source.count('\n', 0,
used_context_keys_pos)
+ class_startline)
for m in _CONTEXT_USE_RE.finditer(class_source):
if '@Nolint' not in m.group(0):
key = m.group(1) or m.group(2) or m.group(3)
linenum = (class_source.count('\n', 0, m.start())
+ class_startline)
actual_used_context_keys.setdefault(key, linenum)
must_add = set(actual_used_context_keys) - claimed_used_context_keys
must_remove = claimed_used_context_keys - set(actual_used_context_keys)
for key in must_add:
# We don't require people to register system keys (start with _)
# or glob vars (start with '{').
if not key.startswith(('_', '{')):
yield (ka_root.join(class_file),
actual_used_context_keys[key], # linenum
'Build rule uses "%s" but it is not listed in'
' used_context_keys(). Add it there or mark'
' it with @Nolint if this is in error.' % key)
for key in must_remove:
yield (ka_root.join(class_file),
used_context_keys_line,
'Build rule does not use "%s" but it is listed in'
' used_context_keys(). Remove it from there or fix this'
' linter if it is in error.' % key)
|
{
"content_hash": "62c8f598dc9afd9dcf6dae4bbc06e479",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 79,
"avg_line_length": 39.889830508474574,
"alnum_prop": 0.5810495007435734,
"repo_name": "Khan/khan-linter",
"id": "215785b49124f4ccfcbe7d7b7c424aba18216810",
"size": "4707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "contrib/compile_rule_lint.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Go",
"bytes": "473"
},
{
"name": "JavaScript",
"bytes": "9867"
},
{
"name": "Makefile",
"bytes": "1397"
},
{
"name": "Python",
"bytes": "409986"
},
{
"name": "Shell",
"bytes": "5543"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function
import pytest
sa = pytest.importorskip('sqlalchemy')
import itertools
from distutils.version import LooseVersion
import datashape
from odo import into, resource, discover
from pandas import DataFrame
from toolz import unique
from blaze.compute.sql import compute, select, lower_column, compute_up
from blaze.expr import (
symbol, transform, summary, by, sin, join,
floor, cos, merge, nunique, mean, sum, count, exp
)
from blaze.compatibility import xfail
from blaze.utils import tmpfile, example, normalize
def computefull(t, s):
return select(compute(t, s))
names = ('tbl%d' % i for i in itertools.count())
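# A note on the helpers above (hedged): computefull() wraps a computed column
# or expression in a full SELECT so it renders as standalone SQL, and the
# assertions below compare generated SQL through blaze.utils.normalize, which
# appears to make the comparison insensitive to whitespace and formatting.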
@pytest.fixture(scope='module')
def data():
# make the engine
engine = sa.create_engine('sqlite:///:memory:')
metadata = sa.MetaData(engine)
# name table
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
name.create()
# city table
city = sa.Table('city', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
city.create()
s = symbol('s', discover(engine))
return {'engine': engine, 'metadata': metadata, 'name': name, 'city': city,
's': s}
t = symbol('t', 'var * {name: string, amount: int, id: int}')
nt = symbol('t', 'var * {name: ?string, amount: float64, id: int}')
metadata = sa.MetaData()
s = sa.Table('accounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True))
tdate = symbol('t',
"""var * {
name: string,
amount: int,
id: int,
occurred_on: datetime
}""")
ns = sa.Table('nullaccounts', metadata,
sa.Column('name', sa.String, nullable=True),
sa.Column('amount', sa.REAL),
sa.Column('id', sa.Integer, primary_key=True),
)
sdate = sa.Table('accdate', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('occurred_on', sa.DateTime))
tbig = symbol('tbig',
'var * {name: string, sex: string[1], amount: int, id: int}')
sbig = sa.Table('accountsbig', metadata,
sa.Column('name', sa.String),
sa.Column('sex', sa.String),
sa.Column('amount', sa.Integer),
sa.Column('id', sa.Integer, primary_key=True))
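# Orientation for the tests below: the module-level symbol/table pairs are
# t <-> accounts, nt <-> nullaccounts, tdate <-> accdate, and
# tbig <-> accountsbig; each test compiles a blaze expression written against
# the symbol into SQL for the corresponding SQLAlchemy table.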
def test_table():
result = str(computefull(t, s))
expected = """
SELECT accounts.name, accounts.amount, accounts.id
FROM accounts
""".strip()
assert normalize(result) == normalize(expected)
def test_projection():
print(compute(t[['name', 'amount']], s))
assert str(compute(t[['name', 'amount']], s)) == \
str(sa.select([s.c.name, s.c.amount]))
def test_eq():
assert str(compute(t['amount'] == 100, s, post_compute=False)) == \
str(s.c.amount == 100)
def test_eq_unicode():
assert str(compute(t['name'] == u'Alice', s, post_compute=False)) == \
str(s.c.name == u'Alice')
def test_selection():
assert str(compute(t[t['amount'] == 0], s)) == \
str(sa.select([s]).where(s.c.amount == 0))
assert str(compute(t[t['amount'] > 150], s)) == \
str(sa.select([s]).where(s.c.amount > 150))
def test_arithmetic():
assert str(compute(t['amount'] + t['id'], s)) == \
str(sa.select([s.c.amount + s.c.id]))
assert str(compute(t['amount'] + t['id'], s, post_compute=False)) == \
str(s.c.amount + s.c.id)
assert str(compute(t['amount'] * t['id'], s, post_compute=False)) == \
str(s.c.amount * s.c.id)
assert str(compute(t['amount'] * 2, s, post_compute=False)) == \
str(s.c.amount * 2)
assert str(compute(2 * t['amount'], s, post_compute=False)) == \
str(2 * s.c.amount)
assert (str(compute(~(t['amount'] > 10), s, post_compute=False)) ==
"accounts.amount <= :amount_1")
assert str(compute(t['amount'] + t['id'] * 2, s)) == \
str(sa.select([s.c.amount + s.c.id * 2]))
def test_join():
metadata = sa.MetaData()
lhs = sa.Table('amounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
rhs = sa.Table('ids', metadata,
sa.Column('name', sa.String),
sa.Column('id', sa.Integer))
expected = lhs.join(rhs, lhs.c.name == rhs.c.name)
expected = select(list(unique(expected.columns, key=lambda c:
c.name))).select_from(expected)
L = symbol('L', 'var * {name: string, amount: int}')
R = symbol('R', 'var * {name: string, id: int}')
joined = join(L, R, 'name')
result = compute(joined, {L: lhs, R: rhs})
assert normalize(str(result)) == normalize("""
SELECT amounts.name, amounts.amount, ids.id
FROM amounts JOIN ids ON amounts.name = ids.name""")
assert str(select(result)) == str(select(expected))
# Schemas match
assert list(result.c.keys()) == list(joined.fields)
# test sort on join
result = compute(joined.sort('amount'), {L: lhs, R: rhs})
assert normalize(str(result)) == normalize("""
select
anon_1.name,
anon_1.amount,
anon_1.id
from (select
amounts.name as name,
amounts.amount as amount,
ids.id as id
from
amounts
join
ids
on
amounts.name = ids.name) as anon_1
order by
anon_1.amount asc""")
def test_clean_complex_join():
metadata = sa.MetaData()
lhs = sa.Table('amounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
rhs = sa.Table('ids', metadata,
sa.Column('name', sa.String),
sa.Column('id', sa.Integer))
L = symbol('L', 'var * {name: string, amount: int}')
R = symbol('R', 'var * {name: string, id: int}')
joined = join(L[L.amount > 0], R, 'name')
result = compute(joined, {L: lhs, R: rhs})
expected1 = """
SELECT amounts.name, amounts.amount, ids.id
FROM amounts JOIN ids ON amounts.name = ids.name
WHERE amounts.amount > :amount_1"""
expected2 = """
SELECT alias.name, alias.amount, ids.id
FROM (SELECT amounts.name AS name, amounts.amount AS amount
FROM amounts
WHERE amounts.amount > :amount_1) AS alias
JOIN ids ON alias.name = ids.name"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_multi_column_join():
metadata = sa.MetaData()
lhs = sa.Table('aaa', metadata,
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer),
sa.Column('z', sa.Integer))
rhs = sa.Table('bbb', metadata,
sa.Column('w', sa.Integer),
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer))
L = symbol('L', 'var * {x: int, y: int, z: int}')
R = symbol('R', 'var * {w: int, x: int, y: int}')
joined = join(L, R, ['x', 'y'])
expected = lhs.join(rhs, (lhs.c.x == rhs.c.x)
& (lhs.c.y == rhs.c.y))
expected = select(list(unique(expected.columns, key=lambda c:
c.name))).select_from(expected)
result = compute(joined, {L: lhs, R: rhs})
assert str(result) == str(expected)
assert str(select(result)) == str(select(expected))
# Schemas match
print(result.c.keys())
print(joined.fields)
assert list(result.c.keys()) == list(joined.fields)
def test_unary_op():
assert str(compute(exp(t['amount']), s, post_compute=False)) == \
str(sa.func.exp(s.c.amount))
assert str(compute(-t['amount'], s, post_compute=False)) == \
str(-s.c.amount)
@pytest.mark.parametrize('unbiased', [True, False])
def test_std(unbiased):
assert str(compute(t.amount.std(unbiased=unbiased), s, post_compute=False)) == \
str(getattr(sa.func,
'stddev_%s' % ('samp' if unbiased else 'pop'))(s.c.amount))
@pytest.mark.parametrize('unbiased', [True, False])
def test_var(unbiased):
assert str(compute(t.amount.var(unbiased=unbiased), s, post_compute=False)) == \
str(getattr(sa.func,
'var_%s' % ('samp' if unbiased else 'pop'))(s.c.amount))
def test_reductions():
assert str(compute(sum(t['amount']), s, post_compute=False)) == \
str(sa.sql.functions.sum(s.c.amount))
assert str(compute(mean(t['amount']), s, post_compute=False)) == \
str(sa.sql.func.avg(s.c.amount))
assert str(compute(count(t['amount']), s, post_compute=False)) == \
str(sa.sql.func.count(s.c.amount))
assert 'amount_sum' == compute(
sum(t['amount']), s, post_compute=False).name
def test_reduction_with_invalid_axis_argument():
with pytest.raises(ValueError):
compute(t.amount.mean(axis=1))
with pytest.raises(ValueError):
compute(t.count(axis=1))
with pytest.raises(ValueError):
compute(t[['amount', 'id']].count(axis=1))
def test_nelements():
rhs = str(compute(t.count(), s))
assert str(compute(t.nelements(), s)) == rhs
assert str(compute(t.nelements(axis=None), s)) == rhs
assert str(compute(t.nelements(axis=0), s)) == rhs
assert str(compute(t.nelements(axis=(0,)), s)) == rhs
@pytest.mark.xfail(raises=Exception, reason="We don't support axis=1 for"
" Record datashapes")
def test_nelements_axis_1():
assert compute(t.nelements(axis=1), s) == len(s.columns)
def test_count_on_table():
result = compute(t.count(), s)
assert normalize(str(result)) == normalize("""
SELECT count(accounts.id) as count_1
FROM accounts""")
result = compute(t[t.amount > 0].count(), s)
assert (
normalize(str(result)) == normalize("""
SELECT count(accounts.id) as t_count
FROM accounts
WHERE accounts.amount > :amount_1""")
or
normalize(str(result)) == normalize("""
SELECT count(alias.id) as t_count
FROM (SELECT accounts.name AS name, accounts.amount AS amount, accounts.id AS id
FROM accounts
WHERE accounts.amount > :amount_1) as alias_2"""))
def test_distinct():
result = str(compute(t['amount'].distinct(), s, post_compute=False))
assert 'distinct' in result.lower()
assert 'amount' in result.lower()
print(result)
assert result == str(sa.distinct(s.c.amount))
def test_distinct_multiple_columns():
assert normalize(str(compute(t.distinct(), s))) == normalize("""
SELECT DISTINCT accounts.name, accounts.amount, accounts.id
FROM accounts""")
def test_nunique():
result = str(computefull(nunique(t['amount']), s))
print(result)
assert 'distinct' in result.lower()
assert 'count' in result.lower()
assert 'amount' in result.lower()
def test_nunique_table():
result = normalize(str(computefull(t.nunique(), s)))
expected = normalize("""SELECT count(alias.id) AS tbl_row_count
FROM (SELECT DISTINCT accounts.name AS name, accounts.amount AS amount, accounts.id AS id
FROM accounts) as alias""")
assert result == expected
@xfail(reason="Fails because SQLAlchemy doesn't seem to know binary reductions")
def test_binary_reductions():
assert str(compute(any(t['amount'] > 150), s)) == \
str(sa.sql.functions.any(s.c.amount > 150))
def test_by():
expr = by(t['name'], total=t['amount'].sum())
result = compute(expr, s)
expected = sa.select([s.c.name,
sa.sql.functions.sum(s.c.amount).label('total')]
).group_by(s.c.name)
assert str(result) == str(expected)
def test_by_head():
t2 = t.head(100)
expr = by(t2['name'], total=t2['amount'].sum())
result = compute(expr, s)
# s2 = select(s).limit(100)
# expected = sa.select([s2.c.name,
# sa.sql.functions.sum(s2.c.amount).label('amount_sum')]
# ).group_by(s2.c.name)
expected = """
SELECT alias.name, sum(alias.amount) as total
FROM (SELECT accounts.name AS name, accounts.amount AS amount, accounts.id AS ID
FROM accounts
LIMIT :param_1) as alias
GROUP BY alias.name"""
expected = """
SELECT accounts.name, sum(accounts.amount) as total
FROM accounts
GROUP by accounts.name
LIMIT :param_1"""
assert normalize(str(result)) == normalize(str(expected))
def test_by_two():
expr = by(tbig[['name', 'sex']], total=tbig['amount'].sum())
result = compute(expr, sbig)
expected = (sa.select([sbig.c.name,
sbig.c.sex,
sa.sql.functions.sum(sbig.c.amount).label('total')])
.group_by(sbig.c.name, sbig.c.sex))
assert str(result) == str(expected)
def test_by_three():
result = compute(by(tbig[['name', 'sex']],
total=(tbig['id'] + tbig['amount']).sum()),
sbig)
assert normalize(str(result)) == normalize("""
SELECT accountsbig.name,
accountsbig.sex,
sum(accountsbig.id + accountsbig.amount) AS total
FROM accountsbig GROUP BY accountsbig.name, accountsbig.sex
""")
def test_by_summary_clean():
expr = by(t.name, min=t.amount.min(), max=t.amount.max())
result = compute(expr, s)
expected = """
SELECT accounts.name, max(accounts.amount) AS max, min(accounts.amount) AS min
FROM accounts
GROUP BY accounts.name
"""
assert normalize(str(result)) == normalize(expected)
def test_by_summary_single_column():
expr = by(t.name, n=t.name.count(), biggest=t.name.max())
result = compute(expr, s)
expected = """
SELECT accounts.name, max(accounts.name) AS biggest, count(accounts.name) AS n
FROM accounts
GROUP BY accounts.name
"""
assert normalize(str(result)) == normalize(expected)
def test_join_projection():
metadata = sa.MetaData()
lhs = sa.Table('amounts', metadata,
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
rhs = sa.Table('ids', metadata,
sa.Column('name', sa.String),
sa.Column('id', sa.Integer))
L = symbol('L', 'var * {name: string, amount: int}')
R = symbol('R', 'var * {name: string, id: int}')
want = join(L, R, 'name')[['amount', 'id']]
result = compute(want, {L: lhs, R: rhs})
print(result)
assert 'join' in str(result).lower()
assert result.c.keys() == ['amount', 'id']
assert 'amounts.name = ids.name' in str(result)
def test_sort():
assert str(compute(t.sort('amount'), s)) == \
str(select(s).order_by(sa.asc(s.c.amount)))
assert str(compute(t.sort('amount', ascending=False), s)) == \
str(select(s).order_by(sa.desc(s.c.amount)))
def test_multicolumn_sort():
assert str(compute(t.sort(['amount', 'id']), s)) == \
str(select(s).order_by(sa.asc(s.c.amount), sa.asc(s.c.id)))
assert str(compute(t.sort(['amount', 'id'], ascending=False), s)) == \
str(select(s).order_by(sa.desc(s.c.amount), sa.desc(s.c.id)))
def test_sort_on_distinct():
assert normalize(str(compute(t.amount.sort(), s))) == normalize("""
SELECT accounts.amount
FROM accounts
ORDER BY accounts.amount ASC""")
assert normalize(str(compute(t.amount.distinct().sort(), s))) == normalize("""
SELECT DISTINCT accounts.amount as amount
FROM accounts
ORDER BY amount ASC""")
def test_head():
assert str(compute(t.head(2), s)) == str(select(s).limit(2))
def test_label():
assert (str(compute((t['amount'] * 10).label('foo'),
s, post_compute=False)) ==
str((s.c.amount * 10).label('foo')))
def test_relabel_table():
result = compute(t.relabel(name='NAME', id='ID'), s)
expected = select([
s.c.name.label('NAME'),
s.c.amount,
s.c.id.label('ID'),
])
assert str(result) == str(expected)
def test_relabel_projection():
result = compute(
t[['name', 'id']].relabel(name='new_name', id='new_id'),
s,
)
assert normalize(str(result)) == normalize(
"""SELECT
accounts.name AS new_name,
accounts.id AS new_id
FROM accounts""",
)
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
result = str(compute(expr, s))
assert 'amount * ' in result
assert 'FROM accounts' in result
assert 'SELECT accounts.name' in result
assert 'new' in result
def test_projection_of_selection():
print(compute(t[t['amount'] < 0][['name', 'amount']], s))
assert len(str(compute(t[t['amount'] < 0], s))) > \
len(str(compute(t[t['amount'] < 0][['name', 'amount']], s)))
def test_outer_join():
L = symbol('L', 'var * {id: int, name: string, amount: real}')
R = symbol('R', 'var * {city: string, id: int}')
with tmpfile('db') as fn:
uri = 'sqlite:///' + fn
engine = resource(uri)
_left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
left = resource(uri, 'left', dshape=L.dshape)
into(left, _left)
_right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
right = resource(uri, 'right', dshape=R.dshape)
into(right, _right)
conn = engine.connect()
query = compute(join(L, R, how='inner'),
{L: left, R: right},
post_compute=False)
result = list(map(tuple, conn.execute(query).fetchall()))
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')])
query = compute(join(L, R, how='left'),
{L: left, R: right},
post_compute=False)
result = list(map(tuple, conn.execute(query).fetchall()))
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(4, 'Dennis', 400, 'Moscow')])
query = compute(join(L, R, how='right'),
{L: left, R: right},
post_compute=False)
print(query)
result = list(map(tuple, conn.execute(query).fetchall()))
print(result)
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
# SQLAlchemy doesn't support full outer join
"""
query = compute(join(L, R, how='outer'),
{L: left, R: right},
post_compute=False)
result = list(map(tuple, conn.execute(query).fetchall()))
assert set(result) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
"""
conn.close()
def test_summary():
expr = summary(a=t.amount.sum(), b=t.id.count())
result = str(compute(expr, s))
assert 'sum(accounts.amount) as a' in result.lower()
assert 'count(accounts.id) as b' in result.lower()
def test_summary_clean():
t2 = t[t.amount > 0]
expr = summary(a=t2.amount.sum(), b=t2.id.count())
result = str(compute(expr, s))
assert normalize(result) == normalize("""
SELECT sum(accounts.amount) as a, count(accounts.id) as b
FROM accounts
WHERE accounts.amount > :amount_1""")
def test_summary_by():
expr = by(t.name, summary(a=t.amount.sum(), b=t.id.count()))
result = str(compute(expr, s))
assert 'sum(accounts.amount) as a' in result.lower()
assert 'count(accounts.id) as b' in result.lower()
assert 'group by accounts.name' in result.lower()
def test_clean_join():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
friends = sa.Table('friends', metadata,
sa.Column('a', sa.Integer),
sa.Column('b', sa.Integer),
)
tcity = symbol('city', discover(city))
tfriends = symbol('friends', discover(friends))
tname = symbol('name', discover(name))
ns = {tname: name, tfriends: friends, tcity: city}
expr = join(tfriends, tname, 'a', 'id')
assert normalize(str(compute(expr, ns))) == normalize("""
SELECT friends.a, friends.b, name.name
FROM friends JOIN name on friends.a = name.id""")
expr = join(join(tfriends, tname, 'a', 'id'), tcity, 'a', 'id')
result = compute(expr, ns)
expected1 = """
SELECT friends.a, friends.b, name.name, place.city, place.country
FROM friends
JOIN name ON friends.a = name.id
JOIN place ON friends.a = place.id
"""
expected2 = """
SELECT alias.a, alias.b, alias.name, place.city, place.country
FROM (SELECT friends.a AS a, friends.b AS b, name.name AS name
FROM friends JOIN name ON friends.a = name.id) AS alias
JOIN place ON alias.a = place.id
"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_like():
expr = t.like(name='Alice*')
assert normalize(str(compute(expr, s))) == normalize("""
SELECT accounts.name, accounts.amount, accounts.id
FROM accounts
WHERE accounts.name LIKE :name_1""")
def test_strlen():
expr = t.name.strlen()
result = str(compute(expr, s))
expected = "SELECT char_length(accounts.name) as name FROM accounts"
assert normalize(result) == normalize(expected)
def test_columnwise_on_complex_selection():
result = str(select(compute(t[t.amount > 0].amount + 1, s)))
assert normalize(result) == \
normalize("""
SELECT accounts.amount + :amount_1 AS amount
FROM accounts
WHERE accounts.amount > :amount_2
""")
def test_reductions_on_complex_selections():
assert (
normalize(str(select(compute(t[t.amount > 0].id.sum(), s)))) ==
normalize(
"""
select
sum(alias.id) as id_sum
from (select
accounts.id as id
from accounts
where accounts.amount > :amount_1) as alias
"""
)
)
def test_clean_summary_by_where():
t2 = t[t.id == 1]
expr = by(t2.name, sum=t2.amount.sum(), count=t2.amount.count())
result = compute(expr, s)
assert normalize(str(result)) == normalize("""
SELECT accounts.name, count(accounts.amount) AS count, sum(accounts.amount) AS sum
FROM accounts
WHERE accounts.id = :id_1
GROUP BY accounts.name
""")
def test_by_on_count():
expr = by(t.name, count=t.count())
result = compute(expr, s)
assert normalize(str(result)) == normalize("""
SELECT accounts.name, count(accounts.id) AS count
FROM accounts
GROUP BY accounts.name
""")
def test_join_complex_clean():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
ns = {tname: name, tcity: city}
expr = join(tname[tname.id > 0], tcity, 'id')
result = compute(expr, ns)
expected1 = """
SELECT name.id, name.name, place.city, place.country
FROM name JOIN place ON name.id = place.id
WHERE name.id > :id_1"""
expected2 = """
SELECT alias.id, alias.name, place.city, place.country
FROM (SELECT name.id as id, name.name AS name
FROM name
WHERE name.id > :id_1) AS alias
JOIN place ON alias.id = place.id"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_projection_of_join():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
expr = join(tname, tcity[tcity.city == 'NYC'], 'id')[['country', 'name']]
ns = {tname: name, tcity: city}
result = compute(expr, ns)
expected1 = """
SELECT place.country, name.name
FROM name JOIN place ON name.id = place.id
WHERE place.city = :city_1"""
expected2 = """
SELECT alias.country, name.name
FROM name
JOIN (SELECT place.id AS id, place.city AS city, place.country AS country
FROM place
WHERE place.city = :city_1) AS alias
ON name.id = alias_6.id"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_lower_column():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
assert lower_column(name.c.id) is name.c.id
assert lower_column(select(name).c.id) is name.c.id
j = name.join(city, name.c.id == city.c.id)
col = [c for c in j.columns if c.name == 'country'][0]
assert lower_column(col) is city.c.country
def test_selection_of_join():
metadata = sa.MetaData()
name = sa.Table('name', metadata,
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
)
city = sa.Table('place', metadata,
sa.Column('id', sa.Integer),
sa.Column('city', sa.String),
sa.Column('country', sa.String),
)
tname = symbol('name', discover(name))
tcity = symbol('city', discover(city))
ns = {tname: name, tcity: city}
j = join(tname, tcity, 'id')
expr = j[j.city == 'NYC'].name
result = compute(expr, ns)
assert normalize(str(result)) == normalize("""
SELECT name.name
FROM name JOIN place ON name.id = place.id
WHERE place.city = :city_1""")
def test_join_on_same_table():
metadata = sa.MetaData()
T = sa.Table('tab', metadata,
sa.Column('a', sa.Integer),
sa.Column('b', sa.Integer),
)
t = symbol('tab', discover(T))
expr = join(t, t, 'a')
result = compute(expr, {t: T})
assert normalize(str(result)) == normalize("""
SELECT
tab_left.a,
tab_left.b as b_left,
tab_right.b as b_right
FROM
tab AS tab_left
JOIN
tab AS tab_right
ON
tab_left.a = tab_right.a
""")
expr = join(t, t, 'a').b_left.sum()
result = compute(expr, {t: T})
assert normalize(str(result)) == normalize("""
select sum(alias.b_left) as b_left_sum from
(select
tab_left.b as b_left
from
tab as tab_left
join
tab as tab_right
on
tab_left.a = tab_right.a) as
alias""")
expr = join(t, t, 'a')
expr = summary(total=expr.a.sum(), smallest=expr.b_right.min())
result = compute(expr, {t: T})
assert normalize(str(result)) == normalize("""
SELECT
min(tab_right.b) as smallest,
sum(tab_left.a) as total
FROM
tab AS tab_left
JOIN
tab AS tab_right
ON
tab_left.a = tab_right.a
""")
def test_join_suffixes():
metadata = sa.MetaData()
T = sa.Table('tab', metadata,
sa.Column('a', sa.Integer),
sa.Column('b', sa.Integer),
)
t = symbol('tab', discover(T))
suffixes = '_l', '_r'
expr = join(t, t, 'a', suffixes=suffixes)
result = compute(expr, {t: T})
assert normalize(str(result)) == normalize("""
SELECT
tab{l}.a,
tab{l}.b as b{l},
tab{r}.b as b{r}
FROM
tab AS tab{l}
JOIN
tab AS tab{r}
ON
tab{l}.a = tab{r}.a
""".format(l=suffixes[0], r=suffixes[1]))
def test_field_access_on_engines(data):
s, engine = data['s'], data['engine']
result = compute_up(s.city, engine)
assert isinstance(result, sa.Table)
assert result.name == 'city'
def test_computation_directly_on_sqlalchemy_Tables(data):
name = data['name']
s = symbol('s', discover(name))
result = into(list, compute(s.id + 1, name))
assert not isinstance(result, sa.sql.Selectable)
assert list(result) == []
def test_computation_directly_on_metadata(data):
metadata = data['metadata']
name = data['name']
s = symbol('s', discover(metadata))
result = compute(s.name, {s: metadata}, post_compute=False)
assert result == name
sql_bank = sa.Table('bank', sa.MetaData(),
sa.Column('id', sa.Integer),
sa.Column('name', sa.String),
sa.Column('amount', sa.Integer))
sql_cities = sa.Table('cities', sa.MetaData(),
sa.Column('name', sa.String),
sa.Column('city', sa.String))
bank = symbol('bank', discover(sql_bank))
cities = symbol('cities', discover(sql_cities))
def test_aliased_views_with_two_group_bys():
expr = by(bank.name, total=bank.amount.sum())
expr2 = by(expr.total, count=expr.name.count())
result = compute(expr2, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == normalize("""
SELECT alias.total, count(alias.name) as count
FROM (SELECT bank.name AS name, sum(bank.amount) AS total
FROM bank
GROUP BY bank.name) as alias
GROUP BY alias.total
""")
def test_aliased_views_with_join():
joined = join(bank, cities)
expr = by(joined.city, total=joined.amount.sum())
expr2 = by(expr.total, count=expr.city.nunique())
result = compute(expr2, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == normalize("""
SELECT alias.total, count(DISTINCT alias.city) AS count
FROM (SELECT cities.city AS city, sum(bank.amount) AS total
FROM bank
JOIN cities ON bank.name = cities.name
GROUP BY cities.city) as alias
GROUP BY alias.total
""")
def test_select_field_on_alias():
result = compute_up(t.amount, select(s).limit(10).alias('foo'))
assert normalize(str(select(result))) == normalize("""
SELECT foo.amount
FROM (SELECT accounts.name AS name, accounts.amount AS amount, accounts.id AS id
FROM accounts
LIMIT :param_1) as foo""")
@pytest.mark.xfail(raises=Exception,
reason="sqlalchemy.join seems to drop unnecessary tables")
def test_join_on_single_column():
expr = join(cities[['name']], bank)
result = compute(expr, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == """
SELECT bank.id, bank.name, bank.amount
FROM bank join cities ON bank.name = cities.name"""
expr = join(bank, cities.name)
result = compute(expr, {bank: sql_bank, cities: sql_cities})
assert normalize(str(result)) == """
SELECT bank.id, bank.name, bank.amount
FROM bank join cities ON bank.name = cities.name"""
def test_aliased_views_more():
metadata = sa.MetaData()
lhs = sa.Table('aaa', metadata,
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer),
sa.Column('z', sa.Integer))
rhs = sa.Table('bbb', metadata,
sa.Column('w', sa.Integer),
sa.Column('x', sa.Integer),
sa.Column('y', sa.Integer))
L = symbol('L', 'var * {x: int, y: int, z: int}')
R = symbol('R', 'var * {w: int, x: int, y: int}')
expr = join(by(L.x, y_total=L.y.sum()),
R)
result = compute(expr, {L: lhs, R: rhs})
assert normalize(str(result)) == normalize("""
SELECT alias.x, alias.y_total, bbb.w, bbb.y
FROM (SELECT aaa.x as x, sum(aaa.y) as y_total
FROM aaa
GROUP BY aaa.x) AS alias
JOIN bbb ON alias.x = bbb.x """)
expr2 = by(expr.w, count=expr.x.count(), total2=expr.y_total.sum())
result2 = compute(expr2, {L: lhs, R: rhs})
assert (
normalize(str(result2)) == normalize("""
SELECT alias_2.w, count(alias_2.x) as count, sum(alias_2.y_total) as total2
FROM (SELECT alias.x, alias.y_total, bbb.w, bbb.y
FROM (SELECT aaa.x as x, sum(aaa.y) as y_total
FROM aaa
GROUP BY aaa.x) AS alias
JOIN bbb ON alias.x = bbb.x) AS alias_2
GROUP BY alias_2.w""")
or
normalize(str(result2)) == normalize("""
SELECT bbb.w, count(alias.x) as count, sum(alias.y_total) as total2
FROM (SELECT aaa.x as x, sum(aaa.y) as y_total
FROM aaa
GROUP BY aaa.x) as alias
JOIN bbb ON alias.x = bbb.x
GROUP BY bbb.w"""))
def test_aliased_views_with_computation():
engine = sa.create_engine('sqlite:///:memory:')
df_aaa = DataFrame({'x': [1, 2, 3, 2, 3],
'y': [2, 1, 2, 3, 1],
'z': [3, 3, 3, 1, 2]})
df_bbb = DataFrame({'w': [1, 2, 3, 2, 3],
'x': [2, 1, 2, 3, 1],
'y': [3, 3, 3, 1, 2]})
df_aaa.to_sql('aaa', engine)
df_bbb.to_sql('bbb', engine)
metadata = sa.MetaData(engine)
metadata.reflect()
sql_aaa = metadata.tables['aaa']
sql_bbb = metadata.tables['bbb']
L = symbol('aaa', discover(df_aaa))
R = symbol('bbb', discover(df_bbb))
expr = join(by(L.x, y_total=L.y.sum()),
R)
a = compute(expr, {L: df_aaa, R: df_bbb})
b = compute(expr, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
expr2 = by(expr.w, count=expr.x.count(), total2=expr.y_total.sum())
a = compute(expr2, {L: df_aaa, R: df_bbb})
b = compute(expr2, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
expr3 = by(expr.x, count=expr.y_total.count())
a = compute(expr3, {L: df_aaa, R: df_bbb})
b = compute(expr3, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
expr4 = join(expr2, R)
a = compute(expr4, {L: df_aaa, R: df_bbb})
b = compute(expr4, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
""" # Takes a while
expr5 = by(expr4.count, total=(expr4.x + expr4.y).sum())
a = compute(expr5, {L: df_aaa, R: df_bbb})
b = compute(expr5, {L: sql_aaa, R: sql_bbb})
assert into(set, a) == into(set, b)
"""
def test_distinct_count_on_projection():
expr = t[['amount']].distinct().count()
result = compute(expr, {t: s})
assert (
normalize(str(result)) == normalize("""
SELECT count(DISTINCT accounts.amount)
FROM accounts""")
or
normalize(str(result)) == normalize("""
SELECT count(alias.amount) as count
FROM (SELECT DISTINCT accounts.amount AS amount
FROM accounts) as alias"""))
# note that id is the primary key
expr = t[['amount', 'id']].distinct().count()
result = compute(expr, {t: s})
assert normalize(str(result)) == normalize("""
SELECT count(alias.id) as count
FROM (SELECT DISTINCT accounts.amount AS amount, accounts.id AS id
FROM accounts) as alias""")
def test_join_count():
ds = datashape.dshape(
'{t1: var * {x: int, y: int}, t2: var * {a: int, b: int}}')
engine = resource('sqlite:///:memory:', dshape=ds)
db = symbol('db', ds)
expr = join(db.t1[db.t1.x > -1], db.t2, 'x', 'a').count()
result = compute(expr, {db: engine}, post_compute=False)
expected1 = """
SELECT count(alias.x) as count
FROM (SELECT t1.x AS x, t1.y AS y, t2.b AS b
FROM t1 JOIN t2 ON t1.x = t2.a
WHERE t1.x > ?) as alias
"""
expected2 = """
SELECT count(alias2.x) AS count
FROM (SELECT alias1.x AS x, alias1.y AS y, t2.b AS b
FROM (SELECT t1.x AS x, t1.y AS y
FROM t1
WHERE t1.x > ?) AS alias1
JOIN t2 ON alias1.x = t2.a) AS alias2"""
assert (normalize(str(result)) == normalize(expected1) or
normalize(str(result)) == normalize(expected2))
def test_transform_where():
t2 = t[t.id == 1]
expr = transform(t2, abs_amt=abs(t2.amount), sine=sin(t2.id))
result = compute(expr, s)
expected = """SELECT
accounts.name,
accounts.amount,
accounts.id,
abs(accounts.amount) as abs_amt,
sin(accounts.id) as sine
FROM accounts
WHERE accounts.id = :id_1
"""
assert normalize(str(result)) == normalize(expected)
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
result = str(compute(expr, s))
assert 'amount * ' in result
assert 'FROM accounts' in result
assert 'SELECT accounts.name' in result
assert 'new' in result
def test_merge_where():
t2 = t[t.id == 1]
expr = merge(t2[['amount', 'name']], t2.id)
result = compute(expr, s)
expected = normalize("""SELECT
accounts.amount,
accounts.name,
accounts.id
FROM accounts
WHERE accounts.id = :id_1
""")
assert normalize(str(result)) == expected
def test_transform_filter_by_single_column():
t2 = t[t.amount < 0]
tr = transform(t2, abs_amt=abs(t2.amount), sine=sin(t2.id))
expr = by(tr.name, avg_amt=tr.abs_amt.mean())
result = compute(expr, s)
expected = normalize("""SELECT
accounts.name,
avg(abs(accounts.amount)) AS avg_amt
FROM accounts
WHERE accounts.amount < :amount_1
GROUP BY accounts.name
""")
assert normalize(str(result)) == expected
def test_transform_filter_by_multiple_columns():
t2 = t[t.amount < 0]
tr = transform(t2, abs_amt=abs(t2.amount), sine=sin(t2.id))
expr = by(tr.name, avg_amt=tr.abs_amt.mean(), sum_sine=tr.sine.sum())
result = compute(expr, s)
expected = normalize("""SELECT
accounts.name,
avg(abs(accounts.amount)) AS avg_amt,
sum(sin(accounts.id)) AS sum_sine
FROM accounts
WHERE accounts.amount < :amount_1
GROUP BY accounts.name
""")
assert normalize(str(result)) == expected
def test_transform_filter_by_different_order():
t2 = transform(t, abs_amt=abs(t.amount), sine=sin(t.id))
tr = t2[t2.amount < 0]
expr = by(tr.name,
avg_amt=tr.abs_amt.mean(),
avg_sine=tr.sine.sum() / tr.sine.count())
result = compute(expr, s)
expected = normalize("""SELECT
accounts.name,
avg(abs(accounts.amount)) AS avg_amt,
sum(sin(accounts.id)) / count(sin(accounts.id)) AS avg_sine
FROM accounts
WHERE accounts.amount < :amount_1
GROUP BY accounts.name
""")
assert normalize(str(result)) == expected
def test_transform_filter_by_projection():
t2 = transform(t, abs_amt=abs(t.amount), sine=sin(t.id))
tr = t2[t2.amount < 0]
expr = by(tr[['name', 'id']],
avg_amt=tr.abs_amt.mean(),
avg_sine=tr.sine.sum() / tr.sine.count())
result = compute(expr, s)
expected = normalize("""SELECT
accounts.name,
accounts.id,
avg(abs(accounts.amount)) AS avg_amt,
sum(sin(accounts.id)) / count(sin(accounts.id)) AS avg_sine
FROM accounts
WHERE accounts.amount < :amount_1
GROUP BY accounts.name, accounts.id
""")
assert normalize(str(result)) == expected
def test_merge_compute():
data = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
ds = datashape.dshape('var * {id: int, name: string, amount: real}')
s = symbol('s', ds)
with tmpfile('db') as fn:
uri = 'sqlite:///' + fn
into(uri + '::table', data, dshape=ds)
expr = transform(s, amount10=s.amount * 10)
result = into(list, compute(expr, {s: data}))
assert result == [(1, 'Alice', 100, 1000),
(2, 'Bob', 200, 2000),
(4, 'Dennis', 400, 4000)]
def test_notnull():
result = compute(nt[nt.name.notnull()], ns)
expected = """SELECT
nullaccounts.name,
nullaccounts.amount,
nullaccounts.id
FROM nullaccounts
WHERE nullaccounts.name is not null
"""
assert normalize(str(result)) == normalize(expected)
def test_head_limit():
assert compute(t.head(5).head(10), s)._limit == 5
assert compute(t.head(10).head(5), s)._limit == 5
assert compute(t.head(10).head(10), s)._limit == 10
def test_no_extraneous_join():
ds = """ {event: var * {name: ?string,
operation: ?string,
datetime_nearest_receiver: ?datetime,
aircraft: ?string,
temperature_2m: ?float64,
temperature_5cm: ?float64,
humidity: ?float64,
windspeed: ?float64,
pressure: ?float64,
include: int64},
operation: var * {name: ?string,
runway: int64,
takeoff: bool,
datetime_nearest_close: ?string}}
"""
db = resource('sqlite:///:memory:', dshape=ds)
d = symbol('db', dshape=ds)
expr = join(d.event[d.event.include == True],
d.operation[['name', 'datetime_nearest_close']],
'operation', 'name')
result = compute(expr, db)
assert normalize(str(result)) == normalize("""
SELECT
alias.operation,
alias.name as name_left,
alias.datetime_nearest_receiver,
alias.aircraft,
alias.temperature_2m,
alias.temperature_5cm,
alias.humidity,
alias.windspeed,
alias.pressure,
alias.include,
alias.datetime_nearest_close
FROM
(SELECT
event.name AS name,
event.operation AS operation,
event.datetime_nearest_receiver AS datetime_nearest_receiver,
event.aircraft AS aircraft,
event.temperature_2m AS temperature_2m,
event.temperature_5cm AS temperature_5cm,
event.humidity AS humidity,
event.windspeed AS windspeed,
event.pressure AS pressure,
event.include AS include
FROM
event WHERE event.include = 1) AS alias1
JOIN
(SELECT
operation.name AS name,
operation.datetime_nearest_close as datetime_nearest_close
FROM operation) AS alias2
ON
alias1.operation = alias2.name
""")
def test_math():
result = compute(sin(t.amount), s)
assert normalize(str(result)) == normalize("""
SELECT sin(accounts.amount) as amount
FROM accounts""")
result = compute(floor(t.amount), s)
assert normalize(str(result)) == normalize("""
SELECT floor(accounts.amount) as amount
FROM accounts""")
result = compute(t.amount // 2, s)
assert normalize(str(result)) == normalize("""
SELECT floor(accounts.amount / :amount_1) AS amount
FROM accounts""")
def test_transform_order():
r = transform(t, sin_amount=sin(t.amount), cos_id=cos(t.id))
result = compute(r, s)
expected = """SELECT
accounts.name,
accounts.amount,
accounts.id,
cos(accounts.id) as cos_id,
sin(accounts.amount) as sin_amount
FROM accounts
"""
assert normalize(str(result)) == normalize(expected)
def test_isin():
result = t[t.name.isin(['foo', 'bar'])]
result_sql_expr = str(compute(result, s))
expected = """
SELECT
accounts.name,
accounts.amount,
accounts.id
FROM
accounts
WHERE
accounts.name
IN
(:name_1,
:name_2)
"""
assert normalize(result_sql_expr) == normalize(expected)
@pytest.mark.skipif('1.0.0' <= LooseVersion(sa.__version__) <= '1.0.1',
reason=("SQLAlchemy generates different code in 1.0.0"
" and 1.0.1"))
def test_date_grouper_repeats_not_one_point_oh():
columns = [sa.Column('amount', sa.REAL),
sa.Column('ds', sa.TIMESTAMP)]
data = sa.Table('t', sa.MetaData(), *columns)
t = symbol('t', discover(data))
expr = by(t.ds.year, avg_amt=t.amount.mean())
result = str(compute(expr, data))
# FYI spark sql isn't able to parse this correctly
expected = """SELECT
EXTRACT(year FROM t.ds) as ds_year,
AVG(t.amount) as avg_amt
FROM t
GROUP BY EXTRACT(year FROM t.ds)
"""
assert normalize(result) == normalize(expected)
@pytest.mark.skipif(LooseVersion(sa.__version__) < '1.0.0' or
LooseVersion(sa.__version__) >= '1.0.2',
reason=("SQLAlchemy generates different code in < 1.0.0 "
"and >= 1.0.2"))
def test_date_grouper_repeats():
columns = [sa.Column('amount', sa.REAL),
sa.Column('ds', sa.TIMESTAMP)]
data = sa.Table('t', sa.MetaData(), *columns)
t = symbol('t', discover(data))
expr = by(t.ds.year, avg_amt=t.amount.mean())
result = str(compute(expr, data))
# FYI spark sql isn't able to parse this correctly
expected = """SELECT
EXTRACT(year FROM t.ds) as ds_year,
AVG(t.amount) as avg_amt
FROM t
GROUP BY ds_year
"""
assert normalize(result) == normalize(expected)
def test_transform_then_project_single_column():
expr = transform(t, foo=t.id + 1)[['foo', 'id']]
result = normalize(str(compute(expr, s)))
expected = normalize("""SELECT
accounts.id + :id_1 as foo,
accounts.id
FROM accounts""")
assert result == expected
def test_transform_then_project():
proj = ['foo', 'id']
expr = transform(t, foo=t.id + 1)[proj]
result = normalize(str(compute(expr, s)))
expected = normalize("""SELECT
accounts.id + :id_1 as foo,
accounts.id
FROM accounts""")
assert result == expected
def test_reduce_does_not_compose():
expr = by(t.name, counts=t.count()).counts.max()
result = str(compute(expr, s))
expected = """
SELECT max(alias.counts) AS counts_max
FROM
(SELECT count(accounts.id) AS counts
FROM accounts GROUP BY accounts.name) as alias"""
assert normalize(result) == normalize(expected)
@pytest.mark.xfail(raises=NotImplementedError)
def test_normalize_reduction():
expr = by(t.name, counts=t.count())
expr = transform(expr, normed_counts=expr.counts / expr.counts.max())
result = str(compute(expr, s))
expected = """WITH alias AS
(SELECT count(accounts.id) AS counts
FROM accounts GROUP BY accounts.name)
SELECT alias.counts / max(alias.counts) AS normed_counts
FROM alias"""
assert normalize(result) == normalize(expected)
def test_do_not_erase_group_by_functions_with_datetime():
t, s = tdate, sdate
expr = by(t[t.amount < 0].occurred_on.date,
avg_amount=t[t.amount < 0].amount.mean())
result = str(compute(expr, s))
expected = """SELECT
date(accdate.occurred_on) as occurred_on_date,
avg(accdate.amount) as avg_amount
FROM
accdate
WHERE
accdate.amount < :amount_1
GROUP BY
date(accdate.occurred_on)
"""
assert normalize(result) == normalize(expected)
def test_not():
expr = t.amount[~t.name.isin(('Billy', 'Bob'))]
result = str(compute(expr, s))
expected = """SELECT
accounts.amount
FROM
accounts
WHERE
accounts.name not in (:name_1, :name_2)
"""
assert normalize(result) == normalize(expected)
def test_slice():
start, stop, step = 50, 100, 1
result = str(compute(t[start:stop], s))
# Verifies that compute is translating the query correctly
assert result == str(select(s).offset(start).limit(stop))
# Verifies the query against expected SQL query
expected = """
SELECT accounts.name, accounts.amount, accounts.id FROM accounts
LIMIT :param_1 OFFSET :param_2
"""
assert normalize(str(result)) == normalize(str(expected))
# Step size of 1 should be alright
compute(t[start:stop:step], s)
@pytest.mark.xfail(raises=ValueError)
def test_slice_step():
start, stop, step = 50, 100, 2
compute(t[start:stop:step], s)
def test_datetime_to_date():
expr = tdate.occurred_on.date
result = str(compute(expr, sdate))
expected = """SELECT
DATE(accdate.occurred_on) as occurred_on_date
FROM
accdate
"""
assert normalize(result) == normalize(expected)
def test_sort_compose():
expr = t.name[:5].sort()
result = compute(expr, s)
expected = """select
anon_1.name
from (select
accounts.name as name
from
accounts
limit :param_1
offset :param_2) as anon_1
order by
anon_1.name asc"""
assert normalize(str(result)) == normalize(expected)
assert (normalize(str(compute(t.sort('name').name[:5], s))) !=
normalize(expected))
def test_coerce():
expr = t.amount.coerce(to='int64')
expected = """SELECT
cast(accounts.amount AS BIGINT) AS amount
FROM accounts"""
result = compute(expr, s)
assert normalize(str(result)) == normalize(expected)
def test_multi_column_by_after_transform():
tbl = transform(t, new_amount=t.amount + 1, one_two=t.amount * 2)
expr = by(tbl[['name', 'one_two']], avg_amt=tbl.new_amount.mean())
result = compute(expr, s)
expected = """SELECT
accounts.name,
accounts.amount * :amount_1 as one_two,
avg(accounts.amount + :amount_2) as avg_amt
FROM
accounts
GROUP BY
accounts.name, accounts.amount * :amount_1
"""
assert normalize(str(result)) == normalize(expected)
def test_multi_column_by_after_transform_and_filter():
tbl = t[t.name == 'Alice']
tbl = transform(tbl, new_amount=tbl.amount + 1, one_two=tbl.amount * 2)
expr = by(tbl[['name', 'one_two']], avg_amt=tbl.new_amount.mean())
result = compute(expr, s)
expected = """SELECT
accounts.name,
accounts.amount * :amount_1 as one_two,
avg(accounts.amount + :amount_2) as avg_amt
FROM
accounts
WHERE
accounts.name = :name_1
GROUP BY
accounts.name, accounts.amount * :amount_1
"""
assert normalize(str(result)) == normalize(expected)
def test_attribute_access_on_transform_filter():
tbl = transform(t, new_amount=t.amount + 1)
expr = tbl[tbl.name == 'Alice'].new_amount
result = compute(expr, s)
expected = """SELECT
accounts.amount + :amount_1 as new_amount
FROM
accounts
WHERE
accounts.name = :name_1
"""
assert normalize(str(result)) == normalize(expected)
def test_attribute_on_filter_transform_groupby():
tbl = t[t.name == 'Alice']
tbl = transform(tbl, new_amount=tbl.amount + 1, one_two=tbl.amount * 2)
gb = by(tbl[['name', 'one_two']], avg_amt=tbl.new_amount.mean())
expr = gb.avg_amt
result = compute(expr, s)
expected = """SELECT
avg(accounts.amount + :amount_1) as avg_amt
FROM
accounts
WHERE
accounts.name = :name_1
GROUP BY
accounts.name, accounts.amount * :amount_2
"""
assert normalize(str(result)) == normalize(expected)
def test_label_projection():
tbl = t[(t.name == 'Alice')]
tbl = transform(tbl, new_amount=tbl.amount + 1, one_two=tbl.amount * 2)
expr = tbl[['new_amount', 'one_two']]
# column selection shouldn't affect the resulting SQL
result = compute(expr[expr.new_amount > 1].one_two, s)
result2 = compute(expr.one_two[expr.new_amount > 1], s)
expected = """SELECT
accounts.amount * :amount_1 as one_two
FROM accounts
WHERE accounts.name = :name_1 and accounts.amount + :amount_2 > :param_1
"""
assert normalize(str(result)) == normalize(expected)
assert normalize(str(result2)) == normalize(expected)
def test_baseball_nested_by():
data = resource('sqlite:///%s' % example('teams.db'))
dshape = discover(data)
d = symbol('d', dshape)
expr = by(d.teams.name,
start_year=d.teams.yearID.min()).start_year.count_values()
result = compute(expr, data, post_compute=False)
expected = """SELECT
anon_1.start_year,
anon_1.count
FROM
(SELECT
alias.start_year as start_year,
count(alias.start_year) as count
FROM
(SELECT
min(teams.yearid) as start_year
FROM teams
GROUP BY teams.name) as alias
GROUP BY alias.start_year) as anon_1 ORDER BY anon_1.count DESC
"""
assert normalize(str(result).replace('"', '')) == normalize(expected)
def test_label_on_filter():
expr = t[t.name == 'Alice'].amount.label('foo').head(2)
result = compute(expr, s)
expected = """SELECT
accounts.amount AS foo
FROM
accounts
WHERE
accounts.name = :name_1
LIMIT :param_1
"""
assert normalize(str(result)) == normalize(expected)
def test_single_field_filter():
expr = t.amount[t.amount > 0]
result = compute(expr, s)
expected = """SELECT
accounts.amount
FROM accounts
WHERE accounts.amount > :amount_1
"""
assert normalize(str(result)) == normalize(expected)
def test_multiple_field_filter():
expr = t.name[t.amount > 0]
result = compute(expr, s)
expected = """SELECT
accounts.name
FROM accounts
WHERE accounts.amount > :amount_1
"""
assert normalize(str(result)) == normalize(expected)
def test_distinct_on_label():
expr = t.name.label('foo').distinct()
result = compute(expr, s)
expected = """SELECT
DISTINCT accounts.name AS foo
FROM accounts
"""
assert normalize(str(result)) == normalize(expected)
@pytest.mark.parametrize('n', [-1, 0, 1])
def test_shift_on_column(n):
expr = t.name.shift(n)
result = compute(expr, s)
expected = """SELECT
lag(accounts.name, :lag_1) over () as name
FROM accounts
"""
assert normalize(str(result)) == normalize(expected)
def test_empty_string_comparison_with_option_type():
expr = nt.amount[nt.name == '']
result = compute(expr, s)
expected = """
SELECT accounts.amount
FROM accounts
WHERE accounts.name = :name_1
"""
assert normalize(str(result)) == normalize(expected)
def test_tail_no_sort():
assert (
normalize(str(compute(t.head(), {t: s}))) ==
normalize(str(compute(t.tail(), {t: s})))
)
def test_tail_of_sort():
expected = normalize(str(compute(
t.sort('id', ascending=False).head(5).sort('id'),
{t: s},
)))
result = normalize(str(compute(t.sort('id').tail(5), {t: s})))
assert expected == result
def test_tail_sort_in_children():
expected = normalize(str(compute(
t.name.sort('id', ascending=False).head(5).sort('id'),
{t: s},
)))
result = normalize(str(compute(t.name.sort('id').tail(5), {t: s})))
assert expected == result
def test_selection_inner_inputs():
result = normalize(str(compute(t[t.id == tdate.id], {t: s, tdate: sdate})))
expected = normalize("""
select {a}.name, {a}.amount, {a}.id from {a}, {b} where {a}.id = {b}.id
""").format(a=s.name, b=sdate.name)
assert result == expected
|
{
"content_hash": "1f33622122003ec791b71755ab552a10",
"timestamp": "",
"source": "github",
"line_count": 1908,
"max_line_length": 89,
"avg_line_length": 30.362683438155138,
"alnum_prop": 0.5619174204239453,
"repo_name": "ChinaQuants/blaze",
"id": "bb3933c5a09fefb6df865e74796f38ff3415f38e",
"size": "57932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blaze/compute/tests/test_sql_compute.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "37"
},
{
"name": "Python",
"bytes": "777340"
},
{
"name": "Shell",
"bytes": "35"
}
],
"symlink_target": ""
}
|
"""distutils.command.install
Implements the Distutils 'install' command."""
import sys
import os
from distutils import log
from distutils.core import Command
from distutils.debug import DEBUG
from distutils.sysconfig import get_config_vars
from distutils.errors import DistutilsPlatformError
from distutils.file_util import write_file
from distutils.util import convert_path, subst_vars, change_root
from distutils.util import get_platform
from distutils.errors import DistutilsOptionError
from site import USER_BASE
from site import USER_SITE
HAS_USER_SITE = True
WINDOWS_SCHEME = {
'purelib': '$base/Lib/site-packages',
'platlib': '$base/Lib/site-packages',
'headers': '$base/Include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
}
INSTALL_SCHEMES = {
'unix_prefix': {
'purelib': '$base/lib/python$py_version_short/site-packages',
'platlib': '$platbase/$platlibdir/python$py_version_short/site-packages',
'headers': '$base/include/python$py_version_short$abiflags/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'unix_home': {
'purelib': '$base/lib/python',
'platlib': '$base/$platlibdir/python',
'headers': '$base/include/python/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'nt': WINDOWS_SCHEME,
'pypy': {
'purelib': '$base/site-packages',
'platlib': '$base/site-packages',
'headers': '$base/include/$dist_name',
'scripts': '$base/bin',
'data' : '$base',
},
'pypy_nt': {
'purelib': '$base/site-packages',
'platlib': '$base/site-packages',
'headers': '$base/include/$dist_name',
'scripts': '$base/Scripts',
'data' : '$base',
},
}
# user site schemes
if HAS_USER_SITE:
INSTALL_SCHEMES['nt_user'] = {
'purelib': '$usersite',
'platlib': '$usersite',
'headers': '$userbase/Python$py_version_nodot/Include/$dist_name',
'scripts': '$userbase/Python$py_version_nodot/Scripts',
'data' : '$userbase',
}
INSTALL_SCHEMES['unix_user'] = {
'purelib': '$usersite',
'platlib': '$usersite',
'headers':
'$userbase/include/python$py_version_short$abiflags/$dist_name',
'scripts': '$userbase/bin',
'data' : '$userbase',
}
# The keys to an installation scheme; if any new types of files are to be
# installed, be sure to add an entry to every installation scheme above,
# and to SCHEME_KEYS here.
SCHEME_KEYS = ('purelib', 'platlib', 'headers', 'scripts', 'data')
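# The '$name' placeholders in the schemes above are not literal paths; once
# finalize_options() has built self.config_vars, expand_dirs() pushes every
# selected entry through distutils.util.subst_vars.  A rough sketch with
# made-up values:
#     >>> from distutils.util import subst_vars
#     >>> subst_vars('$base/lib/python$py_version_short/site-packages',
#     ...            {'base': '/usr/local', 'py_version_short': '3.9'})
#     '/usr/local/lib/python3.9/site-packages'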
class install(Command):
description = "install everything from build directory"
user_options = [
# Select installation scheme and set base director(y|ies)
('prefix=', None,
"installation prefix"),
('exec-prefix=', None,
"(Unix only) prefix for platform-specific files"),
('home=', None,
"(Unix only) home directory to install under"),
# Or, just set the base director(y|ies)
('install-base=', None,
"base installation directory (instead of --prefix or --home)"),
('install-platbase=', None,
"base installation directory for platform-specific files " +
"(instead of --exec-prefix or --home)"),
('root=', None,
"install everything relative to this alternate root directory"),
# Or, explicitly set the installation scheme
('install-purelib=', None,
"installation directory for pure Python module distributions"),
('install-platlib=', None,
"installation directory for non-pure module distributions"),
('install-lib=', None,
"installation directory for all module distributions " +
"(overrides --install-purelib and --install-platlib)"),
('install-headers=', None,
"installation directory for C/C++ headers"),
('install-scripts=', None,
"installation directory for Python scripts"),
('install-data=', None,
"installation directory for data files"),
# Byte-compilation options -- see install_lib.py for details, as
# these are duplicated from there (but only install_lib does
# anything with them).
('compile', 'c', "compile .py to .pyc [default]"),
('no-compile', None, "don't compile .py files"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
# Miscellaneous control options
('force', 'f',
"force installation (overwrite any existing files)"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
# Where to install documentation (eventually!)
#('doc-format=', None, "format of documentation to generate"),
#('install-man=', None, "directory for Unix man pages"),
#('install-html=', None, "directory for HTML documentation"),
#('install-info=', None, "directory for GNU info files"),
('record=', None,
"filename in which to record list of installed files"),
]
boolean_options = ['compile', 'force', 'skip-build']
if HAS_USER_SITE:
user_options.append(('user', None,
"install in user site-package '%s'" % USER_SITE))
boolean_options.append('user')
negative_opt = {'no-compile' : 'compile'}
def initialize_options(self):
"""Initializes options."""
# High-level options: these select both an installation base
# and scheme.
self.prefix = None
self.exec_prefix = None
self.home = None
self.user = 0
# These select only the installation base; it's up to the user to
# specify the installation scheme (currently, that means supplying
# the --install-{platlib,purelib,scripts,data} options).
self.install_base = None
self.install_platbase = None
self.root = None
# These options are the actual installation directories; if not
# supplied by the user, they are filled in using the installation
# scheme implied by prefix/exec-prefix/home and the contents of
# that installation scheme.
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_userbase = USER_BASE
self.install_usersite = USER_SITE
self.compile = None
self.optimize = None
# Deprecated
# These two are for putting non-packagized distributions into their
# own directory and creating a .pth file if it makes sense.
# 'extra_path' comes from the setup file; 'install_path_file' can
# be turned off if it makes no sense to install a .pth file. (But
# better to install it uselessly than to guess wrong and not
# install it when it's necessary and would be used!) Currently,
# 'install_path_file' is always true unless some outsider meddles
# with it.
self.extra_path = None
self.install_path_file = 1
# 'force' forces installation, even if target files are not
# out-of-date. 'skip_build' skips running the "build" command,
# handy if you know it's not necessary. 'warn_dir' (which is *not*
# a user option, it's just there so the bdist_* commands can turn
# it off) determines whether we warn about installing to a
# directory not in sys.path.
self.force = 0
self.skip_build = 0
self.warn_dir = 1
# These are only here as a conduit from the 'build' command to the
# 'install_*' commands that do the real work. ('build_base' isn't
# actually used anywhere, but it might be useful in future.) They
# are not user options, because if the user told the install
# command where the build directory is, that wouldn't affect the
# build command.
self.build_base = None
self.build_lib = None
# Not defined yet because we don't know anything about
# documentation yet.
#self.install_man = None
#self.install_html = None
#self.install_info = None
self.record = None
# -- Option finalizing methods -------------------------------------
# (This is rather more involved than for most commands,
# because this is where the policy for installing third-
# party Python modules on various platforms given a wide
# array of user input is decided. Yes, it's quite complex!)
def finalize_options(self):
"""Finalizes options."""
# This method (and its helpers, like 'finalize_unix()',
# 'finalize_other()', and 'select_scheme()') is where the default
# installation directories for modules, extension modules, and
# anything else we care to install from a Python module
        # distribution are determined.  Thus, this code makes an important policy
# statement about how third-party stuff is added to a Python
# installation! Note that the actual work of installation is done
# by the relatively simple 'install_*' commands; they just take
# their orders from the installation directory options determined
# here.
# Check for errors/inconsistencies in the options; first, stuff
# that's wrong on any platform.
if ((self.prefix or self.exec_prefix or self.home) and
(self.install_base or self.install_platbase)):
raise DistutilsOptionError(
"must supply either prefix/exec-prefix/home or " +
"install-base/install-platbase -- not both")
if self.home and (self.prefix or self.exec_prefix):
raise DistutilsOptionError(
"must supply either home or prefix/exec-prefix -- not both")
if self.user and (self.prefix or self.exec_prefix or self.home or
self.install_base or self.install_platbase):
raise DistutilsOptionError("can't combine user with prefix, "
"exec_prefix/home, or install_(plat)base")
# Next, stuff that's wrong (or dubious) only on certain platforms.
if os.name != "posix":
if self.exec_prefix:
self.warn("exec-prefix option ignored on this platform")
self.exec_prefix = None
# Now the interesting logic -- so interesting that we farm it out
# to other methods. The goal of these methods is to set the final
# values for the install_{lib,scripts,data,...} options, using as
# input a heady brew of prefix, exec_prefix, home, install_base,
# install_platbase, user-supplied versions of
# install_{purelib,platlib,lib,scripts,data,...}, and the
# INSTALL_SCHEME dictionary above. Phew!
self.dump_dirs("pre-finalize_{unix,other}")
if os.name == 'posix':
self.finalize_unix()
else:
self.finalize_other()
self.dump_dirs("post-finalize_{unix,other}()")
# Expand configuration variables, tilde, etc. in self.install_base
# and self.install_platbase -- that way, we can use $base or
# $platbase in the other installation directories and not worry
# about needing recursive variable expansion (shudder).
py_version = sys.version.split()[0]
(prefix, exec_prefix) = get_config_vars('prefix', 'exec_prefix')
try:
abiflags = sys.abiflags
except AttributeError:
# sys.abiflags may not be defined on all platforms.
abiflags = ''
self.config_vars = {'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': '%d.%d' % sys.version_info[:2],
'py_version_nodot': '%d%d' % sys.version_info[:2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
'abiflags': abiflags,
'platlibdir': getattr(sys, 'platlibdir', 'lib'),
}
if HAS_USER_SITE:
self.config_vars['userbase'] = self.install_userbase
self.config_vars['usersite'] = self.install_usersite
self.expand_basedirs()
self.dump_dirs("post-expand_basedirs()")
# Now define config vars for the base directories so we can expand
# everything else.
self.config_vars['base'] = self.install_base
self.config_vars['platbase'] = self.install_platbase
if DEBUG:
from pprint import pprint
print("config vars:")
pprint(self.config_vars)
# Expand "~" and configuration variables in the installation
# directories.
self.expand_dirs()
self.dump_dirs("post-expand_dirs()")
# Create directories in the home dir:
if self.user:
self.create_home_path()
# Pick the actual directory to install all modules to: either
# install_purelib or install_platlib, depending on whether this
# module distribution is pure or not. Of course, if the user
# already specified install_lib, use their selection.
if self.install_lib is None:
if self.distribution.has_ext_modules(): # has extensions: non-pure
self.install_lib = self.install_platlib
else:
self.install_lib = self.install_purelib
# Convert directories from Unix /-separated syntax to the local
# convention.
self.convert_paths('lib', 'purelib', 'platlib',
'scripts', 'data', 'headers',
'userbase', 'usersite')
# Deprecated
# Well, we're not actually fully completely finalized yet: we still
# have to deal with 'extra_path', which is the hack for allowing
# non-packagized module distributions (hello, Numerical Python!) to
# get their own directories.
self.handle_extra_path()
self.install_libbase = self.install_lib # needed for .pth file
self.install_lib = os.path.join(self.install_lib, self.extra_dirs)
# If a new root directory was supplied, make all the installation
# dirs relative to it.
if self.root is not None:
self.change_roots('libbase', 'lib', 'purelib', 'platlib',
'scripts', 'data', 'headers')
self.dump_dirs("after prepending root")
# Find out the build directories, ie. where to install from.
self.set_undefined_options('build',
('build_base', 'build_base'),
('build_lib', 'build_lib'))
# Punt on doc directories for now -- after all, we're punting on
# documentation completely!
def dump_dirs(self, msg):
"""Dumps the list of user options."""
if not DEBUG:
return
from distutils.fancy_getopt import longopt_xlate
log.debug(msg + ":")
for opt in self.user_options:
opt_name = opt[0]
if opt_name[-1] == "=":
opt_name = opt_name[0:-1]
if opt_name in self.negative_opt:
opt_name = self.negative_opt[opt_name]
opt_name = opt_name.translate(longopt_xlate)
val = not getattr(self, opt_name)
else:
opt_name = opt_name.translate(longopt_xlate)
val = getattr(self, opt_name)
log.debug(" %s: %s", opt_name, val)
def finalize_unix(self):
"""Finalizes options for posix platforms."""
if self.install_base is not None or self.install_platbase is not None:
if ((self.install_lib is None and
self.install_purelib is None and
self.install_platlib is None) or
self.install_headers is None or
self.install_scripts is None or
self.install_data is None):
raise DistutilsOptionError(
"install-base or install-platbase supplied, but "
"installation scheme is incomplete")
return
if self.user:
if self.install_userbase is None:
raise DistutilsPlatformError(
"User base directory is not specified")
self.install_base = self.install_platbase = self.install_userbase
self.select_scheme("unix_user")
elif self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
if self.prefix is None:
if self.exec_prefix is not None:
raise DistutilsOptionError(
"must not supply exec-prefix without prefix")
self.prefix = os.path.normpath(sys.prefix)
self.exec_prefix = os.path.normpath(sys.exec_prefix)
else:
if self.exec_prefix is None:
self.exec_prefix = self.prefix
self.install_base = self.prefix
self.install_platbase = self.exec_prefix
self.select_scheme("unix_prefix")
def finalize_other(self):
"""Finalizes options for non-posix platforms"""
if self.user:
if self.install_userbase is None:
raise DistutilsPlatformError(
"User base directory is not specified")
self.install_base = self.install_platbase = self.install_userbase
self.select_scheme(os.name + "_user")
elif self.home is not None:
self.install_base = self.install_platbase = self.home
self.select_scheme("unix_home")
else:
if self.prefix is None:
self.prefix = os.path.normpath(sys.prefix)
self.install_base = self.install_platbase = self.prefix
try:
self.select_scheme(os.name)
except KeyError:
raise DistutilsPlatformError(
"I don't know how to install stuff on '%s'" % os.name)
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
if (hasattr(sys, 'pypy_version_info') and
not name.endswith(('_user', '_home'))):
if os.name == 'nt':
name = 'pypy_nt'
else:
name = 'pypy'
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
self._expand_attrs(['install_purelib', 'install_platlib',
'install_lib', 'install_headers',
'install_scripts', 'install_data',])
def convert_paths(self, *names):
"""Call `convert_path` over `names`."""
for name in names:
attr = "install_" + name
setattr(self, attr, convert_path(getattr(self, attr)))
def handle_extra_path(self):
"""Set `path_file` and `extra_dirs` using `extra_path`."""
if self.extra_path is None:
self.extra_path = self.distribution.extra_path
if self.extra_path is not None:
log.warn(
"Distribution option extra_path is deprecated. "
"See issue27919 for details."
)
if isinstance(self.extra_path, str):
self.extra_path = self.extra_path.split(',')
if len(self.extra_path) == 1:
path_file = extra_dirs = self.extra_path[0]
elif len(self.extra_path) == 2:
path_file, extra_dirs = self.extra_path
else:
raise DistutilsOptionError(
"'extra_path' option must be a list, tuple, or "
"comma-separated string with 1 or 2 elements")
# convert to local form in case Unix notation used (as it
# should be in setup scripts)
extra_dirs = convert_path(extra_dirs)
else:
path_file = None
extra_dirs = ''
# XXX should we warn if path_file and not extra_dirs? (in which
# case the path file would be harmless but pointless)
self.path_file = path_file
self.extra_dirs = extra_dirs
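    # For illustration (hypothetical values): a plain name makes the .pth file
    # and the extra directory share that name,
    #     extra_path = 'Numeric'             -> path_file='Numeric', extra_dirs='Numeric'
    # while a "pthfile,dir" pair splits the two roles,
    #     extra_path = 'Numeric,numeric-dir' -> path_file='Numeric', extra_dirs='numeric-dir'
    # create_path_file() later writes extra_dirs as the single line of the
    # .pth file, which is what puts the extra directory onto sys.path.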
def change_roots(self, *names):
"""Change the install directories pointed by name using root."""
for name in names:
attr = "install_" + name
setattr(self, attr, change_root(self.root, getattr(self, attr)))
def create_home_path(self):
"""Create directories under ~."""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in self.config_vars.items():
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0o700)" % path)
os.makedirs(path, 0o700)
# -- Command execution methods -------------------------------------
def run(self):
"""Runs the command."""
# Obviously have to build before we can install
if not self.skip_build:
self.run_command('build')
# If we built for any other platform, we can't install.
build_plat = self.distribution.get_command_obj('build').plat_name
# check warn_dir - it is a clue that the 'install' is happening
# internally, and not to sys.path, so we don't check the platform
# matches what we are running.
if self.warn_dir and build_plat != get_platform():
raise DistutilsPlatformError("Can't install when "
"cross-compiling")
# Run all sub-commands (at least those that need to be run)
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
if self.path_file:
self.create_path_file()
# write list of installed files, if requested.
if self.record:
outputs = self.get_outputs()
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in range(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
self.execute(write_file,
(self.record, outputs),
"writing list of installed files to '%s'" %
self.record)
sys_path = map(os.path.normpath, sys.path)
sys_path = map(os.path.normcase, sys_path)
install_lib = os.path.normcase(os.path.normpath(self.install_lib))
if (self.warn_dir and
not (self.path_file and self.install_path_file) and
install_lib not in sys_path):
log.debug(("modules installed to '%s', which is not in "
"Python's module search path (sys.path) -- "
"you'll have to change the search path yourself"),
self.install_lib)
def create_path_file(self):
"""Creates the .pth file"""
filename = os.path.join(self.install_libbase,
self.path_file + ".pth")
if self.install_path_file:
self.execute(write_file,
(filename, [self.extra_dirs]),
"creating %s" % filename)
else:
self.warn("path file '%s' not created" % filename)
# -- Reporting methods ---------------------------------------------
def get_outputs(self):
"""Assembles the outputs of all the sub-commands."""
outputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
# Add the contents of cmd.get_outputs(), ensuring
# that outputs doesn't contain duplicate entries
for filename in cmd.get_outputs():
if filename not in outputs:
outputs.append(filename)
if self.path_file and self.install_path_file:
outputs.append(os.path.join(self.install_libbase,
self.path_file + ".pth"))
return outputs
def get_inputs(self):
"""Returns the inputs of all the sub-commands"""
# XXX gee, this looks familiar ;-(
inputs = []
for cmd_name in self.get_sub_commands():
cmd = self.get_finalized_command(cmd_name)
inputs.extend(cmd.get_inputs())
return inputs
# -- Predicates for sub-command list -------------------------------
def has_lib(self):
"""Returns true if the current distribution has any Python
modules to install."""
return (self.distribution.has_pure_modules() or
self.distribution.has_ext_modules())
def has_headers(self):
"""Returns true if the current distribution has any headers to
install."""
return self.distribution.has_headers()
def has_scripts(self):
"""Returns true if the current distribution has any scripts to.
install."""
return self.distribution.has_scripts()
def has_data(self):
"""Returns true if the current distribution has any data to.
install."""
return self.distribution.has_data_files()
# 'sub_commands': a list of commands this command might have to run to
# get its work done. See cmd.py for more info.
sub_commands = [('install_lib', has_lib),
('install_headers', has_headers),
('install_scripts', has_scripts),
('install_data', has_data),
('install_egg_info', lambda self:True),
]
|
{
"content_hash": "a5fd9fd89d7c761a1c3e95693572e604",
"timestamp": "",
"source": "github",
"line_count": 677,
"max_line_length": 81,
"avg_line_length": 40.602658788774,
"alnum_prop": 0.5702124563445867,
"repo_name": "ryfeus/lambda-packs",
"id": "400fb45dd0869f3c173a2e16a99894a15c484c18",
"size": "27488",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "Sklearn_x86/source/setuptools/_distutils/command/install.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
}
|
import os
import glideinwms_tarfile
import cStringIO
class FileDoesNotExist(Exception):
"""File does not exist exception
@note: Include the file name in the full_path
@ivar full_path: The full path to the missing file. Includes the file name
"""
def __init__(self, full_path):
message = "The file, %s, does not exist." % full_path
# Call the base class constructor with the parameters it needs
Exception.__init__(self, message)
class GlideinTar:
"""This class provides a container for creating tarballs. The class provides
methods to add files and string data (ends up as a file in the tarball).
The tarball can be written to a file on disk or written to memory.
"""
def __init__(self):
"""Set up the strings dict and the files list
The strings dict will hold string data that is to be added to the tar
file. The key will be the file name and the value will be the file
data. The files list contains a list of file paths that will be added
to the tar file.
"""
self.strings = {}
self.files = []
def add_file(self, filename, arc_dirname):
"""
Add a filepath to the files list
@type filename: string
@param filename: The file path to the file that will eventually be
written to the tarball.
@type arc_dirname: string
@param arc_dirname: This is the directory that the file will show up
under in the tarball
"""
if os.path.exists(filename):
self.files.append((filename, arc_dirname))
else:
raise FileDoesNotExist(filename)
def add_string(self, name, string_data):
"""
Add a string to the string dictionary.
@type name: string
@param name: A string specifying the "filename" within the tarball that
the string_data will be written to.
@type string_data: string
@param string_data: The contents that will be written to a "file" within
the tarball.
"""
self.strings[name] = string_data
def create_tar(self, tf):
"""Takes the provided tar file object and adds all the specified data
to it. The strings dictionary is parsed such that the key name is the
file name and the value is the file data in the tar file.
@type tf: Tar File
@param tf: The Tar File Object that will be written to
"""
for file in self.files:
file, dirname = file
if dirname:
tf.add(file, arcname=os.path.join(dirname, os.path.split(file)[-1]))
else:
tf.add(file)
for filename, string in self.strings.items():
fd_str = cStringIO.StringIO(string)
fd_str.seek(0)
ti = glideinwms_tarfile.TarInfo()
ti.size = len(string)
ti.name = filename
ti.type = glideinwms_tarfile.REGTYPE
ti.mode = 0400
tf.addfile(ti, fd_str)
def create_tar_file(self, archive_full_path, compression="gz"):
"""Creates a tarball and writes it out to the file specified in fd
@Note: we don't have to worry about ReadError, since we don't allow
appending. We only write to a tarball on create.
        @param archive_full_path: The full path of the file that the tarball
            will be written to
        @param compression: The type of compression that should be used
        @raise glideinwms_tarfile.CompressionError: This exception can be raised if an
            invalid compression type has been passed in
"""
tar_mode = "w:%s" % compression
tf = glideinwms_tarfile.open(archive_full_path, mode=tar_mode)
self.create_tar(tf)
tf.close()
def create_tar_blob(self, compression="gz"):
"""Creates a tarball and writes it out to memory
@Note: we don't have to worry about ReadError, since we don't allow
appending. We only write to a tarball on create.
        @param compression: The type of compression that should be used
        @return: A string containing the compressed tarball built in memory
        @raise glideinwms_tarfile.CompressionError: This exception can be raised if an
            invalid compression type has been passed in
"""
from cStringIO import StringIO
tar_mode = "w:%s" % compression
file_out = StringIO()
tf = glideinwms_tarfile.open(fileobj=file_out, mode=tar_mode)
self.create_tar(tf)
tf.close()
return file_out.getvalue()
def is_tarfile(self, full_path):
"""Checks to see if the tar file specified is valid and can be read.
Returns True if the file is a valid tar file and it can be read.
Returns False if not valid or it cannot be read.
@param full_path: The full path to the tar file. Includes the file name
@return: True/False
"""
return glideinwms_tarfile.is_tarfile(full_path)
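# A minimal usage sketch, kept behind the __main__ guard; the file names and
# string contents below are hypothetical and only exercise the methods above.
if __name__ == "__main__":
    demo_tar = GlideinTar()
    # Strings become files inside the tarball; real files keep their basename
    # under the optional archive directory passed to add_file().
    demo_tar.add_string("job.descript", "GLIDEIN_Name demo\n")
    # demo_tar.add_file("/path/to/existing/file", "config")  # FileDoesNotExist if missing
    blob = demo_tar.create_tar_blob(compression="gz")  # tarball built in memory
    print("created a %d byte compressed tarball in memory" % len(blob))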
|
{
"content_hash": "490fcf097045eb64965e9dc285abf93d",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 86,
"avg_line_length": 38.14393939393939,
"alnum_prop": 0.6190665342601788,
"repo_name": "bbockelm/glideinWMS",
"id": "410709edeacd2c0320ec20f1f9d442deaa2b30a3",
"size": "5035",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/tarSupport.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Awk",
"bytes": "4617"
},
{
"name": "HTML",
"bytes": "380368"
},
{
"name": "JavaScript",
"bytes": "11648"
},
{
"name": "Python",
"bytes": "2005860"
},
{
"name": "Shell",
"bytes": "239244"
},
{
"name": "XSLT",
"bytes": "4667"
}
],
"symlink_target": ""
}
|
"""The tests for mqtt select component."""
import json
from unittest.mock import patch
import pytest
from homeassistant.components import select
from homeassistant.components.mqtt.select import (
CONF_OPTIONS,
MQTT_SELECT_ATTRIBUTES_BLOCKED,
)
from homeassistant.components.select import (
ATTR_OPTION,
ATTR_OPTIONS,
DOMAIN as SELECT_DOMAIN,
SERVICE_SELECT_OPTION,
)
from homeassistant.const import ATTR_ASSUMED_STATE, ATTR_ENTITY_ID, STATE_UNKNOWN
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_setting_blocked_attribute_via_mqtt_json_message,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import async_fire_mqtt_message
DEFAULT_CONFIG = {
select.DOMAIN: {
"platform": "mqtt",
"name": "test",
"command_topic": "test-topic",
"options": ["milk", "beer"],
}
}
async def test_run_select_setup(hass, mqtt_mock):
"""Test that it fetches the given payload."""
topic = "test/select"
await async_setup_component(
hass,
"select",
{
"select": {
"platform": "mqtt",
"state_topic": topic,
"command_topic": topic,
"name": "Test Select",
"options": ["milk", "beer"],
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, topic, "milk")
await hass.async_block_till_done()
state = hass.states.get("select.test_select")
assert state.state == "milk"
async_fire_mqtt_message(hass, topic, "beer")
await hass.async_block_till_done()
state = hass.states.get("select.test_select")
assert state.state == "beer"
async def test_value_template(hass, mqtt_mock):
"""Test that it fetches the given payload with a template."""
topic = "test/select"
await async_setup_component(
hass,
"select",
{
"select": {
"platform": "mqtt",
"state_topic": topic,
"command_topic": topic,
"name": "Test Select",
"options": ["milk", "beer"],
"value_template": "{{ value_json.val }}",
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, topic, '{"val":"milk"}')
await hass.async_block_till_done()
state = hass.states.get("select.test_select")
assert state.state == "milk"
async_fire_mqtt_message(hass, topic, '{"val":"beer"}')
await hass.async_block_till_done()
state = hass.states.get("select.test_select")
assert state.state == "beer"
async_fire_mqtt_message(hass, topic, '{"val": null}')
await hass.async_block_till_done()
state = hass.states.get("select.test_select")
assert state.state == STATE_UNKNOWN
async def test_run_select_service_optimistic(hass, mqtt_mock):
"""Test that set_value service works in optimistic mode."""
topic = "test/select"
fake_state = ha.State("select.test", "milk")
with patch(
"homeassistant.helpers.restore_state.RestoreEntity.async_get_last_state",
return_value=fake_state,
):
assert await async_setup_component(
hass,
select.DOMAIN,
{
"select": {
"platform": "mqtt",
"command_topic": topic,
"name": "Test Select",
"options": ["milk", "beer"],
}
},
)
await hass.async_block_till_done()
state = hass.states.get("select.test_select")
assert state.state == "milk"
assert state.attributes.get(ATTR_ASSUMED_STATE)
await hass.services.async_call(
SELECT_DOMAIN,
SERVICE_SELECT_OPTION,
{ATTR_ENTITY_ID: "select.test_select", ATTR_OPTION: "beer"},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with(topic, "beer", 0, False)
mqtt_mock.async_publish.reset_mock()
state = hass.states.get("select.test_select")
assert state.state == "beer"
async def test_run_select_service(hass, mqtt_mock):
"""Test that set_value service works in non optimistic mode."""
cmd_topic = "test/select/set"
state_topic = "test/select"
assert await async_setup_component(
hass,
select.DOMAIN,
{
"select": {
"platform": "mqtt",
"command_topic": cmd_topic,
"state_topic": state_topic,
"name": "Test Select",
"options": ["milk", "beer"],
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, state_topic, "beer")
state = hass.states.get("select.test_select")
assert state.state == "beer"
await hass.services.async_call(
SELECT_DOMAIN,
SERVICE_SELECT_OPTION,
{ATTR_ENTITY_ID: "select.test_select", ATTR_OPTION: "milk"},
blocking=True,
)
mqtt_mock.async_publish.assert_called_once_with(cmd_topic, "milk", 0, False)
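    # In non-optimistic mode the entity keeps the last value seen on the state
    # topic, so selecting "milk" only publishes the command; the state remains
    # "beer" until a new state message arrives.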
state = hass.states.get("select.test_select")
assert state.state == "beer"
async def test_availability_when_connection_lost(hass, mqtt_mock):
"""Test availability after MQTT disconnection."""
await help_test_availability_when_connection_lost(
hass, mqtt_mock, select.DOMAIN, DEFAULT_CONFIG
)
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, select.DOMAIN, DEFAULT_CONFIG
)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_payload(
hass, mqtt_mock, select.DOMAIN, DEFAULT_CONFIG
)
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
await help_test_custom_availability_payload(
hass, mqtt_mock, select.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, select.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_blocked_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_blocked_attribute_via_mqtt_json_message(
hass, mqtt_mock, select.DOMAIN, DEFAULT_CONFIG, MQTT_SELECT_ATTRIBUTES_BLOCKED
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, select.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, select.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, select.DOMAIN, DEFAULT_CONFIG
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, select.DOMAIN, DEFAULT_CONFIG
)
async def test_unique_id(hass, mqtt_mock):
"""Test unique id option only creates one select per unique_id."""
config = {
select.DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"state_topic": "test-topic",
"command_topic": "test-topic",
"unique_id": "TOTALLY_UNIQUE",
"options": ["milk", "beer"],
},
{
"platform": "mqtt",
"name": "Test 2",
"state_topic": "test-topic",
"command_topic": "test-topic",
"unique_id": "TOTALLY_UNIQUE",
"options": ["milk", "beer"],
},
]
}
await help_test_unique_id(hass, mqtt_mock, select.DOMAIN, config)
async def test_discovery_removal_select(hass, mqtt_mock, caplog):
"""Test removal of discovered select."""
data = json.dumps(DEFAULT_CONFIG[select.DOMAIN])
await help_test_discovery_removal(hass, mqtt_mock, caplog, select.DOMAIN, data)
async def test_discovery_update_select(hass, mqtt_mock, caplog):
"""Test update of discovered select."""
data1 = '{ "name": "Beer", "state_topic": "test-topic", "command_topic": "test-topic", "options": ["milk", "beer"]}'
data2 = '{ "name": "Milk", "state_topic": "test-topic", "command_topic": "test-topic", "options": ["milk", "beer"]}'
await help_test_discovery_update(
hass, mqtt_mock, caplog, select.DOMAIN, data1, data2
)
async def test_discovery_update_unchanged_select(hass, mqtt_mock, caplog):
"""Test update of discovered select."""
data1 = '{ "name": "Beer", "state_topic": "test-topic", "command_topic": "test-topic", "options": ["milk", "beer"]}'
with patch(
"homeassistant.components.mqtt.select.MqttSelect.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, select.DOMAIN, data1, discovery_update
)
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
data1 = '{ "name": "Beer" }'
data2 = '{ "name": "Milk", "state_topic": "test-topic", "command_topic": "test-topic", "options": ["milk", "beer"]}'
await help_test_discovery_broken(
hass, mqtt_mock, caplog, select.DOMAIN, data1, data2
)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT select device registry integration."""
await help_test_entity_device_info_with_connection(
hass, mqtt_mock, select.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT select device registry integration."""
await help_test_entity_device_info_with_identifier(
hass, mqtt_mock, select.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
hass, mqtt_mock, select.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_remove(hass, mqtt_mock):
"""Test device registry remove."""
await help_test_entity_device_info_remove(
hass, mqtt_mock, select.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, select.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
"""Test MQTT discovery update when entity_id is updated."""
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, select.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_debug_info_message(hass, mqtt_mock):
"""Test MQTT debug info."""
await help_test_entity_debug_info_message(
hass, mqtt_mock, select.DOMAIN, DEFAULT_CONFIG, payload="milk"
)
async def test_options_attributes(hass, mqtt_mock):
"""Test options attribute."""
topic = "test/select"
await async_setup_component(
hass,
"select",
{
"select": {
"platform": "mqtt",
"state_topic": topic,
"command_topic": topic,
"name": "Test select",
"options": ["milk", "beer"],
}
},
)
await hass.async_block_till_done()
state = hass.states.get("select.test_select")
assert state.attributes.get(ATTR_OPTIONS) == ["milk", "beer"]
async def test_invalid_options(hass, caplog, mqtt_mock):
"""Test invalid options."""
topic = "test/select"
await async_setup_component(
hass,
"select",
{
"select": {
"platform": "mqtt",
"state_topic": topic,
"command_topic": topic,
"name": "Test Select",
"options": "beer",
}
},
)
await hass.async_block_till_done()
assert f"'{CONF_OPTIONS}' must include at least 2 options" in caplog.text
async def test_mqtt_payload_not_an_option_warning(hass, caplog, mqtt_mock):
"""Test warning for MQTT payload which is not a valid option."""
topic = "test/select"
await async_setup_component(
hass,
"select",
{
"select": {
"platform": "mqtt",
"state_topic": topic,
"command_topic": topic,
"name": "Test Select",
"options": ["milk", "beer"],
}
},
)
await hass.async_block_till_done()
async_fire_mqtt_message(hass, topic, "öl")
await hass.async_block_till_done()
assert (
"Invalid option for select.test_select: 'öl' (valid options: ['milk', 'beer'])"
in caplog.text
)
|
{
"content_hash": "22c67b7f43c688f828f977e66f4afcb3",
"timestamp": "",
"source": "github",
"line_count": 459,
"max_line_length": 120,
"avg_line_length": 31.70806100217865,
"alnum_prop": 0.6157757317575924,
"repo_name": "lukas-hetzenecker/home-assistant",
"id": "f2e48e10dc554ad2ecd744753b84abb280f679c7",
"size": "14556",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/mqtt/test_select.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "38023745"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
def test_q_application(q_application):
from PyQt4 import QtGui
assert QtGui.QX11Info.display()
|
{
"content_hash": "d0a64ebff38b7b1b9ff39cd236a23a3f",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 38,
"avg_line_length": 34.333333333333336,
"alnum_prop": 0.7475728155339806,
"repo_name": "manahl/pytest-plugins",
"id": "0e1c5125692fa37dedd9f302eb5ffc060fb294ad",
"size": "104",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytest-qt-app/tests/integration/test_q_application.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1581"
},
{
"name": "Python",
"bytes": "227066"
},
{
"name": "Shell",
"bytes": "5947"
}
],
"symlink_target": ""
}
|
import ctypes
import pathlib
import atexit
from pkg_resources import resource_filename
from typing import List
lib = ctypes.cdll.LoadLibrary(resource_filename('dex', 'libDex.so'))
def tagged_union(name: str, members: List[type]):
named_members = [(f"t{i}", member) for i, member in enumerate(members)]
payload = type(name + "Payload", (ctypes.Union,), {"_fields_": named_members})
union = type(name, (ctypes.Structure,), {
"_fields_": [("tag", ctypes.c_uint64), ("payload", payload)],
"value": property(
fget=lambda self: getattr(self.payload, f"t{self.tag}"),
fset=lambda self, value: setattr(self.payload, f"t{self.tag}", value)),
"Payload": payload,
})
return union
CLit = tagged_union("Lit", [
ctypes.c_int64,
ctypes.c_int32,
ctypes.c_uint8,
ctypes.c_double,
ctypes.c_float,
ctypes.c_uint32,
ctypes.c_uint64
])
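# Illustration (hypothetical values) of how these generated unions are used:
# 'tag' picks a member of the type list above (0 -> c_int64, 3 -> c_double,
# ...) and 'value' reads or writes that member.
#     >>> lit = CLit()
#     >>> lit.tag = 3
#     >>> lit.value = 2.5   # stored in the c_double slot of the payload
#     >>> lit.value
#     2.5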
class CRectArray(ctypes.Structure):
_fields_ = [("data", ctypes.c_void_p),
("shape_ptr", ctypes.POINTER(ctypes.c_int64)),
("strides_ptr", ctypes.POINTER(ctypes.c_int64))]
CAtom = tagged_union("CAtom", [CLit, CRectArray])
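# Sanity check below assumes a 64-bit platform: an 8 byte c_uint64 tag plus
# the largest payload member, CRectArray (three pointer-sized fields, 24
# bytes), comes to 32 bytes.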
assert ctypes.sizeof(CAtom) == 4 * 8
class HsAtom(ctypes.Structure): pass
class HsContext(ctypes.Structure): pass
class NativeFunctionObj(ctypes.Structure): pass
class NativeFunctionSignature(ctypes.Structure):
_fields_ = [("arg", ctypes.c_char_p),
("res", ctypes.c_char_p),
("ccall", ctypes.c_char_p)]
class ExportCC:
def __init__(self, value):
self._as_parameter_ = ctypes.c_int32(value)
@classmethod
def from_param(cls, p):
return p._as_parameter_
FlatCC = ExportCC(0)
XLACC = ExportCC(1)
HsAtomPtr = ctypes.POINTER(HsAtom)
HsContextPtr = ctypes.POINTER(HsContext)
CAtomPtr = ctypes.POINTER(CAtom)
NativeFunctionSignaturePtr = ctypes.POINTER(NativeFunctionSignature)
NativeFunction = ctypes.POINTER(NativeFunctionObj)
def dex_func(name, *signature):
argtypes, restype = signature[:-1], signature[-1]
f = getattr(lib, name)
f.restype = restype
f.argtypes = argtypes
return f
init = dex_func('dexInit', None)
fini = dex_func('dexFini', None)
getError = dex_func('dexGetError', ctypes.c_char_p)
createContext = dex_func('dexCreateContext', HsContextPtr)
destroyContext = dex_func('dexDestroyContext', HsContextPtr, None)
forkContext = dex_func('dexForkContext', HsContextPtr, HsContextPtr)
eval = dex_func('dexEval', HsContextPtr, ctypes.c_char_p, ctypes.c_int)
lookup = dex_func('dexLookup', HsContextPtr, ctypes.c_char_p, HsAtomPtr)
freshName = dex_func('dexFreshName', HsContextPtr, ctypes.c_char_p)
print = dex_func('dexPrint', HsContextPtr, HsAtomPtr, ctypes.c_char_p)
toCAtom = dex_func('dexToCAtom', HsAtomPtr, CAtomPtr, ctypes.c_int)
fromCAtom = dex_func('dexFromCAtom', CAtomPtr, HsAtomPtr)
compile = dex_func('dexCompile', HsContextPtr, ExportCC, HsAtomPtr, NativeFunction)
unload = dex_func('dexUnload', HsContextPtr, NativeFunction, None)
getFunctionSignature = dex_func('dexGetFunctionSignature', HsContextPtr, NativeFunction, NativeFunctionSignaturePtr)
freeFunctionSignature = dex_func('dexFreeFunctionSignature', NativeFunctionSignaturePtr, None)
xlaCpuTrampoline = lib.dexXLACPUTrampoline
init()
nofree = False
@atexit.register
def _teardown():
global nofree
fini()
    nofree = True  # Don't destroy any Haskell objects after the RTS has been shut down
def as_cstr(x: str):
return ctypes.c_char_p(x.encode('ascii'))
def from_cstr(cx):
return cx.decode('ascii')
def raise_from_dex():
raise RuntimeError(from_cstr(getError()))
|
{
"content_hash": "de561b9ebd1db1c67c9faf8239e3275a",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 117,
"avg_line_length": 33.398148148148145,
"alnum_prop": 0.702800110895481,
"repo_name": "google-research/dex-lang",
"id": "a4ec8e18c22df848110685eed13fdbcc19cbc961",
"size": "3802",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/dex/api.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "665"
},
{
"name": "C++",
"bytes": "12300"
},
{
"name": "CSS",
"bytes": "1734"
},
{
"name": "Emacs Lisp",
"bytes": "1773"
},
{
"name": "HTML",
"bytes": "1261"
},
{
"name": "Haskell",
"bytes": "1673331"
},
{
"name": "JavaScript",
"bytes": "6560"
},
{
"name": "Julia",
"bytes": "25339"
},
{
"name": "Makefile",
"bytes": "15034"
},
{
"name": "Nix",
"bytes": "3324"
},
{
"name": "Python",
"bytes": "101678"
},
{
"name": "Shell",
"bytes": "2729"
}
],
"symlink_target": ""
}
|
from buffer import Buffer
from connection import Connection
from friend import Friend
from ids import GID
from servers import servers
from steamid import SteamID
import web
|
{
"content_hash": "e55f58499304b8712985b6149b94e126",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 33,
"avg_line_length": 24.714285714285715,
"alnum_prop": 0.8497109826589595,
"repo_name": "lunixbochs/vaporbat",
"id": "be0426d7cc217087fe1f590d6c990387002433ea",
"size": "173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vaporbat/steam/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "142376"
},
{
"name": "Shell",
"bytes": "120"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0018_delete_section'),
]
operations = [
migrations.AlterField(
model_name='chapter',
name='color',
field=models.CharField(blank=True, choices=[('info', 'Blue'), ('primary', 'Rose'), ('danger', 'Red'), ('warning', 'Yellow'), ('success', 'Green'), ('default', 'Gray')], default='default', max_length=30, verbose_name='color'),
),
migrations.AlterField(
model_name='chapter',
name='description',
field=models.TextField(blank=True, verbose_name='description'),
),
migrations.AlterField(
model_name='chapter',
name='description_en',
field=models.TextField(blank=True, null=True, verbose_name='description'),
),
migrations.AlterField(
model_name='chapter',
name='description_it',
field=models.TextField(blank=True, null=True, verbose_name='description'),
),
migrations.AlterField(
model_name='chapter',
name='icon',
field=models.CharField(blank=True, max_length=30, verbose_name='icon'),
),
]
|
{
"content_hash": "cd36476c8b47446587952309e1b2764f",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 237,
"avg_line_length": 35.72222222222222,
"alnum_prop": 0.5637636080870918,
"repo_name": "flavoi/diventi",
"id": "84e8b764a360c5549b2f4f05711656300b488a3e",
"size": "1335",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diventi/products/migrations/0019_auto_20190524_0738.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "385265"
},
{
"name": "Procfile",
"bytes": "46"
},
{
"name": "Python",
"bytes": "826530"
}
],
"symlink_target": ""
}
|
import time
from urllib.parse import urljoin
import pytest
import requests
from generic_test_code.common import (
generic_correct_upstream_dest_test,
generic_correct_upstream_request_test,
header_is_absent,
verify_header,
)
from mocker.endpoints.marathon import (
SCHEDULER_APP_ALWAYSTHERE_DIFFERENTPORT,
app_from_template,
)
from mocker.endpoints.mesos import (
SCHEDULER_FWRK_ALWAYSTHERE_DIFFERENTPORT,
SCHEDULER_FWRK_ALWAYSTHERE_ID,
SCHEDULER_FWRK_ALWAYSTHERE_NOWEBUI,
framework_from_template,
)
from mocker.endpoints.mesos_dns import (
EMPTY_SRV,
SCHEDULER_SRV_ALWAYSTHERE_DIFFERENTPORT,
)
from util import GuardedSubprocess, LineBufferFilter, SearchCriteria
class TestServiceStateful:
# Test all the stateful test-cases/tests where AR caching may influence the
# results
def test_if_marathon_apps_are_resolved(
self, master_ar_process_pertest, mocker, valid_user_header):
# Remove the data from MesosDNS and Mesos mocks w.r.t. resolved service
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[])
mocker.send_command(endpoint_id='http://127.0.0.1:8123',
func_name='set_srv_response',
aux_data=EMPTY_SRV)
        # Set non-standard socket for the application
new_apps = {"apps": [SCHEDULER_APP_ALWAYSTHERE_DIFFERENTPORT, ]}
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data=new_apps)
# Check if the location now resolves correctly to the new app socket
generic_correct_upstream_dest_test(
master_ar_process_pertest,
valid_user_header,
'/service/scheduler-alwaysthere/foo/bar/',
"http://127.0.0.15:16001"
)
def test_if_webui_url_is_resolved_using_framework_id(
self, master_ar_process_pertest, mocker, valid_user_header):
# Remove the data from MesosDNS and Marathon mocks w.r.t. resolved service
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": []})
mocker.send_command(endpoint_id='http://127.0.0.1:8123',
func_name='set_srv_response',
aux_data=EMPTY_SRV)
# Set non-standard socket for the framework
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[SCHEDULER_FWRK_ALWAYSTHERE_DIFFERENTPORT])
# Check if the location now resolves correctly to the new framework
# socket
generic_correct_upstream_dest_test(
master_ar_process_pertest,
valid_user_header,
'/service/{}/foo/bar/'.format(SCHEDULER_FWRK_ALWAYSTHERE_ID),
"http://127.0.0.15:16001"
)
def test_if_webui_url_is_resolved_using_framework_name(
self, master_ar_process_pertest, mocker, valid_user_header):
# Remove the data from MesosDNS and Marathon mocks w.r.t. resolved service
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": []})
mocker.send_command(endpoint_id='http://127.0.0.1:8123',
func_name='set_srv_response',
aux_data=EMPTY_SRV)
# Set non-standard port for the framework:
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[SCHEDULER_FWRK_ALWAYSTHERE_DIFFERENTPORT])
# Check if the location now resolves correctly to the new framework
# socket
generic_correct_upstream_dest_test(
master_ar_process_pertest,
valid_user_header,
'/service/scheduler-alwaysthere/foo/bar/',
"http://127.0.0.15:16001"
)
def test_if_mesos_dns_resolving_works(
self, master_ar_process_pertest, mocker, valid_user_header):
# Remove the data from Mesos and Marathon mocks w.r.t. resolved service
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": []})
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[SCHEDULER_FWRK_ALWAYSTHERE_NOWEBUI])
# Set non-standard port for the framework:
mocker.send_command(endpoint_id='http://127.0.0.1:8123',
func_name='set_srv_response',
aux_data=SCHEDULER_SRV_ALWAYSTHERE_DIFFERENTPORT)
# Check if the location now resolves correctly to the new framework
# socket
generic_correct_upstream_dest_test(
master_ar_process_pertest,
valid_user_header,
'/service/scheduler-alwaysthere/foo/bar/',
"http://127.0.0.15:16001"
)
def test_if_marathon_apps_have_prio_over_webui_url(
self, master_ar_process_pertest, mocker, valid_user_header):
# Remove the data from MesosDNS
mocker.send_command(endpoint_id='http://127.0.0.1:8123',
func_name='set_srv_response',
aux_data=EMPTY_SRV)
# Make svcapps resolve the app upstream to a different address,
# framework data implicitly has default port (127.0.0.1:16000)
new_apps = {"apps": [SCHEDULER_APP_ALWAYSTHERE_DIFFERENTPORT, ]}
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data=new_apps)
# Check that svcapps resolve to different port
generic_correct_upstream_dest_test(
master_ar_process_pertest,
valid_user_header,
'/service/scheduler-alwaysthere/foo/bar/',
"http://127.0.0.15:16001"
)
def test_if_marathon_apps_have_prio_over_mesos_dns(
self, master_ar_process_pertest, mocker, valid_user_header):
# Disable resolving service data using webui
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[SCHEDULER_FWRK_ALWAYSTHERE_NOWEBUI])
# Make svcapps resolve the app upstream to a different address,
# framework data implicitly has default port (127.0.0.1:16000)
new_apps = {"apps": [SCHEDULER_APP_ALWAYSTHERE_DIFFERENTPORT, ]}
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data=new_apps)
# Check that svcapps resolve to different port
generic_correct_upstream_dest_test(
master_ar_process_pertest,
valid_user_header,
'/service/scheduler-alwaysthere/foo/bar/',
"http://127.0.0.15:16001"
)
def test_if_webui_url_has_prio_over_mesos_dns(
self, master_ar_process_pertest, mocker, valid_user_header):
# Remove the data from Marathon mock
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": []})
# Set a different port for webui-based framework data:
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[SCHEDULER_FWRK_ALWAYSTHERE_DIFFERENTPORT])
# Check that svcapps resolve to different port
generic_correct_upstream_dest_test(
master_ar_process_pertest,
valid_user_header,
'/service/scheduler-alwaysthere/foo/bar/',
"http://127.0.0.15:16001"
)
def test_if_webui_url_by_fwrk_id_has_prio_over_webui_url_by_fwrk_name(
self, master_ar_process_pertest, mocker, valid_user_header):
        # This one is tricky: we need to create a state-summary entry that has
        # a framework entry whose "name" field is equal to the "id" field of a
        # different entry.
# Remove the data from Marathon mock
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": []})
# Remove the data from MesosDNS mock
mocker.send_command(endpoint_id='http://127.0.0.1:8123',
func_name='set_srv_response',
aux_data=EMPTY_SRV)
# Fabricate state-summary data needed for the tests
fwrk_a = framework_from_template(
SCHEDULER_FWRK_ALWAYSTHERE_ID,
"scheduler-alwaysthere",
"http://127.0.0.15:16001")
fwrk_b = framework_from_template(
"0535dd9a-2644-4945-a365-6fe0145f103f-0000",
SCHEDULER_FWRK_ALWAYSTHERE_ID,
"http://127.0.0.1:16000")
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[fwrk_a, fwrk_b])
# Check that svcapps resolve to different port
generic_correct_upstream_dest_test(
master_ar_process_pertest,
valid_user_header,
'/service/{}/foo/bar/'.format(SCHEDULER_FWRK_ALWAYSTHERE_ID),
"http://127.0.0.15:16001"
)
def test_if_webui_url_path_is_normalized(
self, master_ar_process_pertest, mocker, valid_user_header):
# Remove the data from Marathon mock
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": []})
# Remove the data from MesosDNS mock
mocker.send_command(endpoint_id='http://127.0.0.1:8123',
func_name='set_srv_response',
aux_data=EMPTY_SRV)
        # Test webui_url entry without trailing slash:
fwrk = framework_from_template(
SCHEDULER_FWRK_ALWAYSTHERE_ID,
"scheduler-alwaysthere",
"http://127.0.0.15:16001")
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[fwrk])
generic_correct_upstream_dest_test(
master_ar_process_pertest,
valid_user_header,
'/service/scheduler-alwaysthere/foo/bar/',
"http://127.0.0.15:16001"
)
generic_correct_upstream_request_test(
master_ar_process_pertest,
valid_user_header,
'/service/scheduler-alwaysthere/foo/bar/',
'/foo/bar/',
http_ver='websockets'
)
# Test webui_url entry with trailing slash:
fwrk = framework_from_template(
SCHEDULER_FWRK_ALWAYSTHERE_ID,
"scheduler-alwaysthere",
"http://127.0.0.15:16001/")
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[fwrk])
generic_correct_upstream_dest_test(
master_ar_process_pertest,
valid_user_header,
'/service/scheduler-alwaysthere/foo/bar/',
"http://127.0.0.15:16001"
)
generic_correct_upstream_request_test(
master_ar_process_pertest,
valid_user_header,
'/service/scheduler-alwaysthere/foo/bar/',
'/foo/bar/',
http_ver='websockets'
)
def test_if_broken_json_from_mesos_dns_is_handled(
self, master_ar_process_pertest, mocker, valid_user_header):
# Remove the data from Mesos and Marathon mocks w.r.t. resolved service
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": []})
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[SCHEDULER_FWRK_ALWAYSTHERE_NOWEBUI])
# Make MesosDNS mock respond with garbled data
mocker.send_command(endpoint_id='http://127.0.0.1:8123',
func_name='set_encoded_response',
aux_data=b'blah blah duh duh')
# Verify the response:
url = master_ar_process_pertest.make_url_from_path(
'/service/scheduler-alwaysthere/foo/bar/')
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 503
def test_if_broken_response_status_from_mesos_dns_is_handled(
self, master_ar_process_pertest, mocker, valid_user_header):
# Remove the data from Mesos and Marathon mocks w.r.t. resolved service
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": []})
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[SCHEDULER_FWRK_ALWAYSTHERE_NOWEBUI])
# Make MesosDNS mock respond with invalid data
mocker.send_command(endpoint_id='http://127.0.0.1:8123',
func_name='always_bork',
aux_data=True)
# Verify the response:
url = master_ar_process_pertest.make_url_from_path(
'/service/scheduler-alwaysthere/foo/bar/')
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 503
def test_if_timed_out_response_from_mesos_dns_is_handled(
self, master_ar_process_pertest, mocker, valid_user_header):
# Remove the data from Mesos and Marathon mocks w.r.t. resolved service
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": []})
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[SCHEDULER_FWRK_ALWAYSTHERE_NOWEBUI])
# Make MesosDNS mock stall response by 10s
mocker.send_command(endpoint_id='http://127.0.0.1:8123',
func_name='always_stall',
aux_data=10)
# Verify the response:
url = master_ar_process_pertest.make_url_from_path(
'/service/scheduler-alwaysthere/foo/bar/')
t_start = time.time()
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
t_spent = time.time() - t_start
# If the timeout was properly enforced by Admin Router, the total time
# spent waiting for response will be less than 10s. If there is no
# timeout - it will be at least 10s.
assert t_spent < 10
assert resp.status_code == 503
def test_if_mesos_dns_subrequest_does_not_pass_auth_header_to_mesos_dns(
self, master_ar_process_pertest, mocker, valid_user_header):
# Remove the data from Mesos and Marathon mocks w.r.t. resolved service
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": []})
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[SCHEDULER_FWRK_ALWAYSTHERE_NOWEBUI])
mocker.send_command(endpoint_id='http://127.0.0.1:8123',
func_name='set_srv_response',
aux_data=SCHEDULER_SRV_ALWAYSTHERE_DIFFERENTPORT)
mocker.send_command(endpoint_id='http://127.0.0.1:8123',
func_name='record_requests')
generic_correct_upstream_dest_test(
master_ar_process_pertest,
valid_user_header,
'/service/scheduler-alwaysthere/foo/bar/',
"http://127.0.0.15:16001"
)
r_reqs = mocker.send_command(endpoint_id='http://127.0.0.1:8123',
func_name='get_recorded_requests')
assert len(r_reqs) == 1
header_is_absent(r_reqs[0]['headers'], 'Authorization')
def test_if_no_services_in_cluster_case_is_handled(
self, master_ar_process_pertest, mocker, valid_user_header):
# Remove the data from ALL backends:
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": []})
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[])
mocker.send_command(endpoint_id='http://127.0.0.1:8123',
func_name='set_srv_response',
aux_data=EMPTY_SRV)
url = master_ar_process_pertest.make_url_from_path(
'/service/scheduler-alwaysthere/foo/bar/')
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 404
def test_if_only_matching_scheme_redirects_are_adjusted_for_marathon_apps(
self, master_ar_process_pertest, mocker, valid_user_header):
# Remove the data from MesosDNS and Mesos mocks w.r.t. resolved service
mocker.send_command(
endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[])
mocker.send_command(
endpoint_id='http://127.0.0.1:8123',
func_name='set_srv_response',
aux_data=EMPTY_SRV)
# Mock TLS-enabled Marathon app
app_dict = app_from_template(
'scheduler-alwaysthere', 443, ip="127.0.0.4", scheme='https')
new_apps = {"apps": [app_dict, ]}
mocker.send_command(
endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data=new_apps)
# Non-matching:
mocker.send_command(
endpoint_id="https://127.0.0.4:443",
func_name='always_redirect',
aux_data="http://127.0.0.1/")
url = master_ar_process_pertest.make_url_from_path(
"/service/scheduler-alwaysthere/foo/bar")
r = requests.get(url, allow_redirects=False, headers=valid_user_header)
assert r.status_code == 307
assert r.headers['Location'] == "http://127.0.0.1/"
# Matching:
mocker.send_command(
endpoint_id="https://127.0.0.4:443",
func_name='always_redirect',
aux_data="https://127.0.0.1/")
url = master_ar_process_pertest.make_url_from_path(
"/service/scheduler-alwaysthere/foo/bar")
r = requests.get(url, allow_redirects=False, headers=valid_user_header)
assert r.status_code == 307
absolute = urljoin(url, r.headers['Location'])
assert absolute == "http://127.0.0.1/service/scheduler-alwaysthere/"
def test_if_only_matching_scheme_redirects_are_adjusted_for_mesos_frameworks(
self, master_ar_process_pertest, mocker, valid_user_header):
# Remove the data from MesosDNS and Marathon mocks w.r.t. resolved service
mocker.send_command(
endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": []})
mocker.send_command(
endpoint_id='http://127.0.0.1:8123',
func_name='set_srv_response',
aux_data=EMPTY_SRV)
# Mock TLS-enabled framework
fwrk = framework_from_template(
SCHEDULER_FWRK_ALWAYSTHERE_ID,
"scheduler-alwaysthere",
"https://127.0.0.4:443/")
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[fwrk])
# Non-matching:
mocker.send_command(
endpoint_id="https://127.0.0.4:443",
func_name='always_redirect',
aux_data="http://127.0.0.1/")
url = master_ar_process_pertest.make_url_from_path(
"/service/scheduler-alwaysthere/foo/bar")
r = requests.get(url, allow_redirects=False, headers=valid_user_header)
assert r.status_code == 307
assert r.headers['Location'] == "http://127.0.0.1/"
# Matching:
mocker.send_command(
endpoint_id="https://127.0.0.4:443",
func_name='always_redirect',
aux_data="https://127.0.0.1/")
url = master_ar_process_pertest.make_url_from_path(
"/service/scheduler-alwaysthere/foo/bar")
r = requests.get(url, allow_redirects=False, headers=valid_user_header)
assert r.status_code == 307
absolute = urljoin(url, r.headers['Location'])
assert absolute == "http://127.0.0.1/service/scheduler-alwaysthere/"
def test_if_scheme_is_honoured_for_marathon_apps(
self, master_ar_process_pertest, mocker, valid_user_header):
# Remove the data from MesosDNS and Mesos mocks w.r.t. resolved service
mocker.send_command(
endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[])
mocker.send_command(
endpoint_id='http://127.0.0.1:8123',
func_name='set_srv_response',
aux_data=EMPTY_SRV)
# Mock TLS-enabled Marathon app
app_dict = app_from_template(
'scheduler-alwaysthere', 443, ip="127.0.0.4", scheme='https')
new_apps = {"apps": [app_dict, ]}
mocker.send_command(
endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data=new_apps)
url = master_ar_process_pertest.make_url_from_path("/service/scheduler-alwaysthere/")
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
req_data = resp.json()
assert req_data['endpoint_id'] == "https://127.0.0.4:443"
def test_if_scheme_is_honoured_in_mesos_scheduler_entry(
self, master_ar_process_pertest, mocker, valid_user_header):
# Remove the data from MesosDNS and Marathon mocks w.r.t. resolved service
mocker.send_command(
endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": []})
mocker.send_command(
endpoint_id='http://127.0.0.1:8123',
func_name='set_srv_response',
aux_data=EMPTY_SRV)
# Mock TLS-enabled framework
fwrk = framework_from_template(
SCHEDULER_FWRK_ALWAYSTHERE_ID,
"scheduler-alwaysthere",
"https://127.0.0.4:443/")
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[fwrk])
url = master_ar_process_pertest.make_url_from_path("/service/scheduler-alwaysthere/")
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
req_data = resp.json()
assert req_data['endpoint_id'] == "https://127.0.0.4:443"
def test_if_ar_with_empty_cache_waits_for_marathon_during_service_resolve(
self, mocker, nginx_class, valid_user_header):
# Make service endpoint resolve only Marathon-related data:
mocker.send_command(
endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[])
mocker.send_command(
endpoint_id='http://127.0.0.1:8123',
func_name='set_srv_response',
aux_data=EMPTY_SRV)
# Make Mock endpoint stall a little, make sure AR cache update timeouts
# are big enough to swallow it:
backend_request_timeout = 6
refresh_lock_timeout = backend_request_timeout * 2
        # Make periodic cache refreshes so rare that they do not get into the
        # picture:
ar = nginx_class(cache_first_poll_delay=1200,
cache_poll_period=1200,
cache_expiration=1200,
cache_max_age_soft_limit=1200,
cache_max_age_hard_limit=1800,
cache_backend_request_timeout=backend_request_timeout,
cache_refresh_lock_timeout=refresh_lock_timeout,
)
url = ar.make_url_from_path("/service/scheduler-alwaysthere/")
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='always_stall',
aux_data=backend_request_timeout * 0.5)
# Measure the time it took and the results:
with GuardedSubprocess(ar):
t_start = time.time()
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
t_spent = time.time() - t_start
assert resp.status_code == 200
data = resp.json()
assert data['endpoint_id'] == 'http://127.0.0.1:16000'
# If AR waits for cache during resolve, then time spent should be
# greater than the stall time that has been set. Due to the fact
# that update coroutines are not separated yet, this will be
# slightly higher than: 2 * (backend_request_timeout * 0.5)
# as we have two calls to Marathon (svcapps + marathon leader) from
# the cache code.
assert t_spent > 2 * (backend_request_timeout * 0.5)
def test_if_ar_with_empty_cache_waits_for_mesos_during_service_resolve(
self, mocker, nginx_class, valid_user_header):
# Make service endpoint resolve only Mesos-related data:
mocker.send_command(
endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": []})
mocker.send_command(
endpoint_id='http://127.0.0.1:8123',
func_name='set_srv_response',
aux_data=EMPTY_SRV)
# Make Mock endpoint stall a little, make sure AR cache update timeouts
# are big enough to swallow it:
backend_request_timeout = 6
refresh_lock_timeout = backend_request_timeout * 2
        # Make periodic cache refreshes so rare that they do not get into the
        # picture:
ar = nginx_class(cache_first_poll_delay=1200,
cache_poll_period=1200,
cache_expiration=1200,
cache_max_age_soft_limit=1200,
cache_max_age_hard_limit=1800,
cache_backend_request_timeout=backend_request_timeout,
cache_refresh_lock_timeout=refresh_lock_timeout,
)
url = ar.make_url_from_path("/service/scheduler-alwaysthere/")
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='always_stall',
aux_data=backend_request_timeout * 0.5)
# Measure the time it took and the results:
with GuardedSubprocess(ar):
t_start = time.time()
resp = requests.get(url,
allow_redirects=False,
headers=valid_user_header)
t_spent = time.time() - t_start
assert resp.status_code == 200
data = resp.json()
assert data['endpoint_id'] == 'http://127.0.0.1:16000'
assert t_spent > backend_request_timeout * 0.5
def test_if_broken_marathon_prevents_resolving_via_mesos_state_summary(
self, mocker, nginx_class, valid_user_header):
# Bork Marathon Mock, DO NOT touch Mesos Mock:
mocker.send_command(
endpoint_id='http://127.0.0.1:8080',
func_name='always_bork',
aux_data=True)
        # Make periodic cache refreshes so rare that they do not get into the
        # picture:
ar = nginx_class(cache_first_poll_delay=1200,
cache_poll_period=1200,
cache_expiration=1200,
cache_max_age_soft_limit=1200,
cache_max_age_hard_limit=1800,
)
url = ar.make_url_from_path("/service/scheduler-alwaysthere/")
with GuardedSubprocess(ar):
resp = requests.get(
url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 503
assert '503 Service Unavailable: invalid Marathon svcapps cache' in resp.text
def test_if_broken_marathon_does_not_prevent_resolving_root_marathon(
self, mocker, nginx_class, valid_user_header):
# Bork Marathon Mock, DO NOT touch Mesos Mock:
mocker.send_command(
endpoint_id='http://127.0.0.1:8080',
func_name='always_bork',
aux_data=True)
        # Change the Root Marathon's endpoint address as reported by the Mesos
        # mock, so that we do not get the reply from the original endpoint
        # (i.e. http://127.0.0.1:8080), as it will always respond with broken
        # responses (see the `mocker.send_command` call above).
fwrk = framework_from_template(
SCHEDULER_FWRK_ALWAYSTHERE_ID,
"marathon",
"http://127.0.0.2:8080/")
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[fwrk])
        # Make periodic cache refreshes so rare that they do not get into the
        # picture:
ar = nginx_class(cache_first_poll_delay=1200,
cache_poll_period=1200,
cache_expiration=1200,
cache_max_age_soft_limit=1200,
cache_max_age_hard_limit=1800,
)
url = ar.make_url_from_path("/service/marathon/v2/reflect/me")
with GuardedSubprocess(ar):
resp = requests.get(
url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
data = resp.json()
assert data['endpoint_id'] == 'http://127.0.0.2:8080'
def test_if_broken_marathon_does_not_prevent_resolving_root_metronome(
self, mocker, nginx_class, valid_user_header):
# Bork Marathon Mock, DO NOT touch Mesos Mock:
mocker.send_command(
endpoint_id='http://127.0.0.1:8080',
func_name='always_bork',
aux_data=True)
        # Make periodic cache refreshes so rare that they do not get into the
        # picture:
ar = nginx_class(cache_first_poll_delay=1200,
cache_poll_period=1200,
cache_expiration=1200,
cache_max_age_soft_limit=1200,
cache_max_age_hard_limit=1800,
)
url = ar.make_url_from_path("/service/metronome/foo/bar")
with GuardedSubprocess(ar):
resp = requests.get(
url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
data = resp.json()
assert data['endpoint_id'] == 'http://127.0.0.1:9000'
def test_if_broken_mesos_prevents_resolving_via_mesosdns(
self, mocker, nginx_class, valid_user_header):
        # Bork the Mesos mock and make the Marathon mock respond with no apps,
        # so that AR tries to resolve via Mesos /state-summary
mocker.send_command(
endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": []})
mocker.send_command(
endpoint_id='http://127.0.0.2:5050',
func_name='always_bork',
aux_data=True)
        # Make periodic cache refreshes so rare that they do not get into the
        # picture:
ar = nginx_class(cache_first_poll_delay=1200,
cache_poll_period=1200,
cache_expiration=1200,
cache_max_age_soft_limit=1200,
cache_max_age_hard_limit=1800,
)
url = ar.make_url_from_path("/service/scheduler-alwaysthere/")
with GuardedSubprocess(ar):
resp = requests.get(
url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 503
assert '503 Service Unavailable: invalid Mesos state cache' == resp.text.strip()
def test_if_broken_mesos_does_not_prevent_resolving_via_marathon(
self, mocker, nginx_class, valid_user_header):
        # Bork the Mesos mock and make the MesosDNS mock respond with no
        # records, so that AR is able to resolve only via Marathon and we are
        # certain that it resolved via Marathon.
mocker.send_command(
endpoint_id='http://127.0.0.2:5050',
func_name='always_bork',
aux_data=True)
mocker.send_command(
endpoint_id='http://127.0.0.1:8123',
func_name='set_srv_response',
aux_data=EMPTY_SRV)
        # Make periodic cache refreshes so rare that they do not get into the
        # picture:
ar = nginx_class(cache_first_poll_delay=1200,
cache_poll_period=1200,
cache_expiration=1200,
cache_max_age_soft_limit=1200,
cache_max_age_hard_limit=1800,
)
url = ar.make_url_from_path("/service/scheduler-alwaysthere/")
with GuardedSubprocess(ar):
resp = requests.get(
url,
allow_redirects=False,
headers=valid_user_header)
assert resp.status_code == 200
data = resp.json()
assert data['endpoint_id'] == 'http://127.0.0.1:16000'
def test_if_ar_sets_correct_useragent_while_resolving_via_mesosdns(
self, master_ar_process_pertest, mocker, valid_user_header):
# Remove the data from Mesos and Marathon mocks w.r.t. resolved service
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data={"apps": []})
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[SCHEDULER_FWRK_ALWAYSTHERE_NOWEBUI])
# Make mesos-dns record requests:
mocker.send_command(endpoint_id='http://127.0.0.1:8123',
func_name='record_requests')
url = master_ar_process_pertest.make_url_from_path("/service/scheduler-alwaysthere/")
requests.get(
url,
allow_redirects=False,
headers=valid_user_header)
r_reqs = mocker.send_command(endpoint_id='http://127.0.0.1:8123',
func_name='get_recorded_requests')
assert len(r_reqs) == 1
verify_header(r_reqs[0]['headers'], 'User-Agent', 'Master Admin Router')
@pytest.mark.parametrize(
'label_val,should_rewrite',
[('yes', True),
('true', True),
('1', True),
('make it so', True),
('whatever', True),
('', True), # the label contains empty string
(None, True), # the label is absent
('false', False),
(False, False),
],)
def test_if_req_url_rewriting_can_be_configured(
self,
master_ar_process_pertest,
mocker,
valid_user_header,
label_val,
should_rewrite):
        # If `DCOS_SERVICE_REWRITE_REQUEST_URLS` is set to `false` (string) or
        # `false` (boolean), Admin Router will not strip the context path and
        # the upstream request will be made with the same URL path as the
        # client request. In any other case the prefix will be stripped.
# Remove the data from MesosDNS and Mesos mocks w.r.t. resolved service
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[])
mocker.send_command(endpoint_id='http://127.0.0.1:8123',
func_name='set_srv_response',
aux_data=EMPTY_SRV)
        # Set a non-standard socket for the application
srv = SCHEDULER_APP_ALWAYSTHERE_DIFFERENTPORT
if label_val is not None:
srv['labels']['DCOS_SERVICE_REWRITE_REQUEST_URLS'] = label_val
new_apps = {"apps": [srv, ]}
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data=new_apps)
# Check if the location now resolves correctly to the new app socket
if should_rewrite:
path_expected = "/foo/bar/"
else:
path_expected = "/service/scheduler-alwaysthere/foo/bar/"
generic_correct_upstream_request_test(
master_ar_process_pertest,
valid_user_header,
'/service/scheduler-alwaysthere/foo/bar/',
path_expected,
http_ver='websockets'
)
@pytest.mark.parametrize(
'label_val,should_buffer',
[('yes', True),
('true', True),
('1', True),
('make it so', True),
('whatever', True),
('', True), # the label contains empty string
(None, True), # the label is absent
('false', False),
(False, False),
],)
def test_if_request_buffering_can_be_configured(
self,
mocker,
nginx_class,
valid_user_header,
label_val,
should_buffer):
        # If `DCOS_SERVICE_REQUEST_BUFFERING` is set to `false` (string) or
        # `false` (boolean), Admin Router will not buffer the client request
        # before sending it to the upstream. In any other case the request is
        # going to be buffered.
# Remove the data from MesosDNS and Mesos mocks w.r.t. resolved service
mocker.send_command(endpoint_id='http://127.0.0.2:5050',
func_name='set_frameworks_response',
aux_data=[])
mocker.send_command(endpoint_id='http://127.0.0.1:8123',
func_name='set_srv_response',
aux_data=EMPTY_SRV)
# Set the DCOS_SERVICE_REQUEST_BUFFERING for the test mock:
srv = SCHEDULER_APP_ALWAYSTHERE_DIFFERENTPORT
if label_val is not None:
srv['labels']['DCOS_SERVICE_REQUEST_BUFFERING'] = label_val
new_apps = {"apps": [srv, ]}
mocker.send_command(endpoint_id='http://127.0.0.1:8080',
func_name='set_apps_response',
aux_data=new_apps)
# In theory it is possible to write a test that really checks if the
# request was buffered or not. It would require talking to the mocked
# endpoint during the test and checking if it is receiving the data as
# it is being sent (there is no buffering) or only after the whole
# request has been uploaded (Nginx buffers the data). Such a feature
# would introduce some extra complexity into the test harness. Simply
# checking if AR is printing the warning to the error log seems to be
# good enough.
filter_regexp = {}
tmp = 'a client request body is buffered to a temporary file'
if label_val in ["false", False]:
filter_regexp[tmp] = SearchCriteria(0, True)
else:
filter_regexp[tmp] = SearchCriteria(1, True)
ar = nginx_class(role="master")
url = ar.make_url_from_path('/service/scheduler-alwaysthere/foo/bar/')
        # In order to make Nginx print a warning to the error log, the request
        # payload needs to be greater than client_body_buffer_size, which by
        # default is set to 16k. We use 2MB here for good measure.
# http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size
payload = {"data": "x" * 1024 * 1024 * 2}
with GuardedSubprocess(ar):
lbf = LineBufferFilter(
filter_regexp,
line_buffer=ar.stderr_line_buffer)
resp = requests.post(
url,
allow_redirects=False,
headers=valid_user_header,
data=payload)
lbf.scan_log_buffer()
assert lbf.extra_matches == {}
assert resp.status_code == 200
|
{
"content_hash": "df5c31165082c8f63ae311f46cb2538a",
"timestamp": "",
"source": "github",
"line_count": 977,
"max_line_length": 93,
"avg_line_length": 43.62538382804504,
"alnum_prop": 0.5541973628642485,
"repo_name": "kensipe/dcos",
"id": "41027cee6a6c92c7bd6eda2be8f4fecef56a3758",
"size": "42687",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "packages/adminrouter/extra/src/test-harness/tests/test_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2529"
},
{
"name": "Dockerfile",
"bytes": "11329"
},
{
"name": "Groovy",
"bytes": "711"
},
{
"name": "HTML",
"bytes": "94641"
},
{
"name": "Lua",
"bytes": "195396"
},
{
"name": "Makefile",
"bytes": "179"
},
{
"name": "PowerShell",
"bytes": "20017"
},
{
"name": "Python",
"bytes": "1468357"
},
{
"name": "Shell",
"bytes": "114905"
}
],
"symlink_target": ""
}
|
import sys
sys.path.insert(1,"../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
import os
def test_hdfs_io():
'''
Test H2O read and write to hdfs
'''
hdfs_name_node = os.getenv("NAME_NODE")
print("Importing hdfs data")
h2o_data = h2o.import_file("hdfs://" + hdfs_name_node + "/datasets/airlines/airlines_all.05p.csv")
print("Spliting data")
for c in ["Month","DayofMonth","IsArrDelayed"]:
h2o_data[c] = h2o_data[c].asfactor()
myX = ["Month","DayofMonth","Distance"]
train,test = h2o_data.split_frame(ratios=[0.9])
print("Exporting file to hdfs")
h2o.export_file(test[:,["Year","DayOfWeek"]], "hdfs://" + hdfs_name_node + "/datasets/exported.csv")
print("Reading file back in and comparing if data is the same")
new_test = h2o.import_file("hdfs://" + hdfs_name_node + "/datasets/exported.csv")
assert((test[:,"DayOfWeek"] - new_test[:,"DayOfWeek"]).sum() == 0)
print("Training")
h2o_glm = H2OGeneralizedLinearEstimator(family="binomial", alpha=0.5, Lambda=0.01)
    h2o_glm.train(x=myX, y="IsArrDelayed", training_frame=train) # don't need to train on all features
hdfs_model_path = os.getenv("MODEL_PATH")
print("Saving model")
new_model_path = h2o.save_model(h2o_glm, "hdfs://" + hdfs_name_node + "/" + hdfs_model_path)
print("Loading back model")
new_model = h2o.load_model(new_model_path)
print("Running predictions")
preds = new_model.predict(test)
if __name__ == "__main__":
pyunit_utils.standalone_test(test_hdfs_io)
else:
test_hdfs_io()
|
{
"content_hash": "67eb4f61e6b46e18fcb13b1e6806d571",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 104,
"avg_line_length": 34.5531914893617,
"alnum_prop": 0.6527093596059114,
"repo_name": "h2oai/h2o-dev",
"id": "d818cb7507701d6ec1fa56a401ba5d0377c1be6b",
"size": "1624",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_hdfs/pyunit_INTERNAL_HDFS_import_export.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "162399"
},
{
"name": "CoffeeScript",
"bytes": "267048"
},
{
"name": "Emacs Lisp",
"bytes": "6465"
},
{
"name": "HTML",
"bytes": "140849"
},
{
"name": "Java",
"bytes": "6216622"
},
{
"name": "JavaScript",
"bytes": "38932"
},
{
"name": "Jupyter Notebook",
"bytes": "5585408"
},
{
"name": "Makefile",
"bytes": "34105"
},
{
"name": "Python",
"bytes": "2644394"
},
{
"name": "R",
"bytes": "1848754"
},
{
"name": "Rebol",
"bytes": "7059"
},
{
"name": "Ruby",
"bytes": "3506"
},
{
"name": "Scala",
"bytes": "22830"
},
{
"name": "Shell",
"bytes": "47513"
},
{
"name": "TeX",
"bytes": "579960"
}
],
"symlink_target": ""
}
|
import json
import sys
from typing import cast, Dict, List, Union
import looker_sdk
from looker_sdk import models, error
import sdk_exceptions
sdk = looker_sdk.init31("../looker.ini")
def main() -> None:
"""Given a look id, obtain the query behind it and run it with the desired
filter values.
https://docs.looker.com/reference/api-and-integration/api-reference/v3.1/query#implementation_notes_9 # noqa: B950
shows an example of how filters are defined in the posted body. To set the
same filter in this example, the script needs to be run as follows:
$ python run_look_with_filters.py 5 category.name socks
"""
look_id = sys.argv[1] if len(sys.argv) > 1 else ""
filter_args = iter(sys.argv[2:])
filters: Dict[str, str] = {}
if not (look_id and len(sys.argv[2:]) > 0 and len(sys.argv[2:]) % 2 == 0):
raise sdk_exceptions.ArgumentError(
"Please provide: <lookId> <filter_1> <filter_value_1> "
"<filter_2> <filter_value_2> ..."
)
for filter_name in filter_args:
filters[filter_name] = next(filter_args)
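    # Illustrative note (not part of the original script): for the sample
    # invocation above, sys.argv[2:] == ["category.name", "socks"], so the
    # loop above builds filters == {"category.name": "socks"} by consuming
    # the arguments in pairs.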
query = get_look_query(int(look_id))
results = run_query_with_filter(query, filters)
print(f"Query results with filters={filters}:\n{results}", end="\n\n")
def get_look_query(id: int) -> models.Query:
"""Returns the query associated with a given look id."""
try:
look = sdk.look(id)
except error.SDKError:
raise sdk_exceptions.NotFoundError(f"Error getting Look {id}")
else:
query = look.query
assert isinstance(query, models.Query)
return query
TJson = List[Dict[str, Union[str, int, float, bool, None]]]
def run_query_with_filter(query: models.Query, filters: Dict[str, str]) -> TJson:
"""Runs the specified query with the specified filters."""
request = create_query_request(query, filters)
try:
json_ = sdk.run_inline_query("json", request, cache=False)
except error.SDKError:
raise sdk_exceptions.RunInlineQueryError("Error running query")
else:
json_resp = cast(TJson, json.loads(json_))
return json_resp
def create_query_request(q: models.Query, filters: Dict[str, str]) -> models.WriteQuery:
return models.WriteQuery(
model=q.model,
view=q.view,
fields=q.fields,
pivots=q.pivots,
fill_fields=q.fill_fields,
filters=filters,
sorts=q.sorts,
limit=q.limit,
column_limit=q.column_limit,
total=q.total,
row_total=q.row_total,
subtotals=q.subtotals,
dynamic_fields=q.dynamic_fields,
query_timezone=q.query_timezone,
)
main()
|
{
"content_hash": "0fb4ac86b7d5f5eb644f6711efc0b86d",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 119,
"avg_line_length": 30.443181818181817,
"alnum_prop": 0.6424038820455393,
"repo_name": "looker-open-source/sdk-examples",
"id": "74c67271d4f2d60d3ee5ddecfa422ef51ad1e2d9",
"size": "2679",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/run_look_with_filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "672"
},
{
"name": "Dockerfile",
"bytes": "308"
},
{
"name": "HTML",
"bytes": "1843"
},
{
"name": "Python",
"bytes": "87898"
},
{
"name": "Ruby",
"bytes": "27873"
},
{
"name": "Shell",
"bytes": "1118"
},
{
"name": "Swift",
"bytes": "538353"
},
{
"name": "TypeScript",
"bytes": "41064"
}
],
"symlink_target": ""
}
|
import requests
import urllib
import csv
import os
import sys
from time import time
import argparse
from py_data_getter import data_getter
from py_db import db
db = db('nba_shots')
def initiate(start_year, end_year):
start_time = time()
print "-------------------------"
print "shots_Breakdown.py"
for year in range(start_year,end_year+1):
season_start = year
season_id = str(season_start)+str(season_start%100+1).zfill(2)[-2:]
print season_id
process(season_id)
end_time = time()
elapsed_time = float(end_time - start_time)
print "time elapsed (in seconds): " + str(elapsed_time)
print "time elapsed (in minutes): " + str(elapsed_time/60.0)
print "shots_Breakdown.py"
print "-------------------------"
def process(season_id):
for _type in ('League', 'Team', 'Player'):
print '\t' + _type
query_id = _type
if query_id == 'League':
query_id = "'00' AS League"
query = """SELECT *
FROM(
SELECT %s_id, season_id, season_type, shot_zone_basic, shot_zone_area,
COUNT(*) AS attempts,
AVG(shot_distance) AS avg_dist,
SUM(CASE WHEN event_type = "Made Shot" THEN 1 ELSE 0 END) AS makes,
SUM(CASE WHEN event_type = "Made Shot" AND shot_type = '2PT Field Goal' THEN 2
WHEN event_type = "Made Shot" AND shot_type = '3PT Field Goal' THEN 3
ELSE 0 END) AS points
FROM shots
WHERE season_id = %s
GROUP BY %s_id, season_id, season_type, shot_zone_basic, shot_zone_area
) a
JOIN(
SELECT %s_id, season_id, season_type,
COUNT(DISTINCT game_id) AS games
FROM shots
WHERE season_id = %s
GROUP BY %s_id, season_id, season_type
) g USING (%s_id, season_id, season_type)
UNION
SELECT *
FROM(
SELECT %s_id, season_id, season_type, shot_zone_basic, 'all' AS shot_zone_area,
COUNT(*) AS attempts,
AVG(shot_distance) AS avg_dist,
SUM(CASE WHEN event_type = "Made Shot" THEN 1 ELSE 0 END) AS makes,
SUM(CASE WHEN event_type = "Made Shot" AND shot_type = '2PT Field Goal' THEN 2
WHEN event_type = "Made Shot" AND shot_type = '3PT Field Goal' THEN 3
ELSE 0 END) AS points
FROM shots
WHERE season_id = %s
GROUP BY %s_id, season_id, season_type, shot_zone_basic
) a
JOIN(
SELECT %s_id, season_id, season_type,
COUNT(DISTINCT game_id) AS games
FROM shots
WHERE season_id = %s
GROUP BY %s_id, season_id, season_type
) g USING (%s_id, season_id, season_type)
UNION
SELECT *
FROM(
SELECT %s_id, season_id, season_type, 'all' AS shot_zone_basic, 'all' AS shot_zone_area,
COUNT(*) AS attempts,
AVG(shot_distance) AS avg_dist,
SUM(CASE WHEN event_type = "Made Shot" THEN 1 ELSE 0 END) AS makes,
SUM(CASE WHEN event_type = "Made Shot" AND shot_type = '2PT Field Goal' THEN 2
WHEN event_type = "Made Shot" AND shot_type = '3PT Field Goal' THEN 3
ELSE 0 END) AS points
FROM shots
WHERE season_id = %s
GROUP BY %s_id, season_id, season_type
) a
JOIN(
SELECT %s_id, season_id, season_type,
COUNT(DISTINCT game_id) AS games
FROM shots
WHERE season_id = %s
GROUP BY %s_id, season_id, season_type
) g USING (%s_id, season_id, season_type)
ORDER BY %s_id ASC, season_type ASC, season_id ASC, shot_zone_basic ASC, shot_zone_area ASC;
"""
q = query % (query_id, season_id, _type, query_id, season_id, _type, _type, query_id, season_id, _type, query_id, season_id, _type, _type, query_id, season_id, _type, query_id, season_id, _type, _type, _type)
# raw_input(q)
res = db.query(q)
entries = []
_id = '%s_id' % (_type.lower())
for row in res:
type_id, season_id, season_type, shot_zone_basic, shot_zone_area, attempts, avg_dist, makes, points, games = row
entry = {_id:type_id, "season_id":season_id, "season_type":season_type, "shot_zone_basic":shot_zone_basic, "shot_zone_area":shot_zone_area, "attempts":attempts, "avg_dist":avg_dist, "makes":makes, "points":points, "games":games}
entries.append(entry)
table = "shots_%s_Breakdown" % (_type)
if entries != []:
for i in range(0, len(entries), 1000):
db.insertRowDict(entries[i: i + 1000], table, insertMany=True, replace=True, rid=0,debug=1)
db.conn.commit()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--start_year',type=int,default=2018)
parser.add_argument('--end_year',type=int,default=2018)
args = parser.parse_args()
initiate(args.start_year, args.end_year)
|
{
"content_hash": "fde0d4850257d4052939ba3405ad79f3",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 243,
"avg_line_length": 33.388489208633096,
"alnum_prop": 0.6136608489549666,
"repo_name": "Connor-R/nba_shot_charts",
"id": "0b327eaf79e341b3f3ff8f439f066bb469435a0d",
"size": "4641",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "processing/shots_Breakdown.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "187128"
},
{
"name": "Shell",
"bytes": "6963"
}
],
"symlink_target": ""
}
|
'''
Copyright (c) 2015 Joaquin Duo - File under MIT License
Code Licensed under MIT License. See LICENSE file.
To enable explicit relative importing in __main__, you simply import
this package before any relative import
Usage:
------
To enable explicit relative importing in __main__, you simply import
this package before any relative import
import relative_import
from .my_pkg import foo, bar
...
There is no need to call any init function using this module.
Make sure your PYTHONPATH is correctly set to resolve the relative path of the
submodule/subpackage.
'''
import rel_imp
rel_imp.init_implicitly()
|
{
"content_hash": "54949c26745c459bdd9fd85edf27967a",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 76,
"avg_line_length": 24.03846153846154,
"alnum_prop": 0.7504,
"repo_name": "joaduo/rel_imp",
"id": "58a36c886f1ffff7dd1bc8a17c17f6b217785678",
"size": "649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "relative_import.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12939"
},
{
"name": "Shell",
"bytes": "3923"
}
],
"symlink_target": ""
}
|
import subprocess
import time
import praw
from hashlib import sha1
from flask import Flask
from flask import Response
from flask import request
from cStringIO import StringIO
from base64 import b64encode
from base64 import b64decode
from ConfigParser import ConfigParser
import OAuth2Util
import os
import markdown
import bleach
# encoding=utf8
import sys
from participantCollection import ParticipantCollection
reload(sys)
sys.setdefaultencoding('utf8')
# Edit me!
activeCommentHashFiles = [ 'retiredcommenthashes.txt',
'../stayclean-2016-april/retiredcommenthashes.txt',
'../stayclean-2016-march/retiredcommenthashes.txt',
'../stayclean-2016-february/retiredcommenthashes.txt' ]
flaskport = 8700
app = Flask(__name__)
app.debug = True
# commentHashesAndComments = {}
def loginOAuthAndReturnRedditSession():
redditSession = praw.Reddit(user_agent='Test Script by /u/foobarbazblarg')
o = OAuth2Util.OAuth2Util(redditSession, print_log=True, configfile="../reddit-oauth-credentials.cfg")
# TODO: Testing comment of refresh. We authenticate fresh every time, so presumably no need to do o.refresh().
# o.refresh(force=True)
return redditSession
def getSubmissionForRedditSession(redditSession):
submission = redditSession.get_submission(submission_id=challengePageSubmissionId)
# submission.replace_more_comments(limit=None, threshold=0)
return submission
def retiredCommentHashes():
answer = []
for filename in activeCommentHashFiles:
with open(filename, "r") as commentHashFile:
# return commentHashFile.readlines()
answer += commentHashFile.read().splitlines()
return answer
@app.route('/interesting-and-problematic-users')
def interestingAndProblematicUsers():
# TODO: left off here.
# global commentHashesAndComments
global submission
# commentHashesAndComments = {}
stringio = StringIO()
stringio.write('<html>\n<head>\n</head>\n\n')
redditSession = loginOAuthAndReturnRedditSession()
unreadMessages = redditSession.get_unread(limit=None)
retiredHashes = retiredCommentHashes()
i = 1
stringio.write('<iframe name="invisibleiframe" style="display:none;"></iframe>\n')
stringio.write("<h3>")
stringio.write("my unread messages")
stringio.write("</h3>\n\n")
for unreadMessage in unreadMessages:
i += 1
commentHash = sha1()
if unreadMessage.__class__ == praw.objects.Comment:
# This next line takes 2 seconds. It must need to do an HTTPS transaction to get the permalink.
# Not much we can do about that, I guess.
# print int(round(time.time() * 1000))
commentHash.update(unreadMessage.permalink)
# print int(round(time.time() * 1000))
else:
commentHash.update(str(unreadMessage.author))
commentHash.update(unreadMessage.body.encode('utf-8'))
commentHash = commentHash.hexdigest()
if commentHash not in retiredHashes:
# commentHashesAndComments[commentHash] = unreadMessage
authorName = str(unreadMessage.author) # can be None if author was deleted. So check for that and skip if it's None.
# participant = ParticipantCollection().participantNamed(authorName)
stringio.write("<hr>\n")
stringio.write('<font color="blue"><b>')
stringio.write(authorName)
stringio.write('</b></font><br>')
if unreadMessage.__class__ == praw.objects.Comment:
stringio.write('<small><font color="gray">' + bleach.clean(unreadMessage.submission.title) + '</font></small><br>')
else:
stringio.write('<b>' + bleach.clean(unreadMessage.subject) + '</b><br>')
stringio.write(bleach.clean(markdown.markdown(unreadMessage.body.encode('utf-8')), tags=['p']))
stringio.write("\n<br><br>\n\n")
stringio.write('</html>')
pageString = stringio.getvalue()
stringio.close()
return Response(pageString, mimetype='text/html')
if __name__ == '__main__':
app.run(host='127.0.0.1', port=flaskport)
|
{
"content_hash": "0ada34e5b91b21d1476b29cba475bc4c",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 131,
"avg_line_length": 38.25454545454546,
"alnum_prop": 0.6739543726235742,
"repo_name": "foobarbazblarg/stayclean",
"id": "34e0488f3d3ca2998001a6aba57c0e308e211523",
"size": "4334",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "stayclean-2018/serve-posts-and-comments-from-interesting-users.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4232161"
},
{
"name": "Shell",
"bytes": "52056"
}
],
"symlink_target": ""
}
|
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.forms import AuthenticationForm
from django.http import HttpResponse
from django.http import JsonResponse
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_protect, ensure_csrf_cookie
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.http import require_POST, require_GET, require_safe
from rest_framework import generics, permissions
from authentication.serializers import UserSerializer
@require_safe
@ensure_csrf_cookie
def get_csrf_token(request):
return HttpResponse('')
@require_POST
@sensitive_post_parameters("password")
@csrf_protect
@never_cache
def login_view(request, authentication_form=AuthenticationForm):
form = authentication_form(request, data=request.POST)
if form.is_valid():
login(request, form.get_user())
return HttpResponse('')
if form.non_field_errors() is None:
errors = form.errors
else:
errors = {"global": "Username not recognized or password incorrect"}
return JsonResponse(errors, status=401)
@require_GET
def logout_view(request):
logout(request)
return HttpResponse("")
class UserRegistration(generics.CreateAPIView):
permission_classes = [
permissions.AllowAny # Or anonymous users can't register
]
serializer_class = UserSerializer
def create(self, request, *args, **kwargs):
response = super().create(request, *args, **kwargs)
user = authenticate(**request.data)
if user.is_active:
login(request, user)
return response
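# Illustrative client-side sketch (added for clarity, not part of the original
# module). The views above imply a "fetch CSRF cookie, then POST credentials"
# flow; the host and URL paths below are assumptions for illustration only,
# since the real routes are defined in the project's urls.py.
#
#     import requests
#
#     session = requests.Session()
#     # 1. GET the csrf endpoint so the csrftoken cookie gets set
#     #    (get_csrf_token is decorated with @ensure_csrf_cookie).
#     session.get("http://localhost:8000/auth/csrf/")
#     # 2. POST credentials with the token, since login_view is @csrf_protect.
#     session.post(
#         "http://localhost:8000/auth/login/",
#         data={"username": "alice", "password": "secret"},
#         headers={"X-CSRFToken": session.cookies["csrftoken"]},
#     )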
|
{
"content_hash": "73450ad38beeb4980a470229894d2cd9",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 80,
"avg_line_length": 28.116666666666667,
"alnum_prop": 0.7326615293420272,
"repo_name": "BenjaminSchubert/HttpInfrastructure",
"id": "75e5aacc6537de0be3a8a4b0e056bc80176a89d0",
"size": "1687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dynamic-server/src/httpInfrastructure/authentication/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2921"
},
{
"name": "HTML",
"bytes": "10127"
},
{
"name": "JavaScript",
"bytes": "1532"
},
{
"name": "Python",
"bytes": "13056"
},
{
"name": "Shell",
"bytes": "42"
},
{
"name": "TypeScript",
"bytes": "18729"
}
],
"symlink_target": ""
}
|
"""
This module contains AWS SNS hook
"""
import json
from airflow.contrib.hooks.aws_hook import AwsHook
def _get_message_attribute(o):
if isinstance(o, bytes):
return {'DataType': 'Binary', 'BinaryValue': o}
if isinstance(o, str):
return {'DataType': 'String', 'StringValue': o}
if isinstance(o, (int, float)):
return {'DataType': 'Number', 'StringValue': str(o)}
if hasattr(o, '__iter__'):
return {'DataType': 'String.Array', 'StringValue': json.dumps(o)}
raise TypeError('Values in MessageAttributes must be one of bytes, str, int, float, or iterable; '
'got {}'.format(type(o)))
class AwsSnsHook(AwsHook):
"""
Interact with Amazon Simple Notification Service.
"""
def __init__(self, *args, **kwargs):
self.conn = None
super(AwsSnsHook, self).__init__(*args, **kwargs)
def get_conn(self):
"""
Get an SNS connection
"""
self.conn = self.get_client_type('sns')
return self.conn
def publish_to_target(self, target_arn, message, subject=None, message_attributes=None):
"""
Publish a message to a topic or an endpoint.
:param target_arn: either a TopicArn or an EndpointArn
:type target_arn: str
:param message: the default message you want to send
        :type message: str
:param subject: subject of message
:type subject: str
:param message_attributes: additional attributes to publish for message filtering. This should be
a flat dict; the DataType to be sent depends on the type of the value:
- bytes = Binary
- str = String
- int, float = Number
- iterable = String.Array
:type message_attributes: dict
"""
conn = self.get_conn()
publish_kwargs = {
'TargetArn': target_arn,
'MessageStructure': 'json',
'Message': json.dumps({
'default': message
}),
}
# Construct args this way because boto3 distinguishes from missing args and those set to None
if subject:
publish_kwargs['Subject'] = subject
if message_attributes:
publish_kwargs['MessageAttributes'] = {
key: _get_message_attribute(val) for key, val in message_attributes.items()
}
return conn.publish(**publish_kwargs)
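# Illustrative usage sketch (added for clarity, not part of the original
# module); the connection id, topic ARN and attribute values below are made
# up for the example.
#
#     from airflow.contrib.hooks.aws_sns_hook import AwsSnsHook
#
#     hook = AwsSnsHook(aws_conn_id="aws_default")
#     hook.publish_to_target(
#         target_arn="arn:aws:sns:us-east-1:123456789012:example-topic",
#         message="deployment finished",
#         subject="ci-status",
#         message_attributes={"attempt": 3, "tags": ["ci", "prod"]},
#     )
#
# Per _get_message_attribute above, "attempt" is published with DataType
# "Number" and "tags" with DataType "String.Array" (JSON-encoded).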
|
{
"content_hash": "b407e0cb2a2896a5c7acbe07f78c6cd4",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 105,
"avg_line_length": 32.31578947368421,
"alnum_prop": 0.5850977198697068,
"repo_name": "owlabs/incubator-airflow",
"id": "ae6429356921df221d5ac7f2740768a1763f3bd6",
"size": "3268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "airflow/contrib/hooks/aws_sns_hook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "57045"
},
{
"name": "HTML",
"bytes": "147187"
},
{
"name": "JavaScript",
"bytes": "1370838"
},
{
"name": "Mako",
"bytes": "1037"
},
{
"name": "Python",
"bytes": "1647566"
},
{
"name": "Shell",
"bytes": "18823"
}
],
"symlink_target": ""
}
|
"""Tests for functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import sys
import time
import numpy as np
from tensorflow.core.framework import function_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import graph_to_function_def
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.framework.errors import InvalidArgumentError
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
def _OptimizerOptions():
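  # Note added for clarity: this yields 2**3 = 8 ConfigProto variants,
  # toggling common subexpression elimination, function inlining and constant
  # folding both in the legacy OptimizerOptions and in the corresponding
  # Grappler rewriter options.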
for cse in [False, True]:
for inline in [False, True]:
for cfold in [False, True]:
cfg = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0,
do_common_subexpression_elimination=cse,
do_function_inlining=inline,
do_constant_folding=cfold)))
if cse:
cfg.graph_options.rewrite_options.arithmetic_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
else:
cfg.graph_options.rewrite_options.arithmetic_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
if inline:
cfg.graph_options.rewrite_options.function_optimization = (
rewriter_config_pb2.RewriterConfig.ON)
else:
cfg.graph_options.rewrite_options.function_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
if cfold:
cfg.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.ON)
else:
cfg.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
yield cfg
class FunctionTest(test.TestCase):
"""Test methods for verifying Function support.
These test methods are used as mix-ins in two test cases: with
and without C API support.
"""
def testIdentity(self):
@function.Defun(dtypes.float32, func_name="MyIdentity")
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
call = MyIdentityFunc([18.0])
self.assertEqual("MyIdentity", call.op.name)
with session.Session() as sess:
self.assertAllEqual([18.0], self.evaluate(call))
@test_util.run_deprecated_v1
def testIdentityImplicitDeref(self):
@function.Defun(dtypes.float32, func_name="MyIdentity")
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
var = variables.VariableV1([18.0])
call = MyIdentityFunc(var._ref()) # pylint: disable=protected-access
self.assertEqual("MyIdentity", call.op.name)
for cfg in _OptimizerOptions():
with session.Session(config=cfg) as sess:
self.evaluate(var.initializer)
self.assertAllEqual([18.0], self.evaluate(call))
def testIdentityOutputName(self):
@function.Defun(
dtypes.float32, func_name="MyIdentity", out_names=["my_result_name"])
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
call = MyIdentityFunc([18.0])
self.assertEqual("MyIdentity", call.op.name)
with session.Session() as sess:
self.assertAllEqual([18.0], self.evaluate(call))
def testTooManyOutputNames(self):
@function.Defun(
dtypes.float32,
func_name="MyIdentity",
out_names=["my_result1", "my_result2"])
def MyIdentityFunc(a):
return a
with ops.Graph().as_default():
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
(r"output names must be either empty or equal in size to outputs. "
"output names size = 2 outputs size = 1")):
MyIdentityFunc([18.0])
def testDefineFunction2Args(self):
@function.Defun(dtypes.float32, dtypes.float32, func_name="APlus2B")
def APlus2B(a, b):
return a + b * 2
with ops.Graph().as_default():
call = APlus2B([1.0], [2.0])
self.assertEqual("APlus2B", call.op.name)
with session.Session() as sess:
self.assertAllEqual([5.0], self.evaluate(call))
def testFunctionWithNoOutput(self):
@function.Defun(dtypes.float32, dtypes.float32)
def APlus2B(a, b):
c = a + b * 2 # Create some ops to have nodes in the body
print(c) # Using 'print' to make lint happy
with ops.Graph().as_default():
# Call function. There should be no exceptions.
APlus2B([1.0], [2.0])
def testDefineFunction2ArgsOutputName(self):
@function.Defun(
dtypes.float32,
dtypes.float32,
func_name="APlus2B",
out_names=["my_result_name"])
def APlus2B(a, b):
return a + b * 2
# APlus2B is stateless.
self.assertEqual([], APlus2B.stateful_ops)
with ops.Graph().as_default():
call = APlus2B([1.0], [2.0])
self.assertEqual("APlus2B", call.op.name)
with session.Session() as sess:
self.assertAllEqual([5.0], self.evaluate(call))
def testDefineFunctionDuplicateOutputs(self):
@function.Defun(dtypes.float32, func_name="Duplicate")
def Duplicate(a):
b = a + 1.0
return b, b
g = ops.Graph()
with g.as_default():
Duplicate([3.0])
func_sig = g.as_graph_def().library.function[0].signature
# The names given to both outputs should be different
# even though the same tensor is emitted to both.
out_names = [a.name for a in func_sig.output_arg]
self.assertEqual(2, len(out_names))
self.assertNotEqual(out_names[0], out_names[1])
def testGradientFunc(self):
@function.Defun(dtypes.float32, func_name="XSquarePlusOneFn")
def XSquarePlusOne(x):
return x * x + 1.0
@function.Defun(dtypes.float32, dtypes.float32)
def XSquarePlusOneGrad(x, dy):
dx = functional_ops.symbolic_gradient(
input=[x, dy], Tout=[dtypes.float32], f="XSquarePlusOneFn", name="dx")
return dx
g = ops.Graph()
with g.as_default():
call_f = XSquarePlusOne([2.0])
call_g = XSquarePlusOneGrad([2.0], [0.1])
with session.Session() as sess:
self.assertAllClose([5.0], self.evaluate(call_f))
self.assertAllClose([0.4], self.evaluate(call_g))
def testTanhSymGrad(self):
@function.Defun(dtypes.float32)
def Forward(x):
return math_ops.reduce_sum(math_ops.tanh(x))
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32)
y = Forward(x)
dx = gradients_impl.gradients([y], [x])
inp = np.array([-1, 1, 2, -2], dtype=np.float32)
feed = {x: inp}
cfg = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L1,
do_function_inlining=True)))
with session.Session(graph=g, config=cfg) as sess:
out, = sess.run(dx, feed)
self.assertAllClose(1 - np.square(np.tanh(inp)), out)
def testCustomGradient(self):
dtype = dtypes.float32
@function.Defun(dtype, dtype, dtype)
def XentLossGrad(logits, labels, dloss):
dlogits = array_ops.reshape(dloss, [-1, 1]) * (
nn_ops.softmax(logits) - labels)
dlabels = array_ops.zeros_like(labels)
# Takes exp(dlogits) to differentiate it from the "correct" gradient.
return math_ops.exp(dlogits), dlabels
@function.Defun(dtype, dtype, grad_func=XentLossGrad)
def XentLoss(logits, labels):
return math_ops.reduce_sum(labels * math_ops.log(nn_ops.softmax(logits)),
1)
g = ops.Graph()
with g.as_default():
logits = array_ops.placeholder(dtype)
labels = array_ops.placeholder(dtype)
loss = XentLoss(logits, labels)
dlogits = gradients_impl.gradients([loss], [logits])
x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
prob = np.exp(x) / np.sum(np.exp(x), 1, keepdims=1)
y = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
for cfg in _OptimizerOptions():
tf_logging.info("cfg = %s", cfg)
with session.Session(graph=g, config=cfg) as sess:
out, = sess.run(dlogits, {logits: x, labels: y})
self.assertAllClose(out, np.exp(prob - y))
def testCustomGradientError(self):
dtype = dtypes.float32
@function.Defun(dtype, dtype, dtype)
def Grad(x, dy, dz):
# Should have returned 1 result.
return x, dy + dz
@function.Defun(dtype, grad_func=Grad)
def Forward(x):
return x, x
g = ops.Graph()
with g.as_default():
inp = array_ops.placeholder(dtype)
out = math_ops.add_n(Forward(inp))
dinp = gradients_impl.gradients(out, [inp])
x = np.random.uniform(-10., 10., size=(4, 9)).astype(np.float32)
with session.Session(graph=g) as sess:
with self.assertRaisesRegexp(
errors_impl.InvalidArgumentError,
"SymGrad expects to return 1.*but get 2.*instead"):
_ = sess.run(dinp, {inp: x})
def testSymGradShape(self):
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtypes.float32, [25, 4])
y = array_ops.placeholder(dtypes.float32, [200, 100])
dz = array_ops.placeholder(dtypes.float32, [1])
      # We assume Foo is a function of (x, y) -> (z). Then, Foo's
# gradient function is (x, y, dz) -> (dx, dy). dx's shape
# should be the same as x's; and dy's shape should be the same
# as y's.
dx, dy = functional_ops.symbolic_gradient(
input=[x, y, dz], Tout=[dtypes.float32] * 2, f="Foo")
self.assertEqual(x.get_shape(), dx.get_shape())
self.assertEqual(y.get_shape(), dy.get_shape())
@test_util.run_deprecated_v1
def testSymGradAttr(self):
@function.Defun(noinline=True)
def Foo(x):
return x * 2
self.assertTrue(
Foo.instantiate([dtypes.float32]).definition.attr["_noinline"].b)
g = ops.Graph()
with g.as_default():
x = constant_op.constant(3.0)
y = Foo(x)
dx, = gradients_impl.gradients(y, [x])
cfg = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0,
do_common_subexpression_elimination=True,
do_function_inlining=True,
do_constant_folding=True)))
with self.session(graph=g, config=cfg):
self.assertAllClose(y.eval(), 6.)
self.assertAllClose(dx.eval(), 2.)
def _testZNoDepOnY(self, use_const_grad_ys):
@function.Defun(dtypes.float32, dtypes.float32)
def Foo(x, y): # pylint: disable=unused-argument
return x * 2
with ops.Graph().as_default():
      # z = Foo(x, y). z does not depend on y.
x = constant_op.constant(1.0)
y = constant_op.constant(2.0)
z = Foo(x, y)
if use_const_grad_ys:
dx, dy = gradients_impl.gradients([z], [x, y], grad_ys=[1.0])
else:
dx, dy = gradients_impl.gradients([z], [x, y])
with session.Session() as sess:
dx_val, dy_val = self.evaluate([dx, dy])
self.assertEqual([2.0], dx_val)
self.assertEqual([0.0], dy_val)
def testZNoDepOnY(self):
self._testZNoDepOnY(False)
def testZNoDepOnYConstGradYs(self):
# Tests for constant folding of grad_ys
self._testZNoDepOnY(True)
def testDefineFunctionNoArgs(self):
@function.Defun(func_name="AConstant")
def AConstant():
return constant_op.constant([42])
with ops.Graph().as_default():
call = AConstant()
self.assertEqual("AConstant", call.op.name)
with session.Session() as sess:
self.assertAllEqual([42], self.evaluate(call))
def testDefineFunctionNames(self):
@function.Defun(dtypes.float32, func_name="Foo")
def Foo(a):
return a + 1
with ops.Graph().as_default():
call1 = Foo([1.0])
self.assertEqual("Foo", call1.op.name)
call2 = Foo([1.0])
self.assertEqual("Foo_1", call2.op.name)
# pylint: disable=unexpected-keyword-arg
call3 = Foo([1.0], name="mine")
self.assertEqual("mine", call3.op.name)
with ops.name_scope("my"):
call4 = Foo([1.0], name="precious")
self.assertEqual("my/precious", call4.op.name)
def testNoOp(self):
@function.Defun(dtypes.float32)
def Foo(x):
y = logging_ops.Print(x, [], "Hello")
with ops.control_dependencies([y]):
z = control_flow_ops.no_op()
with ops.control_dependencies([z]):
return x * 2
with ops.Graph().as_default(), self.cached_session():
z = Foo(constant_op.constant(3.0))
self.assertAllEqual(z.eval(), 6.0)
def testAssertOp(self):
@function.Defun(dtypes.float32)
def Foo(x):
check = gen_logging_ops._assert(math_ops.greater(x, 0), [x])
with ops.control_dependencies([check]):
return x * 2
# Foo contains a stateful op (Assert).
self.assertEqual([("Assert", "Assert")], Foo.stateful_ops)
g = ops.Graph()
with g.as_default(), self.cached_session():
self.assertAllEqual(Foo(constant_op.constant(3.0)).eval(), 6.0)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion failed.*-3"):
self.assertAllEqual(Foo(constant_op.constant(-3.0)).eval(), 6.0)
@test_util.run_deprecated_v1
def testAssertWrapper(self):
@function.Defun(dtypes.float32)
def MyFn(x):
with ops.control_dependencies(
[control_flow_ops.Assert(math_ops.less_equal(x, 10.0), [x])]):
return array_ops.identity(x)
with self.cached_session():
self.assertEqual(1.0, MyFn(1.0).eval())
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion"):
_ = MyFn(100.0).eval()
@test_util.run_deprecated_v1
def testWhileLoopCallsFunc(self):
with self.session(use_gpu=True) as sess:
@function.Defun(dtypes.float32)
def Times2(x):
constant_two = constant_op.constant(2, dtypes.int32)
two_on_gpu = math_ops.cast(constant_two, dtypes.float32)
return x * two_on_gpu
def Body(x):
x2 = Times2(x)
x2.set_shape([])
return x2
loop = control_flow_ops.while_loop(lambda x: x < 1e5, Body, [1.0])
ans = self.evaluate(loop)
self.assertAllClose(ans, 131072.)
@test_util.run_deprecated_v1
def testControlFlowStrictness(self):
"""Inlined functions must not execute in a untaken control flow branch."""
@function.Defun(dtypes.int32)
def AssertFail(x):
# Assertion that always fails and does not have a data dependency on `x`.
assert_false = control_flow_ops.Assert(False, [42])
with ops.control_dependencies([assert_false]):
return array_ops.identity(x)
with ops.device("CPU"):
pred = array_ops.placeholder(dtypes.bool)
x = array_ops.placeholder(dtypes.int32)
cond = control_flow_ops.cond(pred, lambda: x + 1, lambda: AssertFail(x))
# pylint: disable=unnecessary-lambda
loop = control_flow_ops.while_loop(lambda y: pred,
lambda y: AssertFail(y), [x])
# pylint: enable=unnecessary-lambda
rewriter_config = rewriter_config_pb2.RewriterConfig(
dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)
# Enables inlining.
config = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0,
do_common_subexpression_elimination=True,
do_function_inlining=True,
do_constant_folding=True),
rewrite_options=rewriter_config))
with session.Session(config=config) as sess:
# Since the 'False' branch is not taken, the assertion should not fire.
self.assertEqual(4, sess.run(cond, {pred: True, x: 3}))
# The assertion should still fire if the False branch is taken.
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion"):
sess.run(cond, {pred: False, x: 3})
# Similarly for loops.
self.assertEqual(3, sess.run(loop, {pred: False, x: 3}))
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"assertion"):
sess.run(loop, {pred: True, x: 3})
@test_util.run_deprecated_v1
def testVar(self):
@function.Defun(dtypes.float32)
def Foo(x):
return x * x + 1
g = ops.Graph()
with g.as_default():
v = variables.Variable(constant_op.constant(10.0))
z = Foo(v)
with self.session(graph=g):
variables.global_variables_initializer().run()
self.assertAllEqual(z.eval(), 101.)
@test_util.run_deprecated_v1
def testResourceVarAsImplicitInput(self):
g = ops.Graph()
with g.as_default(), ops.device("cpu:0"):
expected_type = dtypes.float32
expected_shape = tensor_shape.TensorShape((4, 4))
v = variable_scope.get_variable(
"var", expected_shape, expected_type, use_resource=True)
@function.Defun()
def Foo():
captured = array_ops.identity(v)
self.assertEqual(expected_type, captured.dtype)
self.assertEqual(expected_shape, captured.shape)
return captured, array_ops.shape(captured)
expected_val = v.value()
actual_val, actual_shape = Foo()
with self.session(graph=g):
v.initializer.run()
self.assertAllEqual(expected_val.eval(), self.evaluate(actual_val))
self.assertAllEqual(expected_shape, self.evaluate(actual_shape))
def testDefineErrors(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "can not return None"):
@function.Defun()
def TwoNone():
return None, None
_ = TwoNone.definition
with self.assertRaisesRegexp(ValueError, "are not supported"):
@function.Defun()
def DefaultArg(unused_a=12):
return constant_op.constant([1])
_ = DefaultArg.definition
with self.assertRaisesRegexp(ValueError, "are not supported"):
@function.Defun()
def KwArgs(**unused_kwargs):
return constant_op.constant([1])
_ = KwArgs.definition
with self.assertRaisesRegexp(ValueError, "specified input types"):
@function.Defun(dtypes.float32)
def PlusMinusV2(a, b):
return a + b, b - a
_ = PlusMinusV2.definition
with self.assertRaisesRegexp(ValueError, "specified input types"):
@function.Defun(dtypes.float32, dtypes.float32, dtypes.float32)
def PlusMinusV3(a, b):
return a + b, b - a
_ = PlusMinusV3.definition
def testCallErrors(self):
@function.Defun()
def Const():
return constant_op.constant(1)
@function.Defun(dtypes.int32)
def PlusOne(a):
return a + 1
@function.Defun(dtypes.int32, dtypes.int32)
def PlusMinus(a, b):
return a + b, b - a
with ops.Graph().as_default():
_ = Const()
# pylint: disable=too-many-function-args
# pylint: disable=unexpected-keyword-arg
# pylint: disable=no-value-for-parameter
with self.assertRaisesRegexp(ValueError, "arguments: 0"):
_ = Const(1)
with self.assertRaisesRegexp(ValueError, "arguments: 0"):
_ = Const(1, 2)
with self.assertRaisesRegexp(ValueError, "arguments: 1"):
_ = PlusOne()
_ = PlusOne(1)
with self.assertRaisesRegexp(ValueError, "arguments: 1"):
_ = PlusOne(1, 2)
with self.assertRaisesRegexp(ValueError, "arguments: 2"):
_ = PlusMinus()
with self.assertRaisesRegexp(ValueError, "arguments: 2"):
_ = PlusMinus(1)
_ = PlusMinus(1, 2)
_ = PlusOne(1, name="p1")
with self.assertRaisesRegexp(ValueError, "Unknown keyword arguments"):
_ = PlusOne(1, device="/device:GPU:0")
def testFunctionDecorator(self):
@function.Defun(dtypes.float32, func_name="Minus1")
def Minus1(b):
return b - 1.0
with ops.Graph().as_default():
call1 = Minus1([2.])
self.assertTrue(isinstance(Minus1, function._DefinedFunction))
self.assertEqual(Minus1.name, "Minus1")
# pylint: disable=unexpected-keyword-arg
call2 = Minus1(call1, name="next")
# pylint: enable=unexpected-keyword-arg
self.assertEqual("next", call2.op.name)
with session.Session() as sess:
self.assertAllEqual([1], self.evaluate(call1))
self.assertAllEqual([0], self.evaluate(call2))
def testNestedFunction(self):
@function.Defun(dtypes.float32)
def Cube(x):
return x * x * x
@function.Defun(dtypes.float32, dtypes.float32)
def CubeXPlusY(x, y):
return Cube(x) + y
with ops.Graph().as_default():
z = CubeXPlusY(3.0, -2.0)
with self.cached_session():
self.assertAllEqual(z.eval(), 25.0)
def testNestedDefinedFunction(self):
@function.Defun(dtypes.float32, dtypes.float32)
def CubeXPlusY(x, y):
@function.Defun(dtypes.float32)
def Cube(x):
return x * x * x
return Cube(x) + y
with ops.Graph().as_default():
z = CubeXPlusY(3.0, -2.0)
with self.cached_session():
self.assertAllEqual(z.eval(), 25.0)
def testUnusedFunction(self):
invoked = False
# pylint: disable=unused-variable
@function.Defun()
def Unused():
invoked = True
return constant_op.constant(42.)
self.assertFalse(invoked)
g = ops.Graph()
with g.as_default():
@function.Defun()
def Unused2():
invoked = True
return constant_op.constant(7.)
constant_op.constant(3.)
# pylint: enable=unused-variable
self.assertFalse(invoked)
gdef = g.as_graph_def()
self.assertEqual(0, len(gdef.library.function))
@test_util.run_deprecated_v1
def testReduction(self):
g = ops.Graph()
# BN0 is computing batch normed matrix along rows.
def BN0(x):
mean = math_ops.reduce_mean(x, [0])
var = math_ops.reduce_mean(math_ops.square(x - mean)) # biased var
rstd = math_ops.rsqrt(var + 1e-8)
return (x - mean) * rstd
# Wraps BatchNorm in a tf function.
@function.Defun(dtypes.float32)
def BN1(x):
return BN0(x)
with g.as_default():
x = array_ops.placeholder(dtypes.float32)
y0 = BN0(x) # A plain graph
y1 = BN1(x) # A tf function
dx0, = gradients_impl.gradients([y0], [x])
dx1, = gradients_impl.gradients([y1], [x])
# Both should produce the same result and gradient.
with self.session(graph=g) as sess:
vals = sess.run([y0, y1, dx0, dx1], {x: np.random.uniform(size=(3, 7))})
self.assertAllClose(vals[0], vals[1])
self.assertAllClose(vals[2], vals[3])
@test_util.run_deprecated_v1
def testCapture(self):
g = ops.Graph()
with g.as_default():
w = variables.Variable(constant_op.constant([[1.0]]))
b = variables.Variable(constant_op.constant([2.0]))
# Foo() captures w and b.
@function.Defun(dtypes.float32)
def Foo(x):
# Plus() captures b.
@function.Defun(dtypes.float32)
def Plus(y):
return y + b
return Plus(math_ops.matmul(w, x))
y = Foo(constant_op.constant([[10.]]))
@function.Defun()
def Bar():
return w
z = Bar()
with self.session(graph=g):
variables.global_variables_initializer().run()
self.assertAllEqual(y.eval(), [[12.0]])
self.assertAllEqual(z.eval(), [[1.0]])
def testCaptureControls(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant([10.0])
x = logging_ops.Print(x, [x], "outer")
@function.Defun(dtypes.float32)
def Foo(y):
with ops.control_dependencies([x]):
y = logging_ops.Print(y, [y], "inner")
return y
with self.assertRaisesRegexp(ValueError, "not an element of this graph."):
# NOTE: We still do not support capturing control deps.
_ = Foo(x)
@test_util.run_deprecated_v1
def testCaptureInWhileLoop(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
@function.Defun()
def Foo():
return control_flow_ops.while_loop(lambda i: i < 10, lambda i: i + x,
[0])
y = Foo()
with self.session(graph=g) as sess:
self.assertEqual(self.evaluate(y), 10)
@test_util.run_deprecated_v1
def testCaptureInCond(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
@function.Defun(dtypes.bool)
def Foo(pred):
return control_flow_ops.cond(pred, lambda: x, lambda: x + 1)
y = Foo(True)
z = Foo(False)
with self.session(graph=g) as sess:
self.assertEqual(self.evaluate(y), 1)
self.assertEqual(self.evaluate(z), 2)
def testStableName(self):
@function.Defun()
def Foo(x, y, z):
return math_ops.tanh(math_ops.matmul(x, y) + z)
if sys.byteorder == "big":
self.assertEqual("Foo_kEdkAG8SJvg",
Foo.instantiate([dtypes.float32] * 3).name)
else:
self.assertEqual("Foo_aCYSbwBkR5A",
Foo.instantiate([dtypes.float32] * 3).name)
@test_util.run_deprecated_v1
def testSignatureHash(self):
# Foo.Inner and Bar.Inner have identical function body but have
# different signatures. They should be treated as two different functions.
@function.Defun()
def Foo(x):
@function.Defun()
def Inner(x):
return x + 10.
return Inner(x)
@function.Defun()
def Bar(x):
@function.Defun()
def Inner(x, unused_y, unused_z):
return x + 10.
return Inner(x, 2., 3.)
g = ops.Graph()
with g.as_default():
x = constant_op.constant(10.0)
y = Foo(x)
z = Bar(x)
with self.session(graph=g) as sess:
v0, v1 = self.evaluate([y, z])
self.assertAllEqual(v0, 20.)
self.assertAllEqual(v1, 20.)
def testShapeFunction(self):
@function.Defun(
dtypes.float32, shape_func=lambda op: [op.inputs[0].get_shape()])
def Foo(x):
return x + 1.0
@function.Defun(
shape_func=lambda op: [[1] + op.inputs[0].get_shape().as_list()])
def Bar(x):
return array_ops.stack([x])
g = ops.Graph()
with g.as_default():
x = Foo([1.0, 2.0])
self.assertEqual(x.get_shape().as_list(), [2])
y = Bar(array_ops.zeros([1, 2, 3]))
self.assertAllEqual(y.get_shape().as_list(), [1, 1, 2, 3])
@test_util.run_deprecated_v1
def testVariableReuse(self):
def LinearWithReuse(input_tensor, reuse=None):
size = input_tensor.shape.dims[1]
with variable_scope.variable_scope("linear", reuse=reuse):
w = variable_scope.get_variable(
"w", shape=[size, size], dtype=input_tensor.dtype)
return math_ops.matmul(input_tensor, w)
@function.Defun(dtypes.float32)
def Foo(inputs):
inputs = array_ops.reshape(inputs, [32, 100])
hidden = LinearWithReuse(inputs)
return LinearWithReuse(hidden, reuse=True)
input_op = array_ops.placeholder(shape=[32, 100], dtype=dtypes.float32)
output_op = Foo(input_op)
global_vars = variables.global_variables()
self.assertEqual(len(global_vars), 1)
self.assertEqual(global_vars[0].name, "linear/w:0")
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
output_val = sess.run(
output_op, feed_dict={input_op: np.random.rand(32, 100)})
self.assertEqual(output_val.shape, (32, 100))
@test_util.run_deprecated_v1
def testFunctionCallInDifferentVariableScopes(self):
@function.Defun(dtypes.float32)
def Foo(inputs):
var = variable_scope.get_variable(
"var",
shape=[10],
dtype=dtypes.float32,
initializer=init_ops.ones_initializer())
return inputs + var
input_op = array_ops.placeholder(shape=[10], dtype=dtypes.float32)
with variable_scope.variable_scope("vs1"):
out1_op = Foo(input_op)
with variable_scope.variable_scope("vs2"):
out2_op = Foo(input_op)
global_vars = variables.global_variables()
self.assertEqual(len(global_vars), 1)
self.assertEqual(global_vars[0].name, "vs1/var:0")
with session.Session() as sess:
self.evaluate(variables.global_variables_initializer())
out1, out2 = sess.run(
[out1_op, out2_op], feed_dict={input_op: np.linspace(1, 10, 10)})
self.assertAllEqual(out1, np.linspace(2, 11, 10))
self.assertAllEqual(out2, np.linspace(2, 11, 10))
def testTwoInputsSameOp(self):
g = ops.Graph()
with g.as_default():
m = array_ops.placeholder(dtypes.float32)
s, u, v = linalg_ops.svd(m)
ss = math_ops.reduce_sum(s)
uu = math_ops.reduce_sum(u)
vv = math_ops.reduce_sum(v)
result = ss + uu + vv
f = graph_to_function_def.graph_to_function_def(
g,
g.get_operations()[1:], # skip the placeholder
[s, u, v],
[result])
self.assertEqual(len(f.signature.input_arg), 3)
def testGradientWithIntegerFunctionArgument(self):
@function.Defun(dtypes.int32, dtypes.float32)
def Foo(t, x):
return x[t]
g = ops.Graph()
with g.as_default():
inp = array_ops.placeholder(dtypes.float32)
t = constant_op.constant(0, dtypes.int32)
out = Foo(t, inp)
dinp, = gradients_impl.gradients(out, [inp])
x = np.zeros((2,)).astype(np.float32)
with session.Session(graph=g) as sess:
self.assertAllClose(
np.array([1.0, 0.0]).astype(np.float32), sess.run(dinp, {inp: x}))
@test_util.run_deprecated_v1
def testFunctionMarkedStateful(self):
@function.Defun(dtypes.int32, dtypes.float32)
def Foo(t, x):
return x[t]
@function.Defun(dtypes.int64)
def Bar(x):
return x
# NOTE(mrry): All functions are currently considered stateless by the
# runtime, so we simulate a "stateful" function.
# TODO(b/70565970): Remove this hack when we are able to build stateful
# functions using the API.
# pylint: disable=protected-access
Foo._signature.is_stateful = True
Bar._signature.is_stateful = True
# pylint: enable=protected-access
result_1 = Foo(3, [1.0, 2.0, 3.0, 4.0])
result_2 = Bar(constant_op.constant(100, dtype=dtypes.int64))
with session.Session() as sess:
self.assertEqual(4.0, self.evaluate(result_1))
self.assertEqual(100, self.evaluate(result_2))
self.assertEqual((4.0, 100), sess.run((result_1, result_2)))
@test_util.run_deprecated_v1
def testStatefulFunction(self):
@function.Defun()
def FunctionWithStatelessOp():
return constant_op.constant(42.0)
@function.Defun()
def FunctionWithStatefulOp():
return random_ops.random_uniform([100], maxval=10, dtype=dtypes.int32)
@function.Defun()
def FunctionWithStatelessFunctionCall():
return FunctionWithStatelessOp()
@function.Defun()
def FunctionWithStatefulFunctionCall():
return FunctionWithStatefulOp()
# Test that the `is_stateful` bit is propagated.
self.assertFalse(FunctionWithStatelessOp.definition.signature.is_stateful)
self.assertTrue(FunctionWithStatefulOp.definition.signature.is_stateful)
self.assertFalse(
FunctionWithStatelessFunctionCall.definition.signature.is_stateful)
self.assertTrue(
FunctionWithStatefulFunctionCall.definition.signature.is_stateful)
# Ensure that two invocations of the same random-number-generating
# function produce different results.
result1 = FunctionWithStatefulFunctionCall()
result2 = FunctionWithStatefulFunctionCall()
# Statefulness affects how the function is treated by the various
# optimization passes, so run the test in each optimizer
# configuration.
for config in _OptimizerOptions():
with session.Session(config=config) as sess:
val1, val2 = sess.run((result1, result2))
self.assertFalse(all(val1 == val2))
val3, val4 = sess.run((result1, result2))
self.assertFalse(all(val3 == val1))
self.assertFalse(all(val4 == val2))
def testStatefulFunctionWithWhitelisting(self):
t = random_ops.random_uniform([100], maxval=10, dtype=dtypes.int32)
@function.Defun(capture_by_value=True)
def StatefulFn():
return t + constant_op.constant(3, dtype=dtypes.int32)
# First time we try to capture a stateful RandomUniform op.
with self.assertRaisesRegexp(ValueError, "Cannot capture a stateful node"):
res = StatefulFn()
    # This time we whitelist this op, so that it's recreated.
@function.Defun(capture_by_value=True, whitelisted_stateful_ops=set([t.op]))
def StatefulFn2():
return t + constant_op.constant(3, dtype=dtypes.int32)
res = StatefulFn2()
with session.Session() as sess:
r = sess.run(res)
for i in r:
self.assertGreaterEqual(i, 3)
@test_util.run_deprecated_v1
def testSameFunctionOnTwoDevices(self):
@function.Defun(dtypes.float32)
def AddOne(x):
return x + 1.0
with ops.device("/cpu:0"):
f_0 = AddOne(41.0)
with ops.device("/cpu:1"):
f_1 = AddOne(43.0)
for config in _OptimizerOptions():
config.device_count["CPU"] = 2
with session.Session(config=config) as sess:
self.assertEqual(42.0, self.evaluate(f_0))
self.assertEqual(44.0, self.evaluate(f_1))
self.assertEqual((42.0, 44.0), sess.run((f_0, f_1)))
@test_util.run_deprecated_v1
def testGuaranteedConstsAreCaptured(self):
var = variables.Variable(1.0)
const = array_ops.guarantee_const(var)
also_const = array_ops.identity(const)
still_const = array_ops.identity(also_const)
not_const = still_const + var
also_not_const = array_ops.placeholder(dtypes.float32)
@function.Defun()
def CapturesGuaranteedConst():
output = const + also_const + still_const + not_const + also_not_const
first, second, third, fourth, fifth = function.get_extra_args()
self.assertEqual("GuaranteeConst", first.consumers()[0].node_def.op)
self.assertEqual("GuaranteeConst", second.consumers()[0].node_def.op)
self.assertEqual("GuaranteeConst", third.consumers()[0].node_def.op)
self.assertNotEqual("GuaranteeConst", fourth.consumers()[0].node_def.op)
self.assertNotEqual("GuaranteeConst", fifth.consumers()[0].node_def.op)
return output
with self.session(use_gpu=False) as sess:
self.evaluate(var.initializer)
_ = sess.run(CapturesGuaranteedConst(), {also_not_const: 1.0})
@test_util.run_deprecated_v1
def testSameFunctionDifferentGrads(self):
def PartOne(x):
# Default grad is dx = dy * 2
@function.Defun(dtypes.float32)
def Foo(x):
return x * 2
return Foo(x)
def PartTwo(x):
@function.Defun(dtypes.float32, dtypes.float32)
def Bar(x, dy):
return x + dy # crazy backprop
@function.Defun(dtypes.float32, grad_func=Bar)
def Foo(x):
return x * 2
return Foo(x)
def PartThree(x):
def Bar(op, dy):
return op.inputs[0] * dy / 2 # crazy backprop
@function.Defun(dtypes.float32, python_grad_func=Bar)
def Foo(x):
return x * 2
return Foo(x)
g = ops.Graph()
with g.as_default():
x = constant_op.constant(100.)
x0 = x
y0 = PartOne(x0)
dx0, = gradients_impl.gradients(ys=[y0], xs=[x0])
x1 = x
y1 = PartTwo(x1)
dx1, = gradients_impl.gradients(ys=[y1], xs=[x1])
x2 = x
y2 = PartThree(x2)
dx2, = gradients_impl.gradients(ys=[y2], xs=[x2])
with self.session(graph=g) as sess:
v0, v1, v2 = self.evaluate([dx0, dx1, dx2])
self.assertAllEqual(v0, 2.)
self.assertAllEqual(v1, 101.)
self.assertAllEqual(v2, 50.)
class FunctionsFromProtos(test.TestCase):
def expectFunctionsEqual(self, func, grad_func=None, new_func=None):
if new_func is None:
# Make a copy of func.definition to avoid any bugs masked by using the
# same object
serialized_fdef = func.definition.SerializeToString()
# Serialize and then deserialize `func` to create `new_func`
fdef = function_pb2.FunctionDef.FromString(serialized_fdef)
new_func = function._from_definition(fdef, grad_func=grad_func)
self.assertEqual(func.name, new_func.name)
self.assertEqual(func.definition, new_func.definition)
self.assertEqual(func.grad_func_name, new_func.grad_func_name)
self.assertEqual(func.declared_input_types, new_func.declared_input_types)
self.assertEqual(func.captured_inputs, new_func.captured_inputs)
@test_util.run_deprecated_v1
def testBasic(self):
@function.Defun(dtypes.float32, dtypes.float32)
def Foo(x, y):
return x + y
self.expectFunctionsEqual(Foo)
def testGradFunc(self):
@function.Defun(dtypes.float32, dtypes.float32)
def G(x, dy):
return x * dy
@function.Defun(dtypes.float32, grad_func=G)
def F(x):
return math_ops.exp(x) - math_ops.exp(-x)
self.expectFunctionsEqual(F, grad_func=G)
def testCapturedInputs(self):
c = constant_op.constant(10, dtypes.int64)
@function.Defun(dtypes.int64)
def Foo(x):
return x + c
new_func = function._from_definition(Foo.definition)
self.assertEqual(Foo.name, new_func.name)
self.assertEqual(Foo.definition, new_func.definition)
self.assertEqual(Foo.grad_func_name, new_func.grad_func_name)
# Captured inputs are added as regular inputs to the function definition
self.assertEqual(new_func.declared_input_types,
Foo.declared_input_types + (dtypes.int64,))
self.assertEqual(len(new_func.captured_inputs), 0)
def testNestedFunctions(self):
@function.Defun(dtypes.float32)
def Outer(x):
@function.Defun(dtypes.float32)
def Inner(y):
return y + 1
return Inner(Inner(x))
self.expectFunctionsEqual(Outer)
def testFromLibrary(self):
# Define some functions with different gradient functions. Note that many of
# the below functions are identical since function bodies don't matter for
# this test.
@function.Defun(dtypes.float32, dtypes.float32)
def G1(x, dy):
return x * dy
@function.Defun(dtypes.float32, dtypes.float32)
def G2(x, dy):
return x * dy
# F1 and F2 have the same gradient function
@function.Defun(dtypes.float32, grad_func=G1)
def F1(x):
return math_ops.exp(x) - math_ops.exp(-x)
@function.Defun(dtypes.float32, grad_func=G1)
def F2(x):
return math_ops.exp(x) - math_ops.exp(-x)
# F3 has a different gradient function
@function.Defun(dtypes.float32, grad_func=G2)
def F3(x):
return math_ops.exp(x) - math_ops.exp(-x)
# F4 has no gradient function
@function.Defun(dtypes.float32)
def F4(x):
return math_ops.exp(x) - math_ops.exp(-x)
# Instantiate all functions
g = ops.Graph()
with g.as_default():
c = constant_op.constant(1.0, dtypes.float32)
f1 = F1(c)
f2 = F2(c)
f3 = F3(c)
f4 = F4(c)
gradients_impl.gradients([f1, f2, f3, f4], c)
library = g.as_graph_def().library
new_funcs = function.from_library(library)
def CheckNewFunc(func):
new_func = [f for f in new_funcs if f.name == func.name]
self.assertEqual(len(new_func), 1)
self.expectFunctionsEqual(func, new_func=new_func[0])
CheckNewFunc(G1)
CheckNewFunc(G2)
CheckNewFunc(F1)
CheckNewFunc(F2)
CheckNewFunc(F3)
CheckNewFunc(F4)
def testFromLibraryEmptyLib(self):
library = function_pb2.FunctionDefLibrary()
self.assertEqual(len(function.from_library(library)), 0)
def testFromLibraryMissingFuncDef(self):
@function.Defun(dtypes.float32, dtypes.float32)
def G1(x, dy):
return x * dy
@function.Defun(dtypes.float32)
def F1(x):
return math_ops.exp(x) - math_ops.exp(-x)
gradient = function_pb2.GradientDef()
gradient.function_name = F1.name
gradient.gradient_func = G1.name
# Create invalid function def that is missing G1 function def
library = function_pb2.FunctionDefLibrary()
library.gradient.extend([gradient])
library.function.extend([F1.definition])
with self.assertRaisesRegexp(
ValueError,
"FunctionDefLibrary missing 'G1_[0-9a-zA-Z]{8,11}' FunctionDef"):
function.from_library(library)
# Create invalid function def that is missing F1 function def
library = function_pb2.FunctionDefLibrary()
library.gradient.extend([gradient])
library.function.extend([G1.definition])
with self.assertRaisesRegexp(
ValueError,
"FunctionDefLibrary missing 'F1_[0-9a-zA-Z]{8,11}' FunctionDef"):
function.from_library(library)
def testFromLibraryCyclicGradFuncs(self):
@function.Defun(dtypes.float32)
def F1(x):
return math_ops.exp(x) - math_ops.exp(-x)
@function.Defun(dtypes.float32)
def F2(x):
return math_ops.exp(x) - math_ops.exp(-x)
# Create invalid function def library where F1 has gradient function F2 and
# F2 has gradient function F1
library = function_pb2.FunctionDefLibrary()
library.function.extend([F1.definition, F2.definition])
gradient1 = function_pb2.GradientDef()
gradient1.function_name = F1.name
gradient1.gradient_func = F2.name
gradient2 = function_pb2.GradientDef()
gradient2.function_name = F2.name
gradient2.gradient_func = F1.name
library.gradient.extend([gradient1, gradient2])
with self.assertRaisesRegexp(
ValueError, "FunctionDefLibrary contains cyclic gradient functions!"):
function.from_library(library)
def testExperimentalAttrs(self):
@function.Defun(dtypes.int32, experimental_tag="tag_value")
def FunctionWithStrAttr(i):
return array_ops.identity(i)
@function.Defun(dtypes.int32, experimental_tag=123)
def FunctionWithIntAttr(i):
return array_ops.identity(i)
@function.Defun(dtypes.int32, experimental_tag=123.0)
def FunctionWithFloatAttr(i):
return array_ops.identity(i)
@function.Defun(dtypes.int32, experimental_tag=True)
def FunctionWithBoolAttr(i):
return array_ops.identity(i)
self.assertTrue("experimental_tag" in FunctionWithStrAttr.definition.attr)
self.assertEqual(FunctionWithStrAttr.definition.attr["experimental_tag"].s,
b"tag_value")
self.assertTrue("experimental_tag" in FunctionWithIntAttr.definition.attr)
self.assertEqual(FunctionWithIntAttr.definition.attr["experimental_tag"].i,
123)
self.assertTrue("experimental_tag" in FunctionWithFloatAttr.definition.attr)
self.assertEqual(
FunctionWithFloatAttr.definition.attr["experimental_tag"].f, 123.0)
self.assertTrue("experimental_tag" in FunctionWithBoolAttr.definition.attr)
self.assertEqual(FunctionWithBoolAttr.definition.attr["experimental_tag"].b,
True)
class FunctionOverloadTest(test.TestCase):
@test_util.run_deprecated_v1
def testBasic(self):
@function.Defun()
def Sinh(x):
return 1 / 2. * (math_ops.exp(x) - math_ops.exp(-x))
g = ops.Graph()
with g.as_default():
x = Sinh(constant_op.constant(0.25, dtypes.float32))
y = Sinh(constant_op.constant(0.25, dtypes.float64))
with self.session(graph=g):
self.assertAllClose(x.eval(), np.sinh(0.25))
self.assertAllClose(y.eval(), np.sinh(0.25))
def testGradient(self):
@function.Defun(func_name="Spec")
def G(x, dy):
return x * dy
@function.Defun(grad_func=G)
def F(x):
return math_ops.exp(x) - math_ops.exp(-x)
for dtype in [dtypes.float32, dtypes.float64]:
g = ops.Graph()
with g.as_default():
x = constant_op.constant(0.25, dtype)
y = F(x)
dx, = gradients_impl.gradients(y, x)
with self.session(graph=g):
self.assertAllClose(dx.eval(), 0.25)
def testDocString(self):
@function.Defun()
def Foo(x):
"""Successor of x."""
return x + 1
g = ops.Graph()
with g.as_default():
_ = Foo(1)
self.assertEqual(g.as_graph_def().library.function[0].signature.description,
"Successor of x.")
class FunctionCaptureByValueTest(test.TestCase):
@test_util.run_deprecated_v1
def testCaptureByValue(self):
g = ops.Graph()
with g.as_default():
w = constant_op.constant([[1.0]])
b = constant_op.constant([2.0])
# Foo() captures w and b.
@function.Defun(dtypes.float32, capture_by_value=True)
def Foo(x):
# Plus() captures b.
@function.Defun(dtypes.float32, capture_by_value=True)
def Plus(y):
return y + b
self.assertEqual(0, len(Plus.captured_inputs))
return Plus(math_ops.matmul(w, x))
y = Foo(constant_op.constant([[10.]]))
self.assertEqual(0, len(Foo.captured_inputs))
with self.session(graph=g):
self.assertAllEqual(y.eval(), [[12.0]])
class UnrollLSTMTest(test.TestCase):
BATCH_SIZE = 16
LSTM_DIMS = 32
NUM_UNROLL = 20
def _Weights(self):
dims = self.LSTM_DIMS
return random_ops.random_uniform([2 * dims, 4 * dims], -1, 1, seed=123456)
def _Input(self):
return random_ops.random_uniform(
[self.NUM_UNROLL, self.BATCH_SIZE, self.LSTM_DIMS], seed=654321)
# Helper to construct a LSTM cell graph.
@classmethod
def LSTMCell(cls, x, mprev, cprev, weights):
xm = array_ops.concat([x, mprev], 1)
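    # A single matmul on the concatenated [x, m_prev] produces all four gate
    # pre-activations, split below into cell input (i_i), input gate (i_g),
    # forget gate (f_g) and output gate (o_g).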
i_i, i_g, f_g, o_g = array_ops.split(
value=math_ops.matmul(xm, weights), num_or_size_splits=4, axis=1)
new_c = math_ops.sigmoid(f_g) * cprev + math_ops.sigmoid(
i_g) * math_ops.tanh(i_i)
new_c = math_ops.maximum(math_ops.minimum(new_c, 50.0), -50.0)
new_m = math_ops.sigmoid(o_g) * math_ops.tanh(new_c)
return new_m, new_c
def _BuildForward(self, weights, inp, mode="cell"):
def Loop(cell, w, i):
x = array_ops.unstack(i, self.NUM_UNROLL)
m = array_ops.zeros_like(x[0])
c = array_ops.zeros_like(x[0])
for i in range(self.NUM_UNROLL):
m, c = cell(x[i], m, c, w)
return m
cell = UnrollLSTMTest.LSTMCell
if mode == "complete":
# Constructs the complete graph in python.
return Loop(cell, weights, inp)
cell = function.Defun(dtypes.float32, dtypes.float32, dtypes.float32,
dtypes.float32)(
cell)
if mode == "cell":
# Just represent the LSTM as a function.
return Loop(cell, weights, inp)
if mode == "loop":
# Wraps the whole loop as a function.
@function.Defun(dtypes.float32, dtypes.float32)
def LSTMLoop(w, i):
return Loop(cell, w, i)
return LSTMLoop(weights, inp)
if mode == "loop10":
      # Wraps 10 LSTM steps into one function, and wraps the whole loop
      # into another function that calls the former.
# Groups 10 steps at a time.
@function.Defun(dtypes.float32, dtypes.float32, dtypes.float32,
*([dtypes.float32] * 10))
def Loop10(w, m, c, *args):
for x in args:
m, c = cell(x, m, c, w)
return m, c
@function.Defun(dtypes.float32, dtypes.float32)
def LSTMLoop10(weights, inp):
x = array_ops.unstack(inp, self.NUM_UNROLL)
m = array_ops.zeros_like(x[0])
c = array_ops.zeros_like(x[0])
assert self.NUM_UNROLL % 10 == 0
for i in range(0, self.NUM_UNROLL, 10):
m, c = Loop10(weights, m, c, *x[i:i + 10])
return m
return LSTMLoop10(weights, inp)
def testUnrollLSTM(self):
# Run one step of the unrolled lstm graph.
def RunForward(mode, cfg=None):
tf_logging.info("mode = %s", mode)
g = ops.Graph()
start = time.time()
with g.as_default():
weights = self._Weights()
inp = self._Input()
m = self._BuildForward(weights, inp, mode)
gdef = g.as_graph_def()
finish = time.time()
tf_logging.info("time: %f txt size: %d gdef bin size: %d", finish - start,
len(str(gdef)), len(gdef.SerializeToString()))
with g.as_default(), session.Session(config=cfg) as sess:
return self.evaluate(m)
mv0 = RunForward("complete")
for cfg in _OptimizerOptions():
tf_logging.info("cfg = %s", cfg)
mv1 = RunForward("cell", cfg)
mv2 = RunForward("loop", cfg)
mv3 = RunForward("loop10", cfg)
self.assertAllClose(mv0, mv1, rtol=1e-4)
self.assertAllClose(mv0, mv2, rtol=1e-4)
self.assertAllClose(mv0, mv3, rtol=1e-4)
def testUnrollLSTMGrad(self):
# Run one step of the unrolled lstm graph.
def RunForwardBackward(mode, cfg=None):
tf_logging.info("mode = %s", mode)
g = ops.Graph()
start = time.time()
with g.as_default():
weights = self._Weights()
inp = self._Input()
m = self._BuildForward(weights, inp, mode)
loss = math_ops.reduce_sum(math_ops.square(m))
dw = gradients_impl.gradients([loss], [weights])
gdef = g.as_graph_def()
finish = time.time()
tf_logging.info("time: %f txt size: %d gdef bin size: %d", finish - start,
len(str(gdef)), len(gdef.SerializeToString()))
with g.as_default(), session.Session(config=cfg) as sess:
return self.evaluate(dw)
d0 = RunForwardBackward("complete")
for cfg in _OptimizerOptions():
tf_logging.info("cfg = %s", cfg)
d1 = RunForwardBackward("cell", cfg)
d2 = RunForwardBackward("loop", cfg)
d3 = RunForwardBackward("loop10", cfg)
self.assertAllClose(d0, d1, rtol=1e-4, atol=1e-4)
self.assertAllClose(d0, d2, rtol=1e-4, atol=1e-4)
self.assertAllClose(d0, d3, rtol=1e-4, atol=1e-4)
class FunctionInlineControlTest(test.TestCase):
def testFoo(self):
dtype = dtypes.float32
cfg = config_pb2.ConfigProto(
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0,
do_common_subexpression_elimination=True,
do_function_inlining=True,
do_constant_folding=True)))
cell_func_call_pattern = re.compile(r"Cell[^/]*\(")
for noinline in [False, True]:
@function.Defun(dtype, noinline=noinline)
def Cell(v):
# If v is a vector [n, 1], x is a big square matrix.
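        # (the [n, 1] column broadcasts against its [1, n] transpose, giving an [n, n] matrix)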
x = math_ops.tanh(v + array_ops.transpose(v, [1, 0]))
return math_ops.reduce_sum(x, 1, keepdims=True)
@function.Defun(dtype)
def Forward(x):
for _ in range(10):
# pylint: disable=cell-var-from-loop
x = Cell(x)
return math_ops.reduce_sum(x, [0, 1])
self.assertEqual(noinline, Cell.definition.attr["_noinline"].b)
g = ops.Graph()
with g.as_default():
x = array_ops.placeholder(dtype)
y = Forward(x)
dx, = gradients_impl.gradients([y], [x])
np.random.seed(321)
inp = np.random.uniform(-1, 1, [16, 1]).astype(np.float32)
run_metadata = config_pb2.RunMetadata()
with session.Session(graph=g, config=cfg) as sess:
ans = sess.run(
[y, dx], {x: inp},
run_metadata=run_metadata,
options=config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE))
print(ans[0], np.sum(ans[1]))
self.assertAllClose(ans[0], 255.971, rtol=1e-3)
self.assertAllClose(np.sum(ans[1]), 13.0408, rtol=1e-3)
def MetadataHasCell(run_metadata):
for dev_stats in run_metadata.step_stats.dev_stats:
for node_stats in dev_stats.node_stats:
if cell_func_call_pattern.search(node_stats.timeline_label):
return True
return False
self.assertEqual(MetadataHasCell(run_metadata), noinline)
class ModuleFunctionTest(test.TestCase):
@test_util.run_deprecated_v1
def testBasic(self):
@function.Defun(*[dtypes.float32] * 3)
def LinearWithCApi(w, b, x):
return nn_ops.relu(math_ops.matmul(x, w) + b)
@function.Defun(*[dtypes.float32] * 5)
def Linear2WithCApi(w1, b1, w2, b2, x):
return LinearWithCApi(w2, b2, LinearWithCApi(w1, b1, x))
with ops.Graph().as_default():
a, b, c, d, e = [
constant_op.constant([[_]], dtype=dtypes.float32) for _ in range(5)
]
y = LinearWithCApi(a, b, c)
z = Linear2WithCApi(a, b, c, d, e)
with session.Session() as sess:
self.assertAllEqual([[1]], self.evaluate(y))
self.assertAllEqual([[5]], self.evaluate(z))
class VariableHoistingTest(test.TestCase):
def _testSimpleModel(self, use_forward_func, use_resource=False):
def _Model(x):
w = variable_scope.get_variable(
"w", (64, 64),
initializer=init_ops.random_uniform_initializer(seed=312),
use_resource=use_resource)
b = variable_scope.get_variable(
"b", (64),
initializer=init_ops.zeros_initializer(),
          use_resource=use_resource)
return math_ops.sigmoid(math_ops.matmul(x, w) + b)
@function.Defun()
def Model(x):
return _Model(x)
cvars = []
@function.Defun()
def Grad(x, y0):
if use_forward_func:
y = Model(x)
else:
y = _Model(x)
loss = math_ops.reduce_mean(
math_ops.reduce_sum(y0 * math_ops.log(y), 1), 0)
arg_w, arg_b = function.get_extra_args()
self.assertEqual(arg_w.get_shape(), tensor_shape.TensorShape([64, 64]))
self.assertEqual(arg_b.get_shape(), tensor_shape.TensorShape([64]))
dw, db = gradients_impl.gradients(loss, [arg_w, arg_b])
cvars.extend(function.get_extra_vars())
return loss, dw, db
g = ops.Graph()
with g.as_default():
x = random_ops.random_normal([64, 64], seed=100)
y0 = random_ops.random_normal([64, 64], seed=200)
with variable_scope.variable_scope("Foo"):
loss, dw, db = Grad(x, y0)
self.assertEqual(2, len(cvars))
w, b = cvars[:2]
self.assertEqual("Foo/w", w.op.name)
self.assertEqual("Foo/b", b.op.name)
with self.session(graph=g) as sess:
self.evaluate(variables.global_variables_initializer())
w, b, x, y0, loss, dw, db = self.evaluate([w, b, x, y0, loss, dw, db])
self.assertAllEqual(w.shape, (64, 64))
self.assertAllClose(np.sum(w), 2050.44)
self.assertAllEqual(b.shape, (64,))
self.assertAllClose(np.sum(b), 0.0)
self.assertAllClose(loss, -2.27, rtol=1e-2)
self.assertAllEqual(dw.shape, (64, 64))
self.assertAllClose(np.sum(dw), -1.04, rtol=1e-2)
self.assertAllEqual(db.shape, (64,))
self.assertAllClose(np.sum(db), 0.509, rtol=1e-2)
@test_util.run_deprecated_v1
def testBasic(self):
self._testSimpleModel(True)
self._testSimpleModel(False)
@test_util.run_deprecated_v1
def testBasicResource(self):
self._testSimpleModel(True, use_resource=True)
self._testSimpleModel(False, use_resource=True)
class DevicePlacementTest(test.TestCase):
def testNoDeviceGraph(self):
with ops.Graph().as_default():
@function.Defun(*[dtypes.float32] * 2)
def Matmul(a, b):
return math_ops.matmul(a, b)
Matmul(1., 2.)
gdef = ops.get_default_graph().as_graph_def()
self.assertAllEqual(len(gdef.library.function), 1)
fdef = gdef.library.function[0]
for node in fdef.node_def:
self.assertAllEqual(node.device, "")
def testNestedDevices(self):
with ops.Graph().as_default(), ops.device("CPU:0"):
@function.Defun(*[dtypes.float32] * 2)
def Matmul(a, b):
return math_ops.matmul(a, b)
with ops.device("CPU:1"):
@function.Defun(*[dtypes.float32] * 2)
def Divide(a, b):
return math_ops.divide(a, b)
Divide(Matmul(1., 2.), 3.)
gdef = ops.get_default_graph().as_graph_def()
matmul_fdef = [
f for f in gdef.library.function if "Matmul" in f.signature.name
]
divide_fdef = [
f for f in gdef.library.function if "Divide" in f.signature.name
]
self.assertAllEqual(len(matmul_fdef), 1)
self.assertAllEqual(len(divide_fdef), 1)
for node in matmul_fdef[0].node_def:
self.assertAllEqual(node.device, "/device:CPU:0")
for node in divide_fdef[0].node_def:
self.assertAllEqual(node.device, "/device:CPU:1")
def _testNestedDeviceWithSameFunction(self, func_name):
def MatmulWrap(a, b):
@function.Defun(
func_name=func_name, *[dtypes.int32] * 2)
def Matmul(a, b):
return math_ops.matmul(a, b)
return Matmul(a, b)
with ops.Graph().as_default(), ops.device("CPU:0"):
c = MatmulWrap(1, 2)
with ops.device("CPU:1"):
MatmulWrap(c, 3)
gdef = ops.get_default_graph().as_graph_def()
devices = []
for node in gdef.library.function[0].node_def:
devices.append(node.device)
for node in gdef.library.function[1].node_def:
devices.append(node.device)
self.assertAllEqual(sorted(devices), ["/device:CPU:0", "/device:CPU:1"])
def testFunctionWithName(self):
with self.assertRaises(InvalidArgumentError) as cm:
self._testNestedDeviceWithSameFunction("MatmulTest")
self.assertEqual(
cm.exception.message,
"Cannot add function \'MatmulTest\' because a different "
"function with the same name already exists.")
def testFunctionWithoutName(self):
self._testNestedDeviceWithSameFunction(None)
if __name__ == "__main__":
test.main()
|
{
"content_hash": "82cddc4379b03f96da7f1c31427aaf87",
"timestamp": "",
"source": "github",
"line_count": 1850,
"max_line_length": 80,
"avg_line_length": 31.96108108108108,
"alnum_prop": 0.6326275199567041,
"repo_name": "jendap/tensorflow",
"id": "7543376bcf274dc6edf821e19838c4aa574826ff",
"size": "59816",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/framework/function_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2867"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "606044"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "55619540"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "78675"
},
{
"name": "Go",
"bytes": "1383418"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "900190"
},
{
"name": "Jupyter Notebook",
"bytes": "2510235"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "77367"
},
{
"name": "Objective-C",
"bytes": "16140"
},
{
"name": "Objective-C++",
"bytes": "102889"
},
{
"name": "PHP",
"bytes": "14644"
},
{
"name": "Pascal",
"bytes": "399"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "45358371"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "530065"
},
{
"name": "Smarty",
"bytes": "25609"
}
],
"symlink_target": ""
}
|
Experiment(description='More thorough version with more data',
data_dir='../data/tsdlr-400/',
max_depth=10,
random_order=True,
k=1,
debug=False,
local_computation=False,
n_rand=9,
sd=4,
max_jobs=400,
verbose=False,
make_predictions=False,
skip_complete=True,
results_dir='../results/2013-08-31-time-series/',
iters=250,
base_kernels='StepTanh,BurstTanhSE,Per,Cos,Lin,SE,Const,MT5',
zero_mean=True,
random_seed=0,
period_heuristic=5)
|
{
"content_hash": "7a9dcd24b523e489a19634729df4ae6c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 72,
"avg_line_length": 33.473684210526315,
"alnum_prop": 0.5141509433962265,
"repo_name": "codeaudit/gpss-research",
"id": "b9be50377915bd2dab46c07ae6c885f71433c2f4",
"size": "636",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "experiments/2013-08-31-time-series.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1284"
},
{
"name": "C++",
"bytes": "42466"
},
{
"name": "CSS",
"bytes": "77"
},
{
"name": "FORTRAN",
"bytes": "141064"
},
{
"name": "HTML",
"bytes": "194628"
},
{
"name": "M",
"bytes": "2972"
},
{
"name": "Makefile",
"bytes": "2229"
},
{
"name": "Mathematica",
"bytes": "203613"
},
{
"name": "Matlab",
"bytes": "872710"
},
{
"name": "OpenEdge ABL",
"bytes": "4146"
},
{
"name": "Python",
"bytes": "571852"
},
{
"name": "R",
"bytes": "5180"
},
{
"name": "Shell",
"bytes": "8364"
},
{
"name": "TeX",
"bytes": "3594635"
}
],
"symlink_target": ""
}
|
from typing import Any, List, Union, Iterable, Optional
import bonsai
class LDAPValueList(list):
"""
Modified list that tracks added and deleted values. It also contains
    only unique elements. The elements are compared by their lower-cased
    string representations.
A new LDAPValueList can be created optionally from an existing
sequence object.
:param items: a sequence object.
:raises ValueError: if `items` has a non-unique element.
"""
__slots__ = ("__deleted", "__added", "__status")
def __init__(self, items: Optional[Iterable[Any]] = None) -> None:
super().__init__()
self.__added = [] # type: List[str]
self.__deleted = [] # type: List[str]
self.__status = 0
if items:
for item in items:
self.append(item)
@staticmethod
def __balance(lst1: List[str], lst2: List[str], value: Any) -> None:
"""
        Balance the two bookkeeping lists (__added, __deleted) so that the same
        element never ends up in both: if `value` is in `lst1` it is removed
        from there, otherwise it is recorded in `lst2`.
"""
try:
lst1.remove(value)
except ValueError:
lst2.append(value)
def _append_unchecked(self, value: Any) -> None:
super().append(value)
def _remove_unchecked(self, value: Any) -> None:
try:
super().remove(value)
except ValueError:
pass
@property
def _status_dict(self) -> dict:
return {
"@status": self.__status,
"@added": self.__added.copy(),
"@deleted": self.__deleted.copy(),
}
@_status_dict.setter
def _status_dict(self, value: Any) -> None:
raise TypeError("Can not change _status_dict")
def __contains__(self, item: Any) -> bool:
return bonsai.utils._unique_contains(self, item)[0]
def __delitem__(self, idx: Union[int, slice]) -> None:
old_value = super().__getitem__(idx)
if type(idx) == slice:
for item in old_value:
self.__balance(self.__added, self.__deleted, item)
else:
self.__balance(self.__added, self.__deleted, old_value)
super().__delitem__(idx)
def __mul__(self, value: Any) -> "LDAPValueList":
raise TypeError("Cannot multiple LDAPValueList.")
def __add__(self, other: Iterable) -> "LDAPValueList":
if type(other) != list and type(other) != LDAPValueList:
raise TypeError("Can only concatenate list and LDAPValueList.")
new_list = self.copy()
new_list.extend(other)
return new_list
def __iadd__(self, other: Iterable) -> "LDAPValueList":
if type(other) != list and type(other) != LDAPValueList:
raise TypeError("Can only concatenate list and LDAPValueList.")
self.extend(other)
return self
def __setitem__(self, idx: Union[int, slice], value: Any) -> None:
old_value = self[idx]
if type(idx) == slice:
for item in value:
if item in self:
raise ValueError("%r is already in the list." % item)
for item in old_value:
self.__balance(self.__added, self.__deleted, item)
for item in value:
self.__balance(self.__deleted, self.__added, item)
else:
if value in self:
raise ValueError("%r is already in the list." % value)
self.__balance(self.__added, self.__deleted, old_value)
self.__balance(self.__deleted, self.__added, value)
super().__setitem__(idx, value)
def append(self, item: Any) -> None:
"""
Add a unique item to the end of the LDAPValueList.
:param item: New item.
:raises ValueError: if the `item` is not unique.
"""
if item in self:
raise ValueError("%r is already in the list." % item)
self.__balance(self.__deleted, self.__added, item)
self.__status = 1
super().append(item)
def extend(self, items: Iterable[Any]) -> None:
"""
Extend the LDAPValueList by appending all the items in the given
        list. All elements in `items` must be unique and also not
represented in the LDAPValueList.
:param items: List of new items.
:raises ValueError: if any of the items is already in the list.
"""
for item in items:
if item in self:
raise ValueError("%r is already in the list." % item)
for item in items:
self.__balance(self.__deleted, self.__added, item)
self.__status = 1
super().extend(items)
def insert(self, idx: int, value: Any) -> None:
"""
Insert a unique item at a given position.
:param int idx: the position.
:param value: the new item.
:raises ValueError: if the `item` is not unique.
"""
if value in self:
raise ValueError("%r is already in the list." % value)
self.__balance(self.__deleted, self.__added, value)
self.__status = 1
super().insert(idx, value)
def remove(self, value: Any) -> None:
"""
Remove the first item from the LDAPValueList whose value is `value`.
:param value: the item to be removed.
        :raises ValueError: if `value` is not in the list.
"""
contain, obj = bonsai.utils._unique_contains(self, value)
if not contain:
raise ValueError("%r is not in the list." % value)
super().remove(obj)
self.__status = 1
self.__balance(self.__added, self.__deleted, obj)
def pop(self, idx: int = -1) -> Any:
"""
Remove the item at the given position in the LDAPValueList, and
return it. If no index is specified, pop() removes and returns the
last item in the list.
:param int idx: optional index.
"""
value = super().pop(idx)
self.__balance(self.__added, self.__deleted, value)
self.__status = 1
return value
def clear(self) -> None:
""" Remove all items from the LDAPValueList. """
del self[:]
def copy(self) -> "LDAPValueList":
"""
Return a shallow copy of the LDAPValueList. This includes
the status and the previously added and deleted items.
:rtype: LDAPValueList
:return: The copy of the LDAPValueList.
"""
new_list = LDAPValueList()
for item in self:
new_list._append_unchecked(item)
new_list.__added = self.__added.copy()
new_list.__deleted = self.__deleted.copy()
new_list.__status = self.__status
return new_list
@property
def added(self) -> List:
""" List of the added values. """
return self.__added
@added.setter
def added(self, value: Any) -> None:
raise ValueError("Added attribute cannot be changed.")
@property
def deleted(self) -> List:
""" List of the deleted values. """
return self.__deleted
@deleted.setter
def deleted(self, value: Any) -> None:
raise ValueError("Deleted attribute cannot be changed.")
@property
def status(self) -> int:
"""
The status of the LDAPValueList. The status can be:
- 0: unchanged.
- 1: added or deleted item to list.
- 2: replaced the entire list.
"""
return self.__status
@status.setter
def status(self, value: int) -> None:
if type(value) != int:
raise TypeError("Status must be int.")
if value > -1 and value < 3:
self.__status = value
else:
raise ValueError("Status must be between 0 and 2.")
|
{
"content_hash": "48d939d9ca957b0132fb9dcb8b642079",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 76,
"avg_line_length": 33.25213675213675,
"alnum_prop": 0.5612389153065158,
"repo_name": "Noirello/bonsai",
"id": "24ec4e0223d065118e3c9d65187ad1afcfdee994",
"size": "7781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/bonsai/ldapvaluelist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "224338"
},
{
"name": "Python",
"bytes": "131162"
},
{
"name": "Shell",
"bytes": "1912"
}
],
"symlink_target": ""
}
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
import datetime
from django.utils import timezone
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.models import User
from helper import random_alphanumeric as ran
from django.core.mail import send_mail, EmailMessage
from easy_ecom import settings_sensitive
# Create your views here.
|
{
"content_hash": "ac06af750f30d26789e53d4973191c4e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 59,
"avg_line_length": 41.38461538461539,
"alnum_prop": 0.8420074349442379,
"repo_name": "bharathramh92/easy-ecom",
"id": "3d0a71ed45abead43d927dc2e339d4b2e6f2ac7f",
"size": "538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "store/views.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "10768"
},
{
"name": "Python",
"bytes": "113947"
}
],
"symlink_target": ""
}
|
import json
import os.path
import requests
from html.parser import HTMLParser
from argparse import ArgumentParser
emojiListURL = 'http://unicode.org/emoji/charts/full-emoji-list.html'
class EmojiParser(HTMLParser):
def __init__(self, imgPath):
HTMLParser.__init__(self)
self.imgpath = imgPath
self.nextData = None
self.Categories = []
self.Emojis = []
self.nextChar = []
self.nextFilename = None
return
def handle_starttag (self, tag, attrs):
if tag == 'th' and ('class', 'bighead') in attrs:
self.nextData = 'category'
elif tag == 'td' and ('class', 'code') in attrs:
self.nextData = 'code'
elif tag == 'td' and ('class', 'name') in attrs:
self.nextData = 'name'
return
def handle_data(self, data):
if self.nextData == 'category':
print('Reading Category: {}'.format(data))
self.Categories.append([data, [data.split()[0].lower()]])
self.nextData = None
elif self.nextData == 'code':
codes = []
fileBase = []
for code in data.split():
splt = code.split('+')[-1]
codes.append(int(splt, 16))
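				# U+200D (zero-width joiner) and U+FE0F (variation selector-16) stay in the
				# character codes but are left out of the image file base name built below.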
if splt != '200D' and splt != 'FE0F':
fileBase.append(splt)
filename = '-'.join(fileBase).lower() + '.png'
if os.path.exists(os.path.join(self.imgpath, filename)):
self.nextFilename = os.path.join('.', 'png', filename)
self.nextChar.append(codes)
self.nextData = None
elif self.nextData == 'name':
if self.nextFilename != None:
self.nextChar.append(data)
self.nextChar.append(self.nextFilename)
self.nextChar.append(self.Categories[-1][-1][0])
self.Emojis.append(self.nextChar)
# print(self.nextChar)
self.nextChar = []
self.nextFilename = None
self.nextData = None
return
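# Each EmojiParser.Emojis entry has the shape [[codepoints...], name, png path, category keyword].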
parser = ArgumentParser(description='Configure the SmartKey.')
parser.add_argument('--emojione-path', dest='imgPath', required=True)
parser.add_argument('--full-emoji-list', dest='emojiList')
args = parser.parse_args()
htmlData = None
if args.emojiList:
	print('Loading Emoji List from \'{}\''.format(args.emojiList))
	htmlData = open(args.emojiList, 'r').read()
else:
print('Loading Emoji List from \'{}\''.format(emojiListURL))
htmlData = requests.get(emojiListURL, timeout=30).content.decode('utf-8')
emojiParser = EmojiParser(args.imgPath)
emojiParser.feed(htmlData)
print('Found {} Emojis in {} Categories.'.format(len(emojiParser.Emojis), len(emojiParser.Categories)))
allCat = []
for cat in emojiParser.Categories:
allCat.append(cat[-1][0])
emojiParser.Categories.insert(0, ['All', allCat])
json.dump(emojiParser.Categories, open('category.json', 'w'), indent=2)
json.dump(emojiParser.Emojis, open('emoji.json', 'w'), indent=2)
|
{
"content_hash": "9496d8d48d956841fa6061385ec25cee",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 103,
"avg_line_length": 35.705882352941174,
"alnum_prop": 0.5940691927512356,
"repo_name": "andresth/emojiselector",
"id": "ceb9d825a34c6ce738425c3a093f6df3c0365fdf",
"size": "3079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/listcreator.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7379"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'README.md')).read()
requires = [
'colormath',
'pyserial',
'requests',
'simplejson']
setup(
name='Lightbox',
version='1.0.1',
description=('Python library and JSON HTTP interface for controlling multi-'
'output RGB devices with color transitions in LAB space.'),
long_description=README,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Topic :: Home Automation',
],
author='Elmer de Looff',
author_email='elmer.delooff@gmail.com',
url='https://github.com/Frack/Lightbox',
keywords='lightbox light led colormath',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
)
|
{
"content_hash": "77c2175e52ecfec0caff92f0982ed54f",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 80,
"avg_line_length": 28.02857142857143,
"alnum_prop": 0.6472986748216106,
"repo_name": "edelooff/Lightbox",
"id": "f6af55c191dbfff2273361f728ac9eae281535be",
"size": "981",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Arduino",
"bytes": "4413"
},
{
"name": "C",
"bytes": "307"
},
{
"name": "C++",
"bytes": "6321"
},
{
"name": "CSS",
"bytes": "6368"
},
{
"name": "HTML",
"bytes": "5768"
},
{
"name": "JavaScript",
"bytes": "38586"
},
{
"name": "Python",
"bytes": "63461"
}
],
"symlink_target": ""
}
|
from .Radius_client import Radius_client
from time import time
from functools import reduce
class RadiusAuthorisation(Radius_client):
def do_auth(self, username, caller, callee, h323_cid, sip_cid, remote_ip, res_cb, \
realm = None, nonce = None, uri = None, response = None, extra_attributes = None):
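        # When digest credentials (realm, nonce, uri, response) are all supplied, the
        # attributes below form a SIP digest-authentication request; otherwise the
        # request carries the remote IP as User-Name with the fixed password 'cisco'.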
sip_cid = str(sip_cid)
attributes = None
if None not in (realm, nonce, uri, response):
attributes = [('User-Name', username), ('Digest-Realm', realm), \
('Digest-Nonce', nonce), ('Digest-Method', 'INVITE'), ('Digest-URI', uri), \
('Digest-Algorithm', 'MD5'), ('Digest-User-Name', username), ('Digest-Response', response)]
else:
attributes = [('User-Name', remote_ip), ('Password', 'cisco')]
        if caller is None:
caller = ''
attributes.extend((('Calling-Station-Id', caller), ('Called-Station-Id', callee), ('h323-conf-id', h323_cid), \
('call-id', sip_cid), ('h323-remote-address', remote_ip), ('h323-session-protocol', 'sipv2')))
        if extra_attributes is not None:
for a, v in extra_attributes:
attributes.append((a, v))
message = 'sending AAA request:\n'
message += reduce(lambda x, y: x + y, ['%-32s = \'%s\'\n' % (x[0], str(x[1])) for x in attributes])
self.global_config['_sip_logger'].write(message, call_id = sip_cid)
Radius_client.do_auth(self, attributes, self._process_result, res_cb, sip_cid, time())
def _process_result(self, results, res_cb, sip_cid, btime):
delay = time() - btime
rcode = results[1]
if rcode in (0, 1):
if rcode == 0:
message = 'AAA request accepted (delay is %.3f), processing response:\n' % delay
else:
message = 'AAA request rejected (delay is %.3f), processing response:\n' % delay
if len(results[0]) > 0:
message += reduce(lambda x, y: x + y, ['%-32s = \'%s\'\n' % x for x in results[0]])
else:
message = 'Error sending AAA request (delay is %.3f)\n' % delay
self.global_config['_sip_logger'].write(message, call_id = sip_cid)
res_cb(results)
|
{
"content_hash": "7ff9db3d3313fcb73cf7f6e4e3c2fdd5",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 119,
"avg_line_length": 54.21951219512195,
"alnum_prop": 0.5677013045434098,
"repo_name": "hgascon/pulsar",
"id": "7b1e8cdef85d8c0558cc6ef2c6180a5b8aba1e22",
"size": "3348",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pulsar/core/sippy/RadiusAuthorisation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "550357"
},
{
"name": "R",
"bytes": "1007"
},
{
"name": "Shell",
"bytes": "4616"
}
],
"symlink_target": ""
}
|
"""
Tests for the integration test suite itself.
"""
import logging
import os
import subprocess
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Set
import yaml
from get_test_group import patterns_from_group
__maintainer__ = 'adam'
__contact__ = 'tools-infra-team@mesosphere.io'
log = logging.getLogger(__file__)
def _tests_from_pattern(ci_pattern: str) -> Set[str]:
"""
From a CI pattern, get all tests ``pytest`` would collect.
"""
tests = set([]) # type: Set[str]
args = [
'pytest',
'--disable-pytest-warnings',
'--collect-only',
ci_pattern,
'-q',
]
# Test names will not be in ``stderr`` so we ignore that.
result = subprocess.run(
args=args,
stdout=subprocess.PIPE,
env={**os.environ, **{'PYTHONIOENCODING': 'UTF-8'}},
)
output = result.stdout
for line in output.splitlines():
if b'error in' in line:
message = (
'Error collecting tests for pattern "{ci_pattern}". '
'Full output:\n'
'{output}'
).format(
ci_pattern=ci_pattern,
output=output,
)
raise Exception(message)
# Whitespace is important to avoid confusing pytest warning messages
# with test names. For example, the pytest output may contain '3 tests
# deselected' which would conflict with a test file called
# test_agent_deselected.py if we ignored whitespace.
if (
line and
# Some tests show warnings on collection.
b' warnings' not in line and
# Some tests are skipped on collection.
b'skipped in' not in line and
# Some tests are deselected by the ``pytest.ini`` configuration.
b' deselected' not in line and
not line.startswith(b'no tests ran in')
):
tests.add(line.decode())
return tests
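# Illustrative example (the file and test names are assumptions): for the
# ci_pattern 'test_example.py', pytest's quiet collection output yields node
# ids such as 'test_example.py::test_something', which end up in the returned
# set.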
def test_test_groups() -> None:
"""
The test suite is split into various "groups".
This test confirms that the groups together contain all tests, and each
test is collected only once.
"""
test_group_file = Path('test_groups.yaml')
test_group_file_contents = test_group_file.read_text()
    test_groups = yaml.safe_load(test_group_file_contents)['groups']
test_patterns = []
for group in test_groups:
test_patterns += patterns_from_group(group_name=group)
    # Map each test to the patterns that collect it, so that we can canonically
    # state whether every test was collected _exactly_ once.
    tests_to_patterns = defaultdict(list)  # type: Dict[str, List[str]]
for pattern in test_patterns:
tests = _tests_from_pattern(ci_pattern=pattern)
for test in tests:
tests_to_patterns[test].append(pattern)
errs = []
for test_name, patterns in tests_to_patterns.items():
message = (
'Test "{test_name}" will be run once for each pattern in '
'{patterns}. '
'Each test should be run only once.'
).format(
test_name=test_name,
patterns=patterns,
)
        if len(patterns) != 1:
            errs.append(message)
if errs:
for message in errs:
log.error(message)
raise Exception("Some tests are not collected exactly once, see errors.")
all_tests = _tests_from_pattern(ci_pattern='')
assert tests_to_patterns.keys() - all_tests == set()
assert all_tests - tests_to_patterns.keys() == set()
|
{
"content_hash": "4cfb2f38183ec48aa346229ee6adf245",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 118,
"avg_line_length": 32.348214285714285,
"alnum_prop": 0.5956389732266077,
"repo_name": "mesosphere-mergebot/dcos",
"id": "cc57a137728a5fedfebf271e4d933206bec04fb7",
"size": "3623",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "packages/dcos-integration-test/extra/test_meta.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "2529"
},
{
"name": "Dockerfile",
"bytes": "11421"
},
{
"name": "Groovy",
"bytes": "711"
},
{
"name": "HTML",
"bytes": "94837"
},
{
"name": "Lua",
"bytes": "194962"
},
{
"name": "Makefile",
"bytes": "179"
},
{
"name": "PowerShell",
"bytes": "20017"
},
{
"name": "Python",
"bytes": "1493853"
},
{
"name": "Shell",
"bytes": "105879"
}
],
"symlink_target": ""
}
|
import pytest
pytestmark = pytest.mark.django_db
def test_admin_interface(client):
public_urls = [
'/admin/login/',
'/',
'/about/',
'/privacy/',
]
for url in public_urls:
        response = client.get(url)
assert response.status_code == 200
|
{
"content_hash": "611dceb1f3ab3755b5a5900e65ee85a3",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 46,
"avg_line_length": 20.533333333333335,
"alnum_prop": 0.5551948051948052,
"repo_name": "vipul-sharma20/fossevents.in",
"id": "6d8ea06d731f88a010626fb826d7fb1ed2750cbf",
"size": "308",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/integrations/test_urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1234"
},
{
"name": "HTML",
"bytes": "6001"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Python",
"bytes": "43719"
},
{
"name": "Shell",
"bytes": "2183"
}
],
"symlink_target": ""
}
|
try:
# Python 2.7
from collections import OrderedDict
except ImportError:
# Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current
from gluon.storage import Storage
def config(settings):
"""
Template settings for a hosted environment to allow NGOs to share data
Prototype being developed for CAR (Central African Republic)
"""
T = current.T
#settings.base.system_name = T("Magnu")
#settings.base.system_name_short = T("Magnu")
settings.base.system_name = T("Sahana")
settings.base.system_name_short = T("Sahana")
# PrePopulate data
settings.base.prepopulate = ("Magnu", "default/users")
# Theme (folder to use for views/layout.html)
settings.base.theme = "Magnu"
# Authentication settings
# Should users be allowed to register themselves?
#settings.security.self_registration = False
# Do new users need to verify their email address?
settings.auth.registration_requires_verification = True
# Do new users need to be approved by an administrator prior to being able to login?
#settings.auth.registration_requires_approval = True
settings.auth.registration_requests_organisation = True
# @ToDo:
#settings.auth.registration_requests_location = "L0"
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
# Restrict the Location Selector to just certain countries
# NB This can also be over-ridden for specific contexts later
# e.g. Activities filtered to those of parent Project
settings.gis.countries = ("CF",) # Initially, will change
# Show LatLon boxes in the Location Selector
settings.gis.latlon_selector = True
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# Uncomment to open Location represent links in a Popup Window
settings.gis.popup_location_link = True
# Uncomment to Disable the Postcode selector in the LocationSelector
settings.gis.postcode_selector = False # @ToDo: Vary by country (include in the gis_config!)
# Uncomment to show the Print control:
# http://eden.sahanafoundation.org/wiki/UserGuidelines/Admin/MapPrinting
settings.gis.print_button = True
# L10n settings
# Languages used in the deployment (used for Language Toolbar & GIS Locations)
# http://www.loc.gov/standards/iso639-2/php/code_list.php
settings.L10n.languages = OrderedDict([
# ("ar", "العربية"),
# ("bs", "Bosanski"),
("en", "English"),
("fr", "Français"),
# ("de", "Deutsch"),
# ("el", "ελληνικά"),
# ("es", "Español"),
# ("it", "Italiano"),
# ("ja", "日本語"),
# ("km", "ភាសាខ្មែរ"),
# ("ko", "한국어"),
# ("ne", "नेपाली"), # Nepali
# ("prs", "دری"), # Dari
# ("ps", "پښتو"), # Pashto
# ("pt", "Português"),
# ("pt-br", "Português (Brasil)"),
# ("ru", "русский"),
# ("tet", "Tetum"),
# ("tl", "Tagalog"),
# ("ur", "اردو"),
# ("vi", "Tiếng Việt"),
# ("zh-cn", "中文 (简体)"),
# ("zh-tw", "中文 (繁體)"),
])
# Default language for Language Toolbar (& GIS Locations in future)
#settings.L10n.default_language = "en"
# Uncomment to Hide the language toolbar
#settings.L10n.display_toolbar = False
# @ToDO:These should be modified per-country
# Default timezone for users
settings.L10n.utc_offset = "UTC +0100"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
#settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
#settings.L10n.thousands_separator = ","
# Uncomment this to Translate Organisation Names/Acronyms
settings.L10n.translate_org_organisation = True
# Security Policy
# http://eden.sahanafoundation.org/wiki/S3AAA#System-widePolicy
# 1: Simple (default): Global as Reader, Authenticated as Editor
# 2: Editor role required for Update/Delete, unless record owned by session
# 3: Apply Controller ACLs
# 4: Apply both Controller & Function ACLs
# 5: Apply Controller, Function & Table ACLs
# 6: Apply Controller, Function, Table ACLs and Entity Realm
# 7: Apply Controller, Function, Table ACLs and Entity Realm + Hierarchy
# 8: Apply Controller, Function, Table ACLs, Entity Realm + Hierarchy and Delegations
#
#settings.security.policy = 7 # Organisation-ACLs
# Resources which can be directly added to the main map
settings.gis.poi_create_resources = \
(dict(c="event",
f="incident_report",
table="event_incident_report",
type="point",
label=T("Add Incident"),
layer="Incident Reports",
),
#dict(c="gis",
# f="poi",
# table="gis_poi",
# type="polygon",
# label=T("Add Area"),
# layer="Areas",
# ),
#dict(c="gis",
# f="poi",
# table="gis_poi",
# type="line",
# label=T("Add Route"),
# layer="Routes",
# ),
)
# RSS feeds
#settings.frontpage.rss = [
# {"title": "Eden",
# # Trac timeline
# "url": "http://eden.sahanafoundation.org/timeline?ticket=on&changeset=on&milestone=on&wiki=on&max=50&daysback=90&format=rss"
# },
# {"title": "Twitter",
# # @SahanaFOSS
# #"url": "https://search.twitter.com/search.rss?q=from%3ASahanaFOSS" # API v1 deprecated, so doesn't work, need to use 3rd-party service, like:
# "url": "http://www.rssitfor.me/getrss?name=@SahanaFOSS"
# # Hashtag
# #url: "http://search.twitter.com/search.atom?q=%23eqnz" # API v1 deprecated, so doesn't work, need to use 3rd-party service, like:
# #url: "http://api2.socialmention.com/search?q=%23eqnz&t=all&f=rss"
# }
#]
settings.org.groups = "Coalition / Consortium"
# @ToDo: Once we go global
# Enable the use of Organisation Branches
#settings.org.branches = True
# Show branches as tree rather than as table
#settings.org.branches_tree_view = True
#settings.org.autocomplete = True
# Uncomment this to allow multiple site contacts per site (e.g. if needing a separate contact per sector)
settings.hrm.site_contact_unique = False
# -----------------------------------------------------------------------------
def customise_org_organisation_resource(r, tablename):
s3db = current.s3db
table = s3db[tablename]
list_fields = s3db.get_config(tablename, "list_fields")
list_fields.insert(2, (T("French Name"), "name.name_l10n"))
list_fields.insert(4, (T("French Acronym"), "name.acronym_l10n"))
settings.customise_org_organisation_resource = customise_org_organisation_resource
# -----------------------------------------------------------------------------
# Comment/uncomment modules here to disable/enable them
# Modules menu is defined in modules/eden/menu.py
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = T("Home"),
restricted = False, # Use ACLs to control access to this module
access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = T("Administration"),
#description = "Site Administration",
restricted = True,
module_type = None # No Menu
)),
("errors", Storage(
name_nice = T("Ticket Viewer"),
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
#("sync", Storage(
# name_nice = T("Synchronization"),
# #description = "Synchronization",
# restricted = True,
# access = "|1|", # Only Administrators can see this module in the default menu & access the controller
# module_type = None # This item is handled separately for the menu
#)),
#("tour", Storage(
# name_nice = T("Guided Tour Functionality"),
# module_type = None,
#)),
#("translate", Storage(
# name_nice = T("Translation Functionality"),
# #description = "Selective translation of strings based on module.",
# module_type = None,
#)),
("gis", Storage(
name_nice = T("Map"),
#description = "Situation Awareness & Geospatial Analysis",
restricted = True,
module_type = 6, # 6th item in the menu
)),
("pr", Storage(
name_nice = T("Person Registry"),
#description = "Central point to record details on People",
restricted = True,
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = 10
)),
("org", Storage(
name_nice = T("Organizations"),
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
restricted = True,
module_type = 1
)),
#("hrm", Storage(
# name_nice = T("Staff"),
# #description = "Human Resources Management",
# restricted = True,
# module_type = 2,
#)),
#("vol", Storage(
# name_nice = T("Volunteers"),
# #description = "Human Resources Management",
# restricted = True,
# module_type = 2,
#)),
("cms", Storage(
name_nice = T("Content Management"),
#description = "Content Management System",
restricted = True,
module_type = 10,
)),
("doc", Storage(
name_nice = T("Documents"),
#description = "A library of digital resources, such as photos, documents and reports",
restricted = True,
module_type = 10,
)),
("msg", Storage(
name_nice = T("Messaging"),
#description = "Sends & Receives Alerts via Email & SMS",
restricted = True,
# The user-visible functionality of this module isn't normally required. Rather it's main purpose is to be accessed from other modules.
module_type = None,
)),
("security", Storage(
name_nice = T("Security"),
#description = "Security Management System",
restricted = True,
module_type = 10,
)),
("supply", Storage(
name_nice = T("Supply Chain Management"),
#description = "Used within Inventory Management, Request Management and Asset Management",
restricted = True,
module_type = None, # Not displayed
)),
("inv", Storage(
name_nice = T("Warehouses"),
#description = "Receiving and Sending Items",
restricted = True,
module_type = 4
)),
("asset", Storage(
name_nice = T("Assets"),
#description = "Recording and Assigning Assets",
restricted = True,
module_type = 5,
)),
# Vehicle depends on Assets
("vehicle", Storage(
name_nice = T("Vehicles"),
#description = "Manage Vehicles",
restricted = True,
module_type = 10,
)),
#("req", Storage(
# name_nice = T("Requests"),
# #description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
# restricted = True,
# module_type = 10,
#)),
("project", Storage(
name_nice = T("Projects"),
#description = "Tracking of Projects, Activities and Tasks",
restricted = True,
module_type = 2
)),
("cr", Storage(
name_nice = T("Camps"),
#description = "Tracks the location, capacity and breakdown of victims in Shelters",
restricted = True,
module_type = 10
)),
("hms", Storage(
name_nice = T("Hospitals"),
#description = "Helps to monitor status of hospitals",
restricted = True,
module_type = 10
)),
("transport", Storage(
name_nice = T("Transport"),
restricted = True,
module_type = 10,
)),
("stats", Storage(
name_nice = T("Statistics"),
#description = "Manages statistics",
restricted = True,
module_type = None,
)),
("water", Storage(
name_nice = T("Water"),
#description = "Flood Gauges show water levels in various parts of the country",
restricted = True,
module_type = 10
)),
("event", Storage(
name_nice = T("Events"),
#description = "Activate Events (e.g. from Scenario templates) for allocation of appropriate Resources (Human, Assets & Facilities).",
restricted = True,
module_type = 10,
)),
#("dvr", Storage(
# name_nice = T("Disaster Victim Registry"),
# #description = "Allow affected individuals & households to register to receive compensation and distributions",
# restricted = True,
# module_type = 10,
#)),
])
# END =========================================================================
|
{
"content_hash": "8c5808c2f05a6f53e73381323c6151a9",
"timestamp": "",
"source": "github",
"line_count": 361,
"max_line_length": 152,
"avg_line_length": 39.78116343490305,
"alnum_prop": 0.5604066569180419,
"repo_name": "flavour/Turkey",
"id": "cbb5a45539c2e70edf3d11dabf4c956270f5567d",
"size": "14485",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "modules/templates/Magnu/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2021594"
},
{
"name": "HTML",
"bytes": "1310585"
},
{
"name": "JavaScript",
"bytes": "19245058"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "500"
},
{
"name": "Python",
"bytes": "28627483"
},
{
"name": "Ruby",
"bytes": "2051"
},
{
"name": "Shell",
"bytes": "4860"
},
{
"name": "XSLT",
"bytes": "2678742"
}
],
"symlink_target": ""
}
|
from IECore import *
import sys
import unittest
class LensDistortOpTest(unittest.TestCase):
def testDistortOpWithStandardLensModel(self):
# The lens model and parameters to use.
o = CompoundObject()
o["lensModel"] = StringData( "StandardRadialLensModel" )
o["distortion"] = DoubleData( 0.2 )
o["anamorphicSqueeze"] = DoubleData( 1. )
o["curvatureX"] = DoubleData( 0.2 )
o["curvatureY"] = DoubleData( 0.5 )
o["quarticDistortion"] = DoubleData( .1 )
# The input image to read.
r = EXRImageReader("test/IECore/data/exrFiles/uvMapWithDataWindow.100x100.exr")
img = r.read()
# Create the Op and set it's parameters.
op = LensDistortOp()
op["input"] = img
op["mode"] = LensModel.Undistort
op['lensModel'].setValue(o)
# Run the Op.
out = op()
r = EXRImageReader("test/IECore/data/exrFiles/uvMapWithDataWindowDistorted.100x100.exr")
img2 = r.read()
self.assertEqual( img.displayWindow, img2.displayWindow )
|
{
"content_hash": "c002b211f6393beb450a8227803ca12d",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 90,
"avg_line_length": 27.6,
"alnum_prop": 0.6935817805383023,
"repo_name": "DoubleNegativeVisualEffects/cortex",
"id": "1b883118ebcd2c5bb011d066a48d3c61ef300e29",
"size": "2750",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "test/IECore/LensDistortOpTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from . import test_access_rights
checks = [
test_access_rights,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
{
"content_hash": "b6add75ce50d9f3db9229a2a5c372bd2",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 65,
"avg_line_length": 19.714285714285715,
"alnum_prop": 0.7391304347826086,
"repo_name": "diogocs1/comps",
"id": "c79181f77104c59e58e6d7f6708df65cc4f943a9",
"size": "1124",
"binary": false,
"copies": "168",
"ref": "refs/heads/master",
"path": "web/addons/portal_project/tests/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "701"
},
{
"name": "CSS",
"bytes": "856533"
},
{
"name": "HTML",
"bytes": "299671"
},
{
"name": "Java",
"bytes": "620166"
},
{
"name": "JavaScript",
"bytes": "5844302"
},
{
"name": "Makefile",
"bytes": "21002"
},
{
"name": "PHP",
"bytes": "14259"
},
{
"name": "Python",
"bytes": "10647376"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "17746"
},
{
"name": "XSLT",
"bytes": "120278"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [("data_finder", "0004_loggedpostcode_view_used")]
operations = [
migrations.CreateModel(
name="ElectionNotificationSignup",
fields=[
(
"id",
models.AutoField(
primary_key=True,
verbose_name="ID",
serialize=False,
auto_created=True,
),
),
(
"created",
django_extensions.db.fields.CreationDateTimeField(
auto_now_add=True, verbose_name="created"
),
),
(
"modified",
django_extensions.db.fields.ModificationDateTimeField(
auto_now=True, verbose_name="modified"
),
),
("postcode", models.CharField(max_length=100)),
("email", models.EmailField(max_length=254)),
("join_list", models.BooleanField(default=False)),
],
options={
"abstract": False,
"ordering": ("-modified", "-created"),
"get_latest_by": "modified",
},
)
]
|
{
"content_hash": "5fe72078da42d5e985337075ce08aebf",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 74,
"avg_line_length": 32.52173913043478,
"alnum_prop": 0.43783422459893045,
"repo_name": "DemocracyClub/UK-Polling-Stations",
"id": "f1b6ed3f82d4b141b085c1e05d30bec00da2529e",
"size": "1520",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "polling_stations/apps/data_finder/migrations/0005_electionnotificationsignup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "32"
},
{
"name": "HTML",
"bytes": "85540"
},
{
"name": "JavaScript",
"bytes": "3399"
},
{
"name": "Procfile",
"bytes": "49"
},
{
"name": "Python",
"bytes": "1111337"
},
{
"name": "SCSS",
"bytes": "5742"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('checkout', '0013_auto_20180112_1421'),
]
operations = [
migrations.AlterField(
model_name='item',
name='title',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='checkout.Papel'),
),
]
|
{
"content_hash": "7647e74019cfcaa5e77f9f88f2418c06",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 102,
"avg_line_length": 24.263157894736842,
"alnum_prop": 0.631236442516269,
"repo_name": "CoutinhoElias/danibraz",
"id": "7ffe6ebd24d1e19a2a182563d01e704f7046ebe7",
"size": "534",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "danibraz/checkout/migrations/0014_auto_20180112_1431.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "321689"
},
{
"name": "HTML",
"bytes": "151507"
},
{
"name": "JavaScript",
"bytes": "1107690"
},
{
"name": "Python",
"bytes": "209840"
},
{
"name": "Shell",
"bytes": "4240"
}
],
"symlink_target": ""
}
|
import unittest
from subprocess import check_output
def run(json):
return check_output(["./trost", "-j", "jsons/%s" % json]).strip()
class Quantum_Test(unittest.TestCase):
def test_print(self):
self.assertEqual("नमस्ते, Hello, Привет, 您好, ہیلو", run("print.json"))
def test_iadd(self):
self.assertEqual("42", run("iadd.json"))
def test_fadd(self):
self.assertEqual("44.21", run("fadd.json"))
def test_mixed_add(self):
self.assertEqual("43.34", run("mixed_add.json"))
def test_scat(self):
self.assertEqual("Hello, World!", run("scat.json"))
def test_function(self):
self.assertEqual("17", run("function.json"))
def test_bool(self):
self.assertEqual("true", run("bool.json"))
def test_if_else(self):
self.assertEqual("lesser", run("if_else.json"))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "dca02c218ae2ac185e2937d26dcdb3a7",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 78,
"avg_line_length": 25.166666666666668,
"alnum_prop": 0.6070640176600441,
"repo_name": "trost-lang/quantum",
"id": "4e6769a588c0f338d275b938e3085749db890725",
"size": "957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_quantum.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "4348"
},
{
"name": "C",
"bytes": "2174886"
},
{
"name": "C++",
"bytes": "559747"
},
{
"name": "CMake",
"bytes": "9539"
},
{
"name": "GAP",
"bytes": "9146"
},
{
"name": "Groff",
"bytes": "5490"
},
{
"name": "HTML",
"bytes": "145927"
},
{
"name": "Makefile",
"bytes": "79708"
},
{
"name": "Python",
"bytes": "2582"
},
{
"name": "QMake",
"bytes": "1100"
},
{
"name": "Shell",
"bytes": "1627"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import unicodecsv as csv
from django.conf.urls import url
from django.contrib import messages
from django.forms.forms import pretty_name
from django.forms.models import inlineformset_factory, modelform_factory
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.html import format_html
from .forms import ImportCSVForm
class UploadCSVAdminMixin(object):
change_form_template = 'admin/inline_csv_importer/change_form.html'
def get_urls(self):
urls = super(UploadCSVAdminMixin, self).get_urls()
my_urls = [
url(
r'^(\d+)/import-inline-csv/$',
self.import_inline_csv,
name='import-inline-csv'
),
]
return my_urls + urls
def format_csv_inline(self):
""" Outputs formatted csv_inline. """
        # do_checks() guarantees csv_inline contains exactly one (name, options) set.
        csv_inline = {'name': self.csv_inline[0][0]}
        csv_inline.update(self.csv_inline[0][1])
        return csv_inline
def do_checks(self):
"""
        Do some checks to make sure that the defined csv_inline tuple or list is in the right format.
"""
message = None
if not hasattr(self, 'csv_inline'):
message = format_html(
'Please define <b>csv_inline</b> if you want import from csv.'
)
elif not isinstance(self.csv_inline[0], (list, tuple)):
message = format_html(
'{}.csv_inline must be list or tuple.'.format(self.__class__.__name__)
)
elif len(self.csv_inline) > 1:
message = format_html(
'{}.csv_inline can\'t be more than one set.'.format(self.__class__.__name__)
)
elif not self.csv_inline[0][1].get('inline'):
message = format_html(
'{}.csv_inline please define <b>inline</b>.'.format(self.__class__.__name__)
)
return message
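    # Illustrative shape of ``csv_inline`` that passes the checks above
    # (the model, inline and field names are assumptions, not taken from a
    # real project):
    #
    #   csv_inline = (
    #       ('Prices', {
    #           'inline': PriceInline,          # admin inline whose .model is used
    #           'fields': ('date', 'amount'),   # CSV columns, in order
    #           'help_text': 'CSV columns: date, amount',
    #       }),
    #   )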
def get_inline_model_form(self):
""" Build model form for inline model. """
return modelform_factory(
model=self.pretty_csv_inline['inline'].model,
fields=self.pretty_csv_inline['fields']
)
def build_formset(self, model_form, extra=0):
""" Build formset. """
formset = inlineformset_factory(
parent_model=self.model,
model=self.pretty_csv_inline['inline'].model,
form=model_form,
extra=extra,
)
return formset
def import_inline_csv(self, request, obj_id):
form = None
formset = None
initial_data = []
headers = []
# Do checks on defined csv_inline fieldset.
message = self.do_checks()
if message:
messages.error(request, message)
return HttpResponseRedirect('../')
self.pretty_csv_inline = self.format_csv_inline()
opts = {
'verbose_name': self.model._meta.verbose_name,
'verbose_name_plural': self.model._meta.verbose_name_plural,
'app_label': self.model._meta.app_label,
'object_name': self.model._meta.model_name,
}
confirmed = request.POST.get('confirmed', False)
if request.method == 'POST':
# Build inline formset.
model_form = self.get_inline_model_form()
if request.FILES.get('csv_file'):
csv_file = request.FILES['csv_file']
csv_file = csv.reader(csv_file)
# Skip headers
next(csv_file, None)
# Make headers pretty.
headers = map(pretty_name, self.pretty_csv_inline['fields'])
for row in csv_file:
# Zip values from csv row to defined fields in csv_inline
zipped_data = dict(zip(self.pretty_csv_inline['fields'], row))
initial_data.append(zipped_data)
# Build formset.
formset = self.build_formset(model_form, extra=len(initial_data))
formset = formset(initial=initial_data)
else:
formset = self.build_formset(model_form)
formset = formset(request.POST)
if formset.is_valid():
obj = self.get_object(request, obj_id)
formset.instance = obj
formset.save()
messages.success(request, 'Imported successfully.')
return HttpResponseRedirect('../')
else:
form = ImportCSVForm()
if self.pretty_csv_inline.get('help_text'):
form['csv_file'].help_text = self.pretty_csv_inline['help_text']
return render_to_response(
'admin/inline_csv_importer/inline_csv_importer.html',
{
'title': 'Import data',
'root_path': 'admin',
'app_label': opts['app_label'],
'opts': opts,
'form': form,
'confirmed': confirmed,
'formset': formset,
'headers': headers,
'initial_data': initial_data,
},
RequestContext(request)
)
|
{
"content_hash": "9eca05811a07c8f7c54b98e856e73f43",
"timestamp": "",
"source": "github",
"line_count": 161,
"max_line_length": 92,
"avg_line_length": 33.45962732919255,
"alnum_prop": 0.5418600334137739,
"repo_name": "zatan/django-inline-csv-importer",
"id": "04e944e81ffe6e37550e9779afdfc951547eaeaa",
"size": "5411",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inline_csv_importer/mixins.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "2942"
},
{
"name": "Python",
"bytes": "7077"
}
],
"symlink_target": ""
}
|
import exceptions as exc
import functools
import pecan
from oslo_log import log as logging
from pecan import rest
from nca47.common.i18n import _
LOG = logging.getLogger(__name__)
def expose(function):
"""
Packaging pecan RestController expose method. Resolving WSGi request body.
"""
@pecan.expose('json')
@functools.wraps(function)
def decorated_function(self, *args, **kwargs):
func = functools.partial(function, self, pecan.request)
try:
func = func(*args, **kwargs)
except Exception:
pecan.response.status = 500
return {"ret_code": 500, "ret_msg": "Bad Method Request"}
return func
return decorated_function
class BaseRestController(rest.RestController):
"""
A base class implement pecan RestController.
"""
@property
def response(self):
return pecan.response
@pecan.expose('json')
@expose
def post(self, req, *args, **kwargs):
LOG.debug(_('args: %(args)s, kwargs: %(kwargs)s'),
{"args": args, "kwargs": kwargs})
"""
operation = args[0]
req = pecan.request
if operation == 'add':
return self.create(req, *args, **kwargs)
elif operation == 'del':
return self.remove(req, *args, **kwargs)
elif operation == 'upd':
return self.update(req, *args, **kwargs)
elif operation == 'get':
return self.show(req, *args, **kwargs)
elif operation == 'getall':
return self.list(req, *args, **kwargs)
elif operation == 'addif':
return self.addif(req, *args, **kwargs)
elif operation == 'delif':
return self.delif(req, *args, **kwargs)
else:
pecan.abort(404)
"""
try:
operation = args[0]
req = pecan.request
if operation == 'addif':
return self.addif(req, *args, **kwargs)
elif operation == 'delif':
return self.delif(req, *args, **kwargs)
except Exception as e:
pass
return self.create(req, *args, **kwargs)
@expose
def put(self, req, id, *args, **kwargs):
LOG.debug(_('id: %(id)s, args: %(args)s, kwargs: %(kwargs)s'),
{"id": id, "args": args, "kwargs": kwargs})
return self.update(req, id, *args, **kwargs)
@expose
def delete(self, req, id, *args, **kwargs):
LOG.debug(_('id: %(id)s, args: %(args)s, kwargs: %(kwargs)s'),
{"id": id, "args": args, "kwargs": kwargs})
return self.remove(req, id, *args, **kwargs)
@expose
def get_all(self, req, *args, **kwargs):
LOG.debug(_('args: %(args)s, kwargs: %(kwargs)s'),
{"args": args, "kwargs": kwargs})
return self.list(req, *args, **kwargs)
@expose
def get_one(self, req, id, *args, **kwargs):
LOG.debug(_('id: %(id)s, args: %(args)s, kwargs: %(kwargs)s'),
{"id": id, "args": args, "kwargs": kwargs})
return self.show(req, id, *args, **kwargs)
def create(self, req, *args, **kwargs):
raise exc.NotImplementedError
def update(self, req, id, *args, **kwargs):
raise exc.NotImplementedError
def remove(self, req, id, *args, **kwargs):
raise exc.NotImplementedError
def list(self, req, *args, **kwargs):
raise exc.NotImplementedError
def show(self, req, id, *args, **kwargs):
raise exc.NotImplementedError
|
{
"content_hash": "808aa09f2333e832da7dfb0bceb4ac94",
"timestamp": "",
"source": "github",
"line_count": 112,
"max_line_length": 78,
"avg_line_length": 32.705357142857146,
"alnum_prop": 0.5350805350805351,
"repo_name": "WosunOO/nca_xianshu",
"id": "aad2d8eca3c1e49c0133185974d76f27634c50af",
"size": "3663",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nca47/api/controllers/v1/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "723807"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
urlpatterns = patterns('',
)
|
{
"content_hash": "6c2e079224ad78d5425f9847aa76605f",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 51,
"avg_line_length": 20.25,
"alnum_prop": 0.7530864197530864,
"repo_name": "hfercc/mese2014",
"id": "6736aeccd43cd9925ba052c655b1f9cb824876e7",
"size": "81",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "securities/funds/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "103122"
},
{
"name": "JavaScript",
"bytes": "1054910"
},
{
"name": "Python",
"bytes": "1121791"
},
{
"name": "Shell",
"bytes": "2381"
}
],
"symlink_target": ""
}
|
import importlib
import types
from js9 import j
class SourceLoader:
"""
holds the logic of building the code of a service actions
"""
def __init__(self, service):
self._module = None
self._source = None
self.service = service
def _load(self):
"""
load all action module
"""
path = self._source_path()
loader = importlib.machinery.SourceFileLoader(self.service.model.key, path)
self._module = types.ModuleType(loader.name)
loader.exec_module(self._module)
@property
def source(self):
"""
rebuild source code
"""
if self._source is None:
actions_model = []
for model in self.service.model.actions.values():
actions_model.append(j.core.jobcontroller.db.actions.get(model.actionKey))
self._source = "from js9 import j\n"
for model in actions_model:
if model.imports == '':
continue
                self._source += '{}\n'.format('\n'.join(model.imports))
tmpl = """
def {name}({args}):
{code}
"""
for model in actions_model:
tmpl = j.data.text.strip(tmpl)
code = model.dbobj.code
code = j.data.text.indent(code, 4)
self._source += tmpl.format(
name=model.dbobj.name,
args=model.argsText,
code=code
)
return self._source
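    # Illustrative sketch of the generated module (an assumption for clarity,
    # not output captured from a real service): for an action named "install"
    # whose argsText is "job, name=None", ``source`` would roughly produce:
    #
    #   from js9 import j
    #   def install(job, name=None):
    #       <action code indented by four spaces>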
def _source_path(self):
"""
write code into a file
"""
path = j.sal.fs.joinPaths(
j.dirs.TMPDIR,
"actions",
self.service.model.dbobj.actorName,
self.service.model.key + ".py"
)
j.sal.fs.createDir(j.sal.fs.getParent(path))
j.sal.fs.writeFile(path, self.source)
return path
def get_method(self, name):
"""
get a specific method from the action module
"""
if self._module is None:
self._load()
return getattr(self._module, name)
|
{
"content_hash": "b7bcf0b5b419808a57459413d14a474d",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 90,
"avg_line_length": 27.76923076923077,
"alnum_prop": 0.5069252077562327,
"repo_name": "Jumpscale/ays9",
"id": "8abb25f3ee4e83eb906665af5d1041b9aaa5cc43",
"size": "2166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "JumpScale9AYS/jobcontroller/SourceLoader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "235840"
},
{
"name": "Cap'n Proto",
"bytes": "20377"
},
{
"name": "HTML",
"bytes": "1974"
},
{
"name": "JavaScript",
"bytes": "4324209"
},
{
"name": "Python",
"bytes": "691623"
},
{
"name": "RAML",
"bytes": "3933753"
},
{
"name": "Shell",
"bytes": "3824"
}
],
"symlink_target": ""
}
|
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "intown.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
{
"content_hash": "ef89594b8e27047073229e2edda5ce59",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 70,
"avg_line_length": 25.22222222222222,
"alnum_prop": 0.7092511013215859,
"repo_name": "brainless/intown",
"id": "084c8c0a23528d25e3312e1adc3b4f2d6e0d8566",
"size": "249",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "intown/manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "854"
},
{
"name": "HTML",
"bytes": "11608"
},
{
"name": "Python",
"bytes": "16086"
}
],
"symlink_target": ""
}
|
from sentry.testutils import TestCase
from sentry.utils.data_scrubber import SensitiveDataFilter
VARS = {
'foo': 'bar',
'password': 'hello',
'the_secret': 'hello',
'a_password_here': 'hello',
'api_key': 'secret_key',
'apiKey': 'secret_key',
}
class SensitiveDataFilterTest(TestCase):
def _check_vars_sanitized(self, vars, proc):
"""
Helper to check that keys have been sanitized.
"""
self.assertTrue('foo' in vars)
self.assertEquals(vars['foo'], 'bar')
self.assertTrue('password' in vars)
self.assertEquals(vars['password'], proc.MASK)
self.assertTrue('the_secret' in vars)
self.assertEquals(vars['the_secret'], proc.MASK)
self.assertTrue('a_password_here' in vars)
self.assertEquals(vars['a_password_here'], proc.MASK)
self.assertTrue('api_key' in vars)
self.assertEquals(vars['api_key'], proc.MASK)
self.assertTrue('apiKey' in vars)
self.assertEquals(vars['apiKey'], proc.MASK)
def test_stacktrace(self):
data = {
'sentry.interfaces.Stacktrace': {
'frames': [{'vars': VARS}],
}
}
proc = SensitiveDataFilter()
proc.apply(data)
self.assertTrue('sentry.interfaces.Stacktrace' in data)
stack = data['sentry.interfaces.Stacktrace']
self.assertTrue('frames' in stack)
self.assertEquals(len(stack['frames']), 1)
frame = stack['frames'][0]
self.assertTrue('vars' in frame)
self._check_vars_sanitized(frame['vars'], proc)
def test_http(self):
data = {
'sentry.interfaces.Http': {
'data': VARS,
'env': VARS,
'headers': VARS,
'cookies': VARS,
}
}
proc = SensitiveDataFilter()
proc.apply(data)
self.assertTrue('sentry.interfaces.Http' in data)
http = data['sentry.interfaces.Http']
for n in ('data', 'env', 'headers', 'cookies'):
self.assertTrue(n in http)
self._check_vars_sanitized(http[n], proc)
def test_extra(self):
data = {
'extra': VARS
}
proc = SensitiveDataFilter()
proc.apply(data)
self.assertTrue('extra' in data)
self._check_vars_sanitized(data['extra'], proc)
def test_querystring_as_string(self):
data = {
'sentry.interfaces.Http': {
'query_string': 'foo=bar&password=hello&the_secret=hello'
'&a_password_here=hello&api_key=secret_key',
}
}
proc = SensitiveDataFilter()
proc.apply(data)
self.assertTrue('sentry.interfaces.Http' in data)
http = data['sentry.interfaces.Http']
self.assertEquals(
http['query_string'],
'foo=bar&password=%(m)s&the_secret=%(m)s'
'&a_password_here=%(m)s&api_key=%(m)s' % dict(m=proc.MASK))
def test_querystring_as_string_with_partials(self):
data = {
'sentry.interfaces.Http': {
'query_string': 'foo=bar&password&baz=bar',
}
}
proc = SensitiveDataFilter()
proc.apply(data)
self.assertTrue('sentry.interfaces.Http' in data)
http = data['sentry.interfaces.Http']
        self.assertEquals(http['query_string'], 'foo=bar&password&baz=bar')
def test_sanitize_additional_sensitive_fields(self):
additional_sensitive_dict = {
'fieldy_field': 'value',
'moar_other_field': 'another value'
}
data = {
'extra': dict(VARS.items() + additional_sensitive_dict.items())
}
proc = SensitiveDataFilter(additional_sensitive_dict.keys())
proc.apply(data)
for field in additional_sensitive_dict.keys():
self.assertEquals(data['extra'][field], proc.MASK)
self._check_vars_sanitized(data['extra'], proc)
def test_sanitize_credit_card(self):
proc = SensitiveDataFilter()
result = proc.sanitize('foo', '4242424242424242')
self.assertEquals(result, proc.MASK)
def test_sanitize_credit_card_amex(self):
# AMEX numbers are 15 digits, not 16
proc = SensitiveDataFilter()
result = proc.sanitize('foo', '424242424242424')
self.assertEquals(result, proc.MASK)
def test_sanitize_credit_card_within_value(self):
proc = SensitiveDataFilter()
result = proc.sanitize('foo', "'4242424242424242'")
self.assertEquals(result, proc.MASK)
proc = SensitiveDataFilter()
result = proc.sanitize('foo', "foo 4242424242424242")
self.assertEquals(result, proc.MASK)
def test_sanitize_url(self):
proc = SensitiveDataFilter()
result = proc.sanitize('foo', 'pg://matt:pass@localhost/1')
self.assertEquals(result, 'pg://matt:%s@localhost/1' % proc.MASK)
# Make sure we don't mess up any other url.
# This url specifically if passed through urlunsplit(urlsplit()),
# it'll change the value.
result = proc.sanitize('foo', 'postgres:///path')
self.assertEquals(result, 'postgres:///path')
def test_sanitize_http_body(self):
data = {
'sentry.interfaces.Http': {
'data': '{"email":"zzzz@gmail.com","password":"zzzzz"}',
},
}
proc = SensitiveDataFilter()
result = proc.apply(data)
self.assertTrue('sentry.interfaces.Http' in data)
http = data['sentry.interfaces.Http']
self.assertEquals(http['data'], proc.MASK)
def test_does_not_fail_on_non_string(self):
data = {
'extra': {
'foo': 1,
},
}
proc = SensitiveDataFilter()
result = proc.apply(data)
self.assertEquals(data['extra'], {'foo': 1})
|
{
"content_hash": "a3b7f91c7efa85da42e26addac97c865",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 95,
"avg_line_length": 32.66120218579235,
"alnum_prop": 0.5710222519658692,
"repo_name": "korealerts1/sentry",
"id": "2f0cae307753c898343d68f18946bea5f3cd96b2",
"size": "6002",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tests/sentry/utils/test_data_scrubber.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "160813"
},
{
"name": "HTML",
"bytes": "193921"
},
{
"name": "JavaScript",
"bytes": "415670"
},
{
"name": "Makefile",
"bytes": "2832"
},
{
"name": "Python",
"bytes": "6777474"
}
],
"symlink_target": ""
}
|
"""
Tests for projectq.backends._circuits._plot.py.
To generate the baseline images,
run the tests with '--mpl-generate-path=baseline'
Then run the tests simply with '--mpl'
"""
from copy import deepcopy
import pytest
import projectq.backends._circuits._plot as _plot
# ==============================================================================
class PseudoCanvas:
def __init__(self):
pass
def draw(self):
pass
def get_renderer(self):
return
class PseudoFigure:
def __init__(self):
self.canvas = PseudoCanvas()
self.dpi = 1
class PseudoBBox:
def __init__(self, width, height):
self.width = width
self.height = height
class PseudoText:
def __init__(self, text):
self.text = text
self.figure = PseudoFigure()
def get_window_extent(self, *args):
return PseudoBBox(len(self.text), 1)
def remove(self):
pass
class PseudoTransform:
def __init__(self):
pass
def inverted(self):
return self
def transform_bbox(self, bbox):
return bbox
class PseudoAxes:
def __init__(self):
self.figure = PseudoFigure()
self.transData = PseudoTransform()
def add_patch(self, x):
return x
    def text(self, x, y, text, *args, **kwargs):
return PseudoText(text)
# ==============================================================================
@pytest.fixture(scope="module")
def plot_params():
params = deepcopy(_plot._DEFAULT_PLOT_PARAMS)
params.update([('units_per_inch', 1)])
return params
@pytest.fixture
def axes():
return PseudoAxes()
# ==============================================================================
@pytest.mark.parametrize('gate_str', ['X', 'Swap', 'Measure', 'Y', 'Rz(1.00)'])
def test_gate_width(axes, gate_str, plot_params):
width = _plot.gate_width(axes, gate_str, plot_params)
if gate_str == 'X':
assert width == 2 * plot_params['not_radius'] / plot_params['units_per_inch']
elif gate_str == 'Swap':
assert width == 2 * plot_params['swap_delta'] / plot_params['units_per_inch']
elif gate_str == 'Measure':
assert width == plot_params['mgate_width']
else:
assert width == len(gate_str) + 2 * plot_params['gate_offset']
def test_calculate_gate_grid(axes, plot_params):
qubit_lines = {0: [('X', [0], []), ('X', [0], []), ('X', [0], []), ('X', [0], [])]}
gate_grid = _plot.calculate_gate_grid(axes, qubit_lines, plot_params)
assert len(gate_grid) == 5
assert gate_grid[0] > plot_params['labels_margin']
width = [gate_grid[i + 1] - gate_grid[i] for i in range(4)]
# Column grid is given by:
# |---*---|---*---|---*---|---*---|
# |-- w --|-- w --|-- w --|.5w|
column_spacing = plot_params['column_spacing']
ref_width = _plot.gate_width(axes, 'X', plot_params)
for w in width[:-1]:
assert ref_width + column_spacing == pytest.approx(w)
assert 0.5 * ref_width + column_spacing == pytest.approx(width[-1])
def test_create_figure(plot_params):
fig, axes = _plot.create_figure(plot_params)
def test_draw_single_gate(axes, plot_params):
with pytest.raises(RuntimeError):
_plot.draw_gate(axes, 'MyGate', 2, [0, 0, 0], [0, 1, 3], [], plot_params)
_plot.draw_gate(axes, 'MyGate', 2, [0, 0, 0], [0, 1, 2], [], plot_params)
def test_draw_simple(plot_params):
qubit_lines = {
0: [
('X', [0], []),
('Z', [0], []),
('Z', [0], [1]),
('Swap', [0, 1], []),
('Measure', [0], []),
],
1: [None, None, None, None, None],
}
fig, axes = _plot.to_draw(qubit_lines)
units_per_inch = plot_params['units_per_inch']
not_radius = plot_params['not_radius']
control_radius = plot_params['control_radius']
swap_delta = plot_params['swap_delta']
wire_height = plot_params['wire_height'] * units_per_inch
mgate_width = plot_params['mgate_width']
labels = []
text_gates = []
measure_gates = []
for text in axes.texts:
if text.get_text() == '$|0\\rangle$':
labels.append(text)
elif text.get_text() == ' ':
measure_gates.append(text)
else:
text_gates.append(text)
assert all(label.get_position()[0] == pytest.approx(plot_params['x_offset']) for label in labels)
assert abs(labels[1].get_position()[1] - labels[0].get_position()[1]) == pytest.approx(wire_height)
# X gate
x_gate = [obj for obj in axes.collections if obj.get_label() == 'NOT'][0]
# find the filled circles
assert x_gate.get_paths()[0].get_extents().width == pytest.approx(2 * not_radius)
assert x_gate.get_paths()[0].get_extents().height == pytest.approx(2 * not_radius)
# find the vertical bar
x_vertical = x_gate.get_paths()[1]
assert len(x_vertical) == 2
assert x_vertical.get_extents().width == 0.0
assert x_vertical.get_extents().height == pytest.approx(2 * plot_params['not_radius'])
# Z gate
assert len(text_gates) == 1
assert text_gates[0].get_text() == 'Z'
assert text_gates[0].get_position()[1] == pytest.approx(2 * wire_height)
# CZ gate
cz_gate = [obj for obj in axes.collections if obj.get_label() == 'CZ'][0]
# find the filled circles
for control in cz_gate.get_paths()[:-1]:
assert control.get_extents().width == pytest.approx(2 * control_radius)
assert control.get_extents().height == pytest.approx(2 * control_radius)
# find the vertical bar
cz_vertical = cz_gate.get_paths()[-1]
assert len(cz_vertical) == 2
assert cz_vertical.get_extents().width == 0.0
assert cz_vertical.get_extents().height == pytest.approx(wire_height)
# Swap gate
swap_gate = [obj for obj in axes.collections if obj.get_label() == 'SWAP'][0]
# find the filled circles
for qubit in swap_gate.get_paths()[:-1]:
assert qubit.get_extents().width == pytest.approx(2 * swap_delta)
assert qubit.get_extents().height == pytest.approx(2 * swap_delta)
# find the vertical bar
swap_vertical = swap_gate.get_paths()[-1]
assert len(swap_vertical) == 2
assert swap_vertical.get_extents().width == 0.0
assert swap_vertical.get_extents().height == pytest.approx(wire_height)
# Measure gate
measure_gate = [obj for obj in axes.collections if obj.get_label() == 'Measure'][0]
assert measure_gate.get_paths()[0].get_extents().width == pytest.approx(mgate_width)
assert measure_gate.get_paths()[0].get_extents().height == pytest.approx(0.9 * mgate_width)
def test_draw_advanced(plot_params):
qubit_lines = {0: [('X', [0], []), ('Measure', [0], [])], 1: [None, None]}
with pytest.raises(RuntimeError):
_plot.to_draw(qubit_lines, qubit_labels={1: 'qb1', 2: 'qb2'})
with pytest.raises(RuntimeError):
_plot.to_draw(qubit_lines, drawing_order={0: 0, 1: 2})
with pytest.raises(RuntimeError):
_plot.to_draw(qubit_lines, drawing_order={1: 1, 2: 0})
# --------------------------------------------------------------------------
_, axes = _plot.to_draw(qubit_lines)
for text in axes.texts:
assert text.get_text() == r'$|0\rangle$'
# NB numbering of wire starts from bottom.
_, axes = _plot.to_draw(qubit_lines, qubit_labels={0: 'qb0', 1: 'qb1'}, drawing_order={0: 0, 1: 1})
assert [axes.texts[qubit_id].get_text() for qubit_id in range(2)] == ['qb0', 'qb1']
positions = [axes.texts[qubit_id].get_position() for qubit_id in range(2)]
assert positions[1][1] > positions[0][1]
_, axes = _plot.to_draw(qubit_lines, qubit_labels={0: 'qb2', 1: 'qb3'}, drawing_order={0: 1, 1: 0})
assert [axes.texts[qubit_id].get_text() for qubit_id in range(2)] == ['qb2', 'qb3']
positions = [axes.texts[qubit_id].get_position() for qubit_id in range(2)]
assert positions[1][1] < positions[0][1]
|
{
"content_hash": "ad80606a72ed76c3225eda945a0eac45",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 103,
"avg_line_length": 32.17741935483871,
"alnum_prop": 0.5715538847117795,
"repo_name": "ProjectQ-Framework/ProjectQ",
"id": "3e81d23d1b346f147885ebc3211f5489ef27f0ef",
"size": "8600",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "projectq/backends/_circuits/_plot_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "158833"
},
{
"name": "Python",
"bytes": "1483141"
}
],
"symlink_target": ""
}
|
import webkitpy.thirdparty.unittest2 as unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.workspace import Workspace
from webkitpy.common.system.executive_mock import MockExecutive
class WorkspaceTest(unittest.TestCase):
def test_find_unused_filename(self):
filesystem = MockFileSystem({
"dir/foo.jpg": "",
"dir/foo-1.jpg": "",
"dir/foo-2.jpg": "",
})
workspace = Workspace(filesystem, None)
self.assertEqual(workspace.find_unused_filename("bar", "bar", "bar"), "bar/bar.bar")
self.assertEqual(workspace.find_unused_filename("dir", "foo", "jpg", search_limit=1), None)
self.assertEqual(workspace.find_unused_filename("dir", "foo", "jpg", search_limit=2), None)
self.assertEqual(workspace.find_unused_filename("dir", "foo", "jpg"), "dir/foo-3.jpg")
def test_create_zip(self):
workspace = Workspace(None, MockExecutive(should_log=True))
expected_logs = "MOCK run_command: ['zip', '-9', '-r', '/zip/path', '.'], cwd=/source/path\n"
class MockZipFile(object):
def __init__(self, path):
self.filename = path
archive = OutputCapture().assert_outputs(self, workspace.create_zip, ["/zip/path", "/source/path", MockZipFile], expected_logs=expected_logs)
self.assertEqual(archive.filename, "/zip/path")
def test_create_zip_exception(self):
workspace = Workspace(None, MockExecutive(should_log=True, should_throw=True))
expected_logs = """MOCK run_command: ['zip', '-9', '-r', '/zip/path', '.'], cwd=/source/path
Workspace.create_zip failed in /source/path:
MOCK ScriptError
output: MOCK output of child process
"""
class MockZipFile(object):
def __init__(self, path):
self.filename = path
archive = OutputCapture().assert_outputs(self, workspace.create_zip, ["/zip/path", "/source/path", MockZipFile], expected_logs=expected_logs)
self.assertIsNone(archive)
|
{
"content_hash": "91806286672ab45f38482fe5703e02a0",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 149,
"avg_line_length": 47.97727272727273,
"alnum_prop": 0.6575082899099952,
"repo_name": "lordmos/blink",
"id": "cc0de52fac6f24440fae578b6ed57cdc5ee00524",
"size": "3638",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Tools/Scripts/webkitpy/common/system/workspace_unittest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "6433"
},
{
"name": "C",
"bytes": "753714"
},
{
"name": "C++",
"bytes": "40028043"
},
{
"name": "CSS",
"bytes": "539440"
},
{
"name": "F#",
"bytes": "8755"
},
{
"name": "Java",
"bytes": "18650"
},
{
"name": "JavaScript",
"bytes": "25700387"
},
{
"name": "Objective-C",
"bytes": "426711"
},
{
"name": "PHP",
"bytes": "141755"
},
{
"name": "Perl",
"bytes": "901523"
},
{
"name": "Python",
"bytes": "3748305"
},
{
"name": "Ruby",
"bytes": "141818"
},
{
"name": "Shell",
"bytes": "9635"
},
{
"name": "XSLT",
"bytes": "49328"
}
],
"symlink_target": ""
}
|
import io
import os
import setuptools
# Package metadata.
name = 'google-cloud-pubsub'
description = 'Google Cloud Pub/Sub API client library'
version = '0.39.0'
# Should be one of:
# 'Development Status :: 3 - Alpha'
# 'Development Status :: 4 - Beta'
# 'Development Status :: 5 - Production/Stable'
release_status = 'Development Status :: 4 - Beta'
dependencies = [
'google-api-core[grpc] >= 1.6.0, < 2.0.0dev',
'grpc-google-iam-v1 >= 0.11.4, < 0.12dev',
'enum34; python_version < "3.4"',
]
extras = {
}
# Setup boilerplate below this line.
package_root = os.path.abspath(os.path.dirname(__file__))
readme_filename = os.path.join(package_root, 'README.rst')
with io.open(readme_filename, encoding='utf-8') as readme_file:
readme = readme_file.read()
# Only include packages under the 'google' namespace. Do not include tests,
# benchmarks, etc.
packages = [
package for package in setuptools.find_packages()
if package.startswith('google')]
# Determine which namespaces are needed.
namespaces = ['google']
if 'google.cloud' in packages:
namespaces.append('google.cloud')
setuptools.setup(
name=name,
version=version,
description=description,
long_description=readme,
author='Google LLC',
author_email='googleapis-packages@google.com',
license='Apache 2.0',
url='https://github.com/GoogleCloudPlatform/google-cloud-python',
classifiers=[
release_status,
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Operating System :: OS Independent',
'Topic :: Internet',
],
platforms='Posix; MacOS X; Windows',
packages=packages,
namespace_packages=namespaces,
install_requires=dependencies,
extras_require=extras,
include_package_data=True,
zip_safe=False,
)
|
{
"content_hash": "24d58a70a9563d450a21aa6f32a9931d",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 75,
"avg_line_length": 28.57894736842105,
"alnum_prop": 0.6597605893186004,
"repo_name": "dhermes/google-cloud-python",
"id": "14d78aa1a1ed9b729de2dd61a7d3cf1ef7629d5c",
"size": "2747",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pubsub/setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "936"
},
{
"name": "Makefile",
"bytes": "1779"
},
{
"name": "Python",
"bytes": "13118304"
},
{
"name": "Shell",
"bytes": "8606"
}
],
"symlink_target": ""
}
|
"""
Sales templatetags docstring
"""
|
{
"content_hash": "552fb37e61a80ba3199cd27ebb068b97",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 29,
"avg_line_length": 12.333333333333334,
"alnum_prop": 0.7027027027027027,
"repo_name": "alejo8591/maker",
"id": "62a3be44542927db920152611b2a8137de46bedd",
"size": "89",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sales/templatetags/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1578070"
},
{
"name": "Perl",
"bytes": "164"
},
{
"name": "Python",
"bytes": "2863599"
},
{
"name": "Shell",
"bytes": "3561"
}
],
"symlink_target": ""
}
|
from django.test import TestCase
from django.urls import reverse
from django.contrib.auth.models import User
from netdevice.models import router, network_os, interface, logical_interface, vrf
from bgp.models import aut_num
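# Fixture helpers shared by the view tests below.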
def create_router(network_os_name):
nos = network_os.objects.get(name=network_os_name)
local_aut_num = aut_num.objects.create(asn=65000, name='Test ASN')
test_router = router.objects.create(routing_id='1.1.1.1',
hostname='test-router',
ibgp=True,
network_os=nos,
local_aut_num=local_aut_num,
)
return test_router
def create_interface(test_router):
test_interface = interface.objects.create(router=test_router,
name='ge-0/0/0',
description="A test description.",
mtu=9000,
dot1q=True,
)
test_logical_interface = logical_interface.objects.create(interface=test_interface,
name='10',
description="A logical test description.",
mtu=4170,
vlan=10,
physical_interface=False,
ldp=True,
inet_dhcp_client=False,
inet6_dhcp_client=False,
)
return test_logical_interface
class NetdeviceViewTests(TestCase):
def setUp(self):
self.client.force_login(User.objects.get_or_create(username='testuser')[0])
def test_vrf_list_view_with_one_vrf(self):
"""
Create one VRF, then check the vrf_list view and template.
"""
test_vrf = vrf.objects.create(name='Test VRF', target='target:65000:65000')
response = self.client.get(reverse('netdevice:vrf_list'))
self.assertEqual(response.status_code, 200)
def test_vrf_list_view_with_100_vrf(self):
"""
        Create 100 VRFs, then check the vrf_list view.
        """
        test_vrfs = []
        for i in range(100):
target_str = 'target:650' + str(i) + ':65000'
test_vrfs.append(vrf.objects.create(name='Test VRF #' + str(i), target=target_str))
response = self.client.get(reverse('netdevice:vrf_list'))
self.assertEqual(response.status_code, 200)
def test_vrf_detail_view(self):
"""
Create one VRF, then check the detail view.
"""
test_vrf = vrf.objects.create(name='Test VRF', target='target:65000:65000')
response = self.client.get(reverse('netdevice:vrf_detail', kwargs={'vrf_id': test_vrf.id}))
self.assertEqual(response.status_code, 200)
def test_vrf_edit_view(self):
"""
Create one VRF, then check the edit view.
"""
test_vrf = vrf.objects.create(name='Test VRF', target='target:65000:65000')
response = self.client.get(reverse('netdevice:vrf_edit', kwargs={'vrf_id': test_vrf.id}))
self.assertEqual(response.status_code, 200)
def test_config_view_with_ios_router(self):
"""
        Create an IOS router, then check the configuration template output.
"""
test_router = create_router('ios')
response = self.client.get(reverse('netdevice:router_config', kwargs={'router_id': test_router.id}))
self.assertEqual(response.status_code, 200)
def test_config_view_with_junos_router(self):
"""
        Create a JunOS router, then check the configuration template output.
"""
test_router = create_router('junos')
response = self.client.get(reverse('netdevice:router_config', kwargs={'router_id': test_router.id}))
self.assertEqual(response.status_code, 200)
def test_create_interface_form_view(self):
"""
Create a router, then check the create interface form view is displayed correctly.
"""
test_router = create_router('junos')
response = self.client.get(reverse('netdevice:interface_create', kwargs={'router_id': test_router.id}))
self.assertEqual(response.status_code, 200)
def test_edit_interface_form_view(self):
"""
Create a router, create an interface, then check the edit interface form view is displayed correctly.
"""
test_router = create_router('junos')
test_logical_interface = create_interface(test_router)
response = self.client.get(reverse('netdevice:interface_edit', kwargs={'interface_id': test_logical_interface.interface.id}))
self.assertEqual(response.status_code, 200)
def test_create_logical_interface_form_view(self):
"""
        Create a router and an interface, then check the create logical interface form view is displayed correctly.
"""
test_router = create_router('junos')
test_logical_interface = create_interface(test_router)
response = self.client.get(reverse('netdevice:logical_interface_create', kwargs={'interface_id': test_logical_interface.interface.id}))
self.assertEqual(response.status_code, 200)
def test_edit_logical_interface_form_view(self):
"""
        Create a router, create an interface, then check the edit logical interface form view is displayed correctly.
"""
test_router = create_router('junos')
test_logical_interface = create_interface(test_router)
response = self.client.get(reverse('netdevice:logical_interface_edit', kwargs={'logical_interface_id': test_logical_interface.id}))
self.assertEqual(response.status_code, 200)
|
{
"content_hash": "6cc8db8f5d18e3bfa54b4105db232bc5",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 158,
"avg_line_length": 45.65492957746479,
"alnum_prop": 0.5384852691655098,
"repo_name": "lkmhaqer/gtools-python",
"id": "32effd21a962fc3e1aeb183b8b5b413ec3803bdd",
"size": "6511",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "netdevice/tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2543"
},
{
"name": "HTML",
"bytes": "24530"
},
{
"name": "Python",
"bytes": "123398"
}
],
"symlink_target": ""
}
|
import sys, os
import datetime
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'myproject'
copyright = u'%d, myauthor' % datetime.date.today().year
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'myprojectdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'myproject.tex', u'myproject Documentation',
u'myauthor', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
|
{
"content_hash": "938b39ba5963ba7ce6338625d7185b72",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 80,
"avg_line_length": 32.61538461538461,
"alnum_prop": 0.710411051212938,
"repo_name": "sittizen/django-fullcalendar",
"id": "73921f13471bc750cf4b375dec2c21482604b0d3",
"size": "6356",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "342538"
},
{
"name": "JavaScript",
"bytes": "421916"
},
{
"name": "Python",
"bytes": "63853"
},
{
"name": "Ruby",
"bytes": "1193"
},
{
"name": "Shell",
"bytes": "7500"
}
],
"symlink_target": ""
}
|
"""A class to store named variables and a scope operator to manage sharing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import six
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import logging
__all__ = ["VariableScope", "get_variable_scope", "get_variable",
"variable_scope", "variable_op_scope", "no_regularizer"]
class _VariableStore(object):
"""Variable store that carries a number of named Variables.
New variable names and new variables can be created; all stored
variables are initialized with the initializer passed to __init__.
Attributes:
vars: a dictionary with string names (same as passed in GetVar) as keys
and the corresponding TensorFlow Variables as values.
"""
def __init__(self):
"""Create a variable store."""
self._vars = {} # A dictionary of the stored TensorFlow variables.
def get_variable(self, name, shape=None, dtype=dtypes.float32,
initializer=None, regularizer=None, reuse=None,
trainable=True, collections=None, caching_device=None):
"""Gets an existing variable with these parameters or create a new one.
If a variable with the given name is already stored, we return the stored
variable. Otherwise, we create a new one.
Set `reuse` to `True` when you only want to reuse existing Variables.
Set `reuse` to `False` when you only want to create new Variables.
If `reuse` is `None` (the default), both new and existing variables are
returned.
If initializer is `None` (the default), the default initializer passed in
the constructor is used. If that one is `None` too, we use a new
`UniformUnitScalingInitializer`. If initializer is a Tensor, we use
it as a value and derive the shape from the initializer.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
reuse: a Boolean or `None`. Controls reuse or creation of variables.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.VARIABLES]` (see tf.Variable).
caching_device: Optional device string or function describing where the
Variable should be cached for reading. Defaults to the Variable's
device. If not `None`, caches on another device. Typical use is to
cache on the device where the Ops using the Variable reside, to
deduplicate copying through `Switch` and other conditional statements.
Returns:
The created or existing variable.
Raises:
ValueError: when creating a new variable and shape is not declared,
when reusing a variable and specifying a conflicting shape,
or when violating reuse during variable creation.
"""
# Set to true if initializer is a constant.
initializing_from_value = False
if initializer is not None and isinstance(initializer, ops.Tensor):
initializing_from_value = True
if shape is not None and initializing_from_value:
raise ValueError("If initializer is a constant, do not specify shape.")
should_check = reuse is not None
dtype = dtypes.as_dtype(dtype)
shape = tensor_shape.as_shape(shape)
if name in self._vars:
# Here we handle the case when returning an existing variable.
if should_check and not reuse:
raise ValueError("Variable %s already exists, disallowed."
" Did you mean to set reuse=True in VarScope?" % name)
found_var = self._vars[name]
if not shape.is_compatible_with(found_var.get_shape()):
raise ValueError("Trying to share variable %s, but specified shape %s"
" and found shape %s." % (name, shape,
found_var.get_shape()))
if not dtype.is_compatible_with(found_var.dtype):
dtype_str = dtype.name
found_type_str = found_var.dtype.name
raise ValueError("Trying to share variable %s, but specified dtype %s"
" and found dtype %s." % (name, dtype_str,
found_type_str))
return found_var
# The code below handles only the case of creating a new variable.
if should_check and reuse:
raise ValueError("Variable %s does not exist, disallowed."
" Did you mean to set reuse=None in VarScope?" % name)
if not shape.is_fully_defined() and not initializing_from_value:
raise ValueError("Shape of a new variable (%s) must be fully defined, "
"but instead was %s." % (name, shape))
# Create the tensor to initialize the variable.
if initializer is None:
initializer = init_ops.uniform_unit_scaling_initializer()
# Clear control dependencies while creating the initializer.
with ops.control_dependencies(None):
if initializing_from_value:
init_val = initializer
else:
with ops.name_scope(name + "/Initializer/"):
init_val = initializer(shape.as_list(), dtype=dtype)
# Create the variable.
v = variables.Variable(init_val, name=name, trainable=trainable,
collections=collections,
caching_device=caching_device)
self._vars[name] = v
logging.info("Created variable %s with shape %s and init %s", v.name,
format(shape), initializer)
# Run the regularizer if requested and save the resulting loss.
if regularizer:
with ops.name_scope(name + "/Regularizer/"):
loss = regularizer(v)
if loss:
logging.info("Applied regularizer to %s and added the result %s to "
"REGULARIZATION_LOSSES.", v.name, loss.name)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, loss)
return v
# To stop regularization, use this regularizer
def no_regularizer(_):
"""Use this function to prevent regularization of variables."""
return None
class VariableScope(object):
"""Variable scope object to carry defaults to provide to get_variable.
Many of the arguments we need for get_variable in a variable store are most
easily handled with a context. This object is used for the defaults.
Attributes:
name: name of the current scope, used as prefix in get_variable.
initializer: default initializer passed to get_variable.
regularizer: default regularizer passed to get_variable.
reuse: Boolean or None, setting the reuse in get_variable.
caching_device: string, callable, or None: the caching device passed to
get_variable.
name_scope: The name passed to tf.name_scope.
"""
def __init__(self, reuse, name="", initializer=None, regularizer=None,
caching_device=None, name_scope=""):
"""Creates a new VariableScope with the given properties."""
self._name = name
self._initializer = initializer
self._regularizer = regularizer
self._reuse = reuse
self._caching_device = caching_device
self._name_scope = name_scope
@property
def name(self):
return self._name
@property
def reuse(self):
return self._reuse
@property
def initializer(self):
return self._initializer
@property
def regularizer(self):
return self._regularizer
@property
def caching_device(self):
return self._caching_device
def reuse_variables(self):
"""Reuse variables in this scope."""
self._reuse = True
def set_initializer(self, initializer):
"""Set initializer for this scope."""
self._initializer = initializer
def set_regularizer(self, regularizer):
"""Set regularizer for this scope."""
self._regularizer = regularizer
def set_caching_device(self, caching_device):
"""Set caching_device for this scope."""
self._caching_device = caching_device
def get_variable(self, var_store, name, shape=None, dtype=dtypes.float32,
initializer=None, regularizer=None,
trainable=True, collections=None, caching_device=None):
"""Gets an existing variable with this name or create a new one."""
if initializer is None:
initializer = self._initializer
if regularizer is None:
regularizer = self._regularizer
if caching_device is None:
caching_device = self._caching_device
full_name = self.name + "/" + name if self.name else name
# Variable names only depend on variable_scope (full_name here),
# not name_scope, so we reset it below for the time of variable creation.
with ops.name_scope(None):
return var_store.get_variable(
full_name, shape=shape, dtype=dtype, initializer=initializer,
regularizer=regularizer, reuse=self.reuse, trainable=trainable,
collections=collections, caching_device=caching_device)
_VARSTORE_KEY = ("__variable_store",)
_VARSCOPE_KEY = ("__varscope",)
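# Graph-collection keys under which the shared variable store and the current
# variable scope are kept (see get_variable_scope and
# _get_default_variable_store below).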
def get_variable_scope():
"""Returns the current variable scope."""
scope = ops.get_collection(_VARSCOPE_KEY)
if scope: # This collection has at most 1 element, the default scope at [0].
return scope[0]
scope = VariableScope(False)
ops.add_to_collection(_VARSCOPE_KEY, scope)
return scope
def _get_default_variable_store():
store = ops.get_collection(_VARSTORE_KEY)
if store:
return store[0]
store = _VariableStore()
ops.add_to_collection(_VARSTORE_KEY, store)
return store
def get_variable(name, shape=None, dtype=dtypes.float32, initializer=None,
regularizer=None, trainable=True,
collections=None):
"""Gets an existing variable with these parameters or create a new one.
This function prefixes the name with the current variable scope
and performs reuse checks. See the
[Variable Scope How To](../../how_tos/variable_scope/index.md)
for an extensive description of how reusing works. Here is a basic example:
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1]) # v.name == "foo/v:0"
w = tf.get_variable("w", [1]) # w.name == "foo/w:0"
with tf.variable_scope("foo", reuse=True)
v1 = tf.get_variable("v") # The same as v above.
```
If initializer is `None` (the default), the default initializer passed in
the variable scope will be used. If that one is `None` too, a
`UniformUnitScalingInitializer` will be used. The initializer can also be
a Tensor, in which case the variable is initialized to this value and shape.
Similarly, if the regularizer is `None` (the default), the default regularizer
passed in the variable scope will be used (if that is `None` too,
then by default no regularization is performed).
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
collections: List of graph collections keys to add the Variable to.
Defaults to `[GraphKeys.VARIABLES]` (see tf.Variable).
Returns:
The created or existing variable.
Raises:
ValueError: when creating a new variable and shape is not declared,
or when violating reuse during variable creation. Reuse is set inside
`variable_scope`.
"""
return get_variable_scope().get_variable(
_get_default_variable_store(), name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer, trainable=trainable,
collections=collections)
@contextlib.contextmanager
def _pure_variable_scope(name_or_scope, reuse=None, initializer=None,
regularizer=None, caching_device=None):
"""Creates a context for the variable_scope, see `variable_scope` for docs.
Note: this does not create a name scope.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
reuse: `True` or `None`; if `True`, we go into reuse mode for this scope as
well as all sub-scopes; if `None`, we just inherit the parent scope reuse.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
Yields:
    A scope that can be captured and reused.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope, or if reuse is not `None` or `True`.
TypeError: when the types of some arguments are not appropriate.
"""
get_variable_scope() # Ensure that a default exists, then get a pointer.
default_varscope = ops.get_collection(_VARSCOPE_KEY)
try:
old = default_varscope[0]
reuse = reuse or old.reuse # Re-using is inherited by sub-scopes.
if isinstance(name_or_scope, VariableScope):
name_scope = name_or_scope._name_scope # pylint: disable=protected-access
# Handler for the case when we jump to a shared scope.
# We create a new VariableScope (default_varscope[0]) that contains
# a copy of the provided shared scope, possibly with changed reuse
# and initializer, if the user requested this.
default_varscope[0] = VariableScope(
reuse, name=name_or_scope.name,
initializer=name_or_scope.initializer,
regularizer=name_or_scope.regularizer,
caching_device=name_or_scope.caching_device,
name_scope=name_scope)
if initializer is not None:
default_varscope[0].set_initializer(initializer)
if regularizer is not None:
default_varscope[0].set_regularizer(regularizer)
if caching_device is not None:
default_varscope[0].set_caching_device(caching_device)
yield default_varscope[0]
else:
# Handler for the case when we just prolong current variable scope.
# VariableScope with name extended by the provided one, and inherited
# reuse and initializer (except if the user provided values to set).
new_name = old.name + "/" + name_or_scope if old.name else name_or_scope
default_varscope[0] = VariableScope(
reuse, name=new_name,
initializer=old.initializer,
regularizer=old.regularizer,
caching_device=old.caching_device,
name_scope=name_or_scope)
if initializer is not None:
default_varscope[0].set_initializer(initializer)
if regularizer is not None:
default_varscope[0].set_regularizer(regularizer)
if caching_device is not None:
default_varscope[0].set_caching_device(caching_device)
yield default_varscope[0]
finally:
default_varscope[0] = old
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def variable_scope(name_or_scope, reuse=None, initializer=None,
regularizer=None, caching_device=None):
"""Returns a context for variable scope.
Variable scope allows to create new variables and to share already created
ones while providing checks to not create or share by accident. For details,
see the [Variable Scope How To](../../how_tos/variable_scope/index.md),
here we present only a few basic examples.
Simple example of how to create a new variable:
```python
with tf.variable_scope("foo"):
with tf.variable_scope("bar"):
v = tf.get_variable("v", [1])
assert v.name == "foo/bar/v:0"
```
Basic example of sharing a variable:
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
with tf.variable_scope("foo", reuse=True):
v1 = tf.get_variable("v", [1])
assert v1 == v
```
Sharing a variable by capturing a scope and setting reuse:
```python
with tf.variable_scope("foo") as scope:
v = tf.get_variable("v", [1])
scope.reuse_variables()
v1 = tf.get_variable("v", [1])
assert v1 == v
```
To prevent accidental sharing of variables, we raise an exception when
getting an existing variable in a non-reusing scope.
```python
with tf.variable_scope("foo"):
v = tf.get_variable("v", [1])
v1 = tf.get_variable("v", [1])
# Raises ValueError("... v already exists ...").
```
Similarly, we raise an exception when trying to get a variable that
does not exist in reuse mode.
```python
with tf.variable_scope("foo", reuse=True):
v = tf.get_variable("v", [1])
# Raises ValueError("... v does not exists ...").
```
Note that the `reuse` flag is inherited: if we open a reusing scope,
then all its sub-scopes become reusing as well.
Args:
name_or_scope: `string` or `VariableScope`: the scope to open.
reuse: `True` or `None`; if `True`, we go into reuse mode for this scope as
well as all sub-scopes; if `None`, we just inherit the parent scope reuse.
initializer: default initializer for variables within this scope.
regularizer: default regularizer for variables within this scope.
caching_device: default caching device for variables within this scope.
Returns:
    A scope that can be captured and reused.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope, or if reuse is not `None` or `True`.
TypeError: when the types of some arguments are not appropriate.
"""
if not isinstance(name_or_scope, (VariableScope,) + six.string_types):
raise TypeError("VariableScope: name_scope must be a string or "
"VariableScope.")
if isinstance(name_or_scope, six.string_types):
name = name_or_scope
else:
name = name_or_scope._name_scope # pylint: disable=protected-access
if name:
with ops.name_scope(name), _pure_variable_scope(
name_or_scope, reuse=reuse, initializer=initializer,
regularizer=regularizer, caching_device=caching_device) as vs:
yield vs
else:
# This can only happen if someone is entering the root variable scope.
with _pure_variable_scope(
name_or_scope, reuse=reuse, initializer=initializer,
regularizer=regularizer, caching_device=caching_device) as vs:
yield vs
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def variable_op_scope(values, name, default_name, initializer=None,
regularizer=None, caching_device=None):
"""Returns a context manager for defining an op that creates variables.
This context manager validates that the given `values` are from the
same graph, ensures that that graph is the default graph, and pushes a
name scope and a variable scope.
If `name` is not None, it is used as is in the variable scope. If `name`
is None, then `default_name` is used. In that case, if the same name has been
  previously used in the same scope, it will be made unique by appending `_N` to
it.
This is intended to be used when defining generic ops and so reuse is always
inherited.
For example, to define a new Python op called `my_op_with_vars`:
```python
def my_op_with_vars(a, b, name=None):
with tf.variable_op_scope([a, b], name, "MyOp") as scope:
a = tf.convert_to_tensor(a, name="a")
b = tf.convert_to_tensor(b, name="b")
c = tf.get_variable('c')
# Define some computation that uses `a`, `b`, and `c`.
return foo_op(..., name=scope)
```
Args:
values: The list of `Tensor` arguments that are passed to the op function.
name: The name argument that is passed to the op function, this name is not
uniquified in the variable scope.
default_name: The default name to use if the `name` argument is `None`, this
name will be uniquified.
initializer: The default initializer to pass to variable scope.
regularizer: The default regularizer for variables within this scope.
caching_device: The default caching device for variables within this scope.
Returns:
A context manager for use in defining a Python op.
Raises:
ValueError: when trying to reuse within a create scope, or create within
a reuse scope, or if reuse is not `None` or `True`.
TypeError: when the types of some arguments are not appropriate.
"""
if default_name is None:
raise TypeError("default_name cannot be None")
g = ops._get_graph_from_inputs(values) # pylint: disable=protected-access
with g.as_default():
if name:
with variable_scope(name, initializer=initializer,
regularizer=regularizer,
caching_device=caching_device) as vs:
yield vs
else:
with ops.name_scope(default_name) as scope:
count = len(default_name.split("/"))
scoped_name = "/".join(scope.split("/")[-count - 1:-1])
with _pure_variable_scope(
scoped_name, initializer=initializer,
regularizer=regularizer, caching_device=caching_device) as vs:
yield vs
|
{
"content_hash": "3f6c2f889fda559f9b82cab9f8e7687c",
"timestamp": "",
"source": "github",
"line_count": 549,
"max_line_length": 80,
"avg_line_length": 39.998178506375226,
"alnum_prop": 0.6796302199553714,
"repo_name": "4Quant/tensorflow",
"id": "7ffdd37212d67a64bdf8de21ab703c038522a0e4",
"size": "22637",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/variable_scope.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "151542"
},
{
"name": "C++",
"bytes": "7157767"
},
{
"name": "CMake",
"bytes": "29325"
},
{
"name": "CSS",
"bytes": "107"
},
{
"name": "HTML",
"bytes": "658145"
},
{
"name": "Java",
"bytes": "50361"
},
{
"name": "JavaScript",
"bytes": "16175"
},
{
"name": "Jupyter Notebook",
"bytes": "795383"
},
{
"name": "Objective-C",
"bytes": "1288"
},
{
"name": "Protocol Buffer",
"bytes": "100797"
},
{
"name": "Python",
"bytes": "4006375"
},
{
"name": "Shell",
"bytes": "75494"
},
{
"name": "TypeScript",
"bytes": "329838"
}
],
"symlink_target": ""
}
|
import fileinput
import pdb
import sys
import traceback
from itertools import permutations
def solve(paths):
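    # Brute force: build a symmetric distance table, then try every ordering
    # of the cities and track the shortest and longest total route length.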
cities = set()
distances = dict()
for start, end, distance in paths:
cities.add(start)
cities.add(end)
distances.setdefault(start, dict())[end] = distance
distances.setdefault(end, dict())[start] = distance
shortest = sys.maxsize
longest = -sys.maxsize
for items in permutations(cities):
dist = sum(map(lambda x, y: distances[x][y], items[:-1], items[1:]))
shortest = min(shortest, dist)
longest = max(longest, dist)
return (shortest, longest)
if __name__ == "__main__":
try:
lines = []
for line in fileinput.input():
parts = line.strip().split()
lines.append((parts[0], parts[2], int(parts[4])))
print(solve(lines))
except Exception:
traceback.print_exc()
pdb.post_mortem()
|
{
"content_hash": "ce511c9edada1089e66cf334a02cde04",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 76,
"avg_line_length": 26.27777777777778,
"alnum_prop": 0.5972515856236786,
"repo_name": "BrendanLeber/adventofcode",
"id": "549b5cdaae6b43cccc4242c6e45c9c7e2c44b041",
"size": "993",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "2015/09-single_night/solve.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62564"
}
],
"symlink_target": ""
}
|
"""
URLConf for Django user profile management.
Recommended usage is to use a call to ``include()`` in your project's
root URLConf to include this URLConf for any URL beginning with
'/profiles/'.
If the default behavior of the profile views is acceptable to you,
simply use a line like this in your root URLConf to set up the default
URLs for profiles::
(r'^profiles/', include('profiles.urls')),
But if you'd like to customize the behavior (e.g., by passing extra
arguments to the various views) or split up the URLs, feel free to set
up your own URL patterns for these views instead. If you do, it's a
good idea to keep the name ``profiles_profile_detail`` for the pattern
which points to the ``profile_detail`` view, since several views use
``reverse()`` with that name to generate a default post-submission
redirect. If you don't use that name, remember to explicitly pass
``success_url`` to those views.
"""
from django.conf.urls.defaults import patterns, url
from profiles import views
urlpatterns = patterns('',
url(r'^create/$',
views.create_profile,
name='profiles_create_profile'),
url(r'^edit/$',
views.edit_profile,
name='profiles_edit_profile'),
url(r'^(?P<username>[\w.@+-]+)/$',
views.profile_detail,
name='profiles_profile_detail'),
url(r'^$',
views.profile_list,
name='profiles_profile_list'),
)
|
{
"content_hash": "6ce4495c17e251e6a1b2f79784d41e4a",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 70,
"avg_line_length": 38.53488372093023,
"alnum_prop": 0.5896197948098975,
"repo_name": "saebyn/django-profiles",
"id": "9f560f39847e0f54fe981566552fe2d3210baeaa",
"size": "1657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "profiles/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "16083"
}
],
"symlink_target": ""
}
|
from uuid import UUID
from org.apache.qpid.proton.engine import EndpointState, TransportException, Sasl, SslDomain
from org.apache.qpid.proton.engine.impl import ConnectionImpl, SessionImpl, \
SenderImpl, ReceiverImpl, TransportImpl
from org.apache.qpid.proton.engine.impl.ssl import SslDomainImpl, SslPeerDetailsImpl
from org.apache.qpid.proton.message import MessageFormat
from org.apache.qpid.proton.message.impl import MessageImpl
from org.apache.qpid.proton.messenger import MessengerException, Status
from org.apache.qpid.proton.messenger.impl import MessengerImpl
from org.apache.qpid.proton.amqp.messaging import Source, Target, Accepted, AmqpValue
from org.apache.qpid.proton.amqp import UnsignedInteger
from jarray import zeros
from java.util import EnumSet, UUID as JUUID
LANGUAGE = "Java"
class Skipped(Exception):
skipped = True
PN_SESSION_WINDOW = TransportImpl.SESSION_WINDOW
PENDING = "PENDING"
ACCEPTED = "ACCEPTED"
REJECTED = "REJECTED"
STATUSES = {
Status.ACCEPTED: ACCEPTED,
Status.REJECTED: REJECTED,
Status.PENDING: PENDING,
Status.UNKNOWN: None
}
MANUAL = "MANUAL"
AUTOMATIC = "AUTOMATIC"
class Endpoint(object):
LOCAL_UNINIT = 1
LOCAL_ACTIVE = 2
LOCAL_CLOSED = 4
REMOTE_UNINIT = 8
REMOTE_ACTIVE = 16
REMOTE_CLOSED = 32
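    # Bit flags describing local/remote endpoint state; `state` composes them
    # into a mask and `_enums` decodes a mask back into Java EnumSets.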
def __init__(self):
self.condition = None
@property
def remote_condition(self):
raise Skipped()
@property
def state(self):
local = self.impl.getLocalState()
remote = self.impl.getRemoteState()
result = 0
if (local == EndpointState.UNINITIALIZED):
result = result | self.LOCAL_UNINIT
elif (local == EndpointState.ACTIVE):
result = result | self.LOCAL_ACTIVE
elif (local == EndpointState.CLOSED):
result = result | self.LOCAL_CLOSED
if (remote == EndpointState.UNINITIALIZED):
result = result | self.REMOTE_UNINIT
elif (remote == EndpointState.ACTIVE):
result = result | self.REMOTE_ACTIVE
elif (remote == EndpointState.CLOSED):
result = result | self.REMOTE_CLOSED
return result
    def _enums(self, mask):
        # Decode the bit mask into the local and remote EnumSets expected by
        # the Java engine API.
        local = []
        if mask & self.LOCAL_UNINIT:
            local.append(EndpointState.UNINITIALIZED)
        if mask & self.LOCAL_ACTIVE:
            local.append(EndpointState.ACTIVE)
        if mask & self.LOCAL_CLOSED:
            local.append(EndpointState.CLOSED)
        remote = []
        if mask & self.REMOTE_UNINIT:
            remote.append(EndpointState.UNINITIALIZED)
        if mask & self.REMOTE_ACTIVE:
            remote.append(EndpointState.ACTIVE)
        if mask & self.REMOTE_CLOSED:
            remote.append(EndpointState.CLOSED)
        return EnumSet.of(*local), EnumSet.of(*remote)
def open(self):
self.impl.open()
def close(self):
self.impl.close()
class Condition:
def __init__(self, name, description=None, info=None):
self.name = name
self.description = description
self.info = info
def __repr__(self):
return "Condition(%s)" % ", ".join([repr(x) for x in
(self.name, self.description, self.info)
if x])
def __eq__(self, o):
if not isinstance(o, Condition): return False
return self.name == o.name and \
self.description == o.description and \
self.info == o.info
def wrap_connection(impl):
if impl: return Connection(_impl = impl)
class Connection(Endpoint):
def __init__(self, _impl=None):
self.impl = _impl or ConnectionImpl()
@property
def writable(self):
raise Skipped()
def session(self):
return wrap_session(self.impl.session())
def session_head(self, mask):
return wrap_session(self.impl.sessionHead(*self._enums(mask)))
def link_head(self, mask):
return wrap_link(self.impl.linkHead(*self._enums(mask)))
@property
def work_head(self):
return wrap_delivery(self.impl.getWorkHead())
def _get_container(self):
return self.impl.getContainer()
def _set_container(self, container):
self.impl.setContainer(container)
container = property(_get_container, _set_container)
def _get_hostname(self):
return self.impl.getHostname()
def _set_hostname(self, hostname):
self.impl.setHostname(hostname)
hostname = property(_get_hostname, _set_hostname)
def _get_remote_container(self):
return self.impl.getRemoteContainer()
def _set_remote_container(self, container):
self.impl.setRemoteContainer(container)
remote_container = property(_get_remote_container, _set_remote_container)
def _get_remote_hostname(self):
return self.impl.getRemoteHostname()
def _set_remote_hostname(self, hostname):
self.impl.setRemoteHostname(hostname)
remote_hostname = property(_get_remote_hostname, _set_remote_hostname)
@property
def offered_capabilities(self):
return DataDummy()
def wrap_session(impl):
# XXX
if impl: return Session(impl)
class Session(Endpoint):
def __init__(self, impl):
self.impl = impl
@property
def connection(self):
return wrap_connection(self.impl.getConnection())
def sender(self, name):
return wrap_link(self.impl.sender(name))
def receiver(self, name):
return wrap_link(self.impl.receiver(name))
def wrap_link(impl):
if impl is None: return None
elif isinstance(impl, SenderImpl):
return Sender(impl)
elif isinstance(impl, ReceiverImpl):
return Receiver(impl)
else:
raise Exception("unknown type")
class Link(Endpoint):
def __init__(self, impl):
self.impl = impl
@property
def source(self):
if self.impl.getSource() is None:
self.impl.setSource(Source())
return Terminus(self.impl.getSource())
@property
def target(self):
if self.impl.getTarget() is None:
self.impl.setTarget(Target())
return Terminus(self.impl.getTarget())
@property
def remote_source(self):
return Terminus(self.impl.getRemoteSource())
@property
def remote_target(self):
return Terminus(self.impl.getRemoteTarget())
@property
def session(self):
return wrap_session(self.impl.getSession())
def delivery(self, tag):
return wrap_delivery(self.impl.delivery(tag))
@property
def current(self):
return wrap_delivery(self.impl.current())
def advance(self):
return self.impl.advance()
@property
def unsettled(self):
return self.impl.getUnsettled()
@property
def credit(self):
return self.impl.getCredit()
@property
def available(self):
raise Skipped()
@property
def queued(self):
return self.impl.getQueued()
def next(self, mask):
return wrap_link(self.impl.next(*self._enums(mask)))
class DataDummy:
def format(self):
pass
def put_array(self, *args, **kwargs):
raise Skipped()
class Terminus(object):
UNSPECIFIED = None
def __init__(self, impl):
self.impl = impl
self.type = None
self.timeout = None
self.durability = None
self.expiry_policy = None
self.dynamic = None
self.properties = DataDummy()
self.outcomes = DataDummy()
self.filter = DataDummy()
self.capabilities = DataDummy()
def _get_address(self):
return self.impl.getAddress()
def _set_address(self, address):
self.impl.setAddress(address)
address = property(_get_address, _set_address)
def _get_timeout(self):
return self.impl.getTimeout()
def _set_timeout(self, t):
if t is not None:
t = UnsignedInteger(t)
return self.impl.setTimeout(t)
timeout = property(_get_timeout, _set_timeout)
def copy(self, src):
self.address = src.address
class Sender(Link):
def offered(self, n):
raise Skipped()
def send(self, bytes):
return self.impl.send(bytes, 0, len(bytes))
def drained(self):
self.impl.drained()
class Receiver(Link):
def flow(self, n):
self.impl.flow(n)
def drain(self, n):
self.impl.drain(n)
def recv(self, size):
output = zeros(size, "b")
n = self.impl.recv(output, 0, size)
if n >= 0:
return output.tostring()[:n]
elif n == TransportImpl.END_OF_STREAM:
return None
else:
raise Exception(n)
def wrap_delivery(impl):
if impl: return Delivery(impl)
class Delivery(object):
RECEIVED = 1
ACCEPTED = 2
REJECTED = 3
RELEASED = 4
MODIFIED = 5
def __init__(self, impl):
self.impl = impl
@property
def tag(self):
return self.impl.getTag().tostring()
@property
def writable(self):
return self.impl.isWritable()
@property
def readable(self):
return self.impl.isReadable()
@property
def updated(self):
return self.impl.isUpdated()
def update(self, disp):
if disp == self.ACCEPTED:
self.impl.disposition(Accepted.getInstance())
else:
raise Exception("xxx: %s" % disp)
@property
def remote_state(self):
rd = self.impl.getRemoteState()
if(rd == Accepted.getInstance()):
return self.ACCEPTED
else:
raise Exception("xxx: %s" % rd)
@property
def local_state(self):
ld = self.impl.getLocalState()
if(ld == Accepted.getInstance()):
return self.ACCEPTED
else:
raise Exception("xxx: %s" % ld)
def settle(self):
self.impl.settle()
@property
def settled(self):
return self.impl.remotelySettled()
@property
def work_next(self):
return wrap_delivery(self.impl.getWorkNext())
class Transport(object):
TRACE_OFF = 0
TRACE_RAW = 1
TRACE_FRM = 2
TRACE_DRV = 4
def __init__(self):
self.impl = TransportImpl()
def trace(self, mask):
# XXX: self.impl.trace(mask)
pass
def bind(self, connection):
self.impl.bind(connection.impl)
def output(self, size):
""" has the transport produce up to size bytes returning what was
produced to the caller"""
output = zeros(size, "b")
n = self.impl.output(output, 0, size)
if n >= 0:
return output.tostring()[:n]
elif n == TransportImpl.END_OF_STREAM:
return None
else:
raise Exception("XXX: %s" % n)
def input(self, bytes):
return self.impl.input(bytes, 0, len(bytes))
def _get_max_frame_size(self):
#return pn_transport_get_max_frame(self._trans)
raise Skipped()
def _set_max_frame_size(self, value):
#pn_transport_set_max_frame(self._trans, value)
raise Skipped()
max_frame_size = property(_get_max_frame_size, _set_max_frame_size,
doc="""
Sets the maximum size for received frames (in bytes).
""")
@property
def remote_max_frame_size(self):
#return pn_transport_get_remote_max_frame(self._trans)
raise Skipped()
# AMQP 1.0 idle-time-out
def _get_idle_timeout(self):
#return pn_transport_get_idle_timeout(self._trans)
raise Skipped()
def _set_idle_timeout(self, value):
#pn_transport_set_idle_timeout(self._trans, value)
raise Skipped()
idle_timeout = property(_get_idle_timeout, _set_idle_timeout,
doc="""
The idle timeout of the connection (in milliseconds).
""")
@property
def remote_idle_timeout(self):
#return pn_transport_get_remote_idle_timeout(self._trans)
raise Skipped()
@property
def frames_output(self):
#return pn_transport_get_frames_output(self._trans)
raise Skipped()
@property
def frames_input(self):
#return pn_transport_get_frames_input(self._trans)
raise Skipped()
class symbol(unicode):
def __repr__(self):
return "symbol(%s)" % unicode.__repr__(self)
class Data(object):
SYMBOL = None
def __init__(self, *args, **kwargs):
raise Skipped()
class Timeout(Exception):
pass
class Messenger(object):
def __init__(self, *args, **kwargs):
        # Comment out or remove the line below to enable the messenger tests.
raise Skipped()
self.impl = MessengerImpl()
def start(self):
self.impl.start()
def stop(self):
self.impl.stop()
def subscribe(self, source):
self.impl.subscribe(source)
def put(self, message):
self.impl.put(message.impl)
return self.impl.outgoingTracker()
def send(self):
self.impl.send()
def recv(self, n):
self.impl.recv(n)
def get(self, message=None):
if message is None:
self.impl.get()
else:
message.impl = self.impl.get()
return self.impl.incomingTracker()
@property
def outgoing(self):
return self.impl.outgoing()
@property
def incoming(self):
return self.impl.incoming()
def _get_timeout(self):
return self.impl.getTimeout()
def _set_timeout(self, t):
self.impl.setTimeout(t)
timeout = property(_get_timeout, _set_timeout)
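    # accept/reject/settle fall back to a cumulative operation up to the
    # latest tracker when no explicit tracker is supplied.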
def accept(self, tracker=None):
if tracker is None:
tracker = self.impl.incomingTracker()
flags = self.impl.CUMULATIVE
else:
flags = 0
self.impl.accept(tracker, flags)
def reject(self, tracker=None):
if tracker is None:
tracker = self.impl.incomingTracker()
flags = self.impl.CUMULATIVE
else:
flags = 0
self.impl.reject(tracker, flags)
def settle(self, tracker=None):
if tracker is None:
tracker = self.impl.outgoingTracker()
flags = self.impl.CUMULATIVE
else:
flags = 0
self.impl.settle(tracker, flags)
def status(self, tracker):
return STATUSES[self.impl.getStatus(tracker)]
def _get_incoming_window(self):
return self.impl.getIncomingWindow()
def _set_incoming_window(self, window):
self.impl.setIncomingWindow(window)
incoming_window = property(_get_incoming_window, _set_incoming_window)
def _get_outgoing_window(self):
return self.impl.getOutgoingWindow()
def _set_outgoing_window(self, window):
self.impl.setOutgoingWindow(window)
outgoing_window = property(_get_outgoing_window, _set_outgoing_window)
class Message(object):
AMQP = MessageFormat.AMQP
TEXT = MessageFormat.TEXT
DATA = MessageFormat.DATA
JSON = MessageFormat.JSON
DEFAULT_PRIORITY = MessageImpl.DEFAULT_PRIORITY
def __init__(self):
self.impl = MessageImpl()
def clear(self):
self.impl.clear()
def save(self):
saved = self.impl.save()
if saved is None:
saved = ""
elif not isinstance(saved, unicode):
saved = saved.tostring()
return saved
def load(self, data):
self.impl.load(data)
def encode(self):
size = 1024
output = zeros(size, "b")
while True:
n = self.impl.encode(output, 0, size)
# XXX: need to check for overflow
if n > 0:
return output.tostring()[:n]
else:
raise Exception(n)
def decode(self, data):
self.impl.decode(data,0,len(data))
def _get_id(self):
id = self.impl.getMessageId()
if isinstance(id, JUUID):
id = UUID( id.toString() )
return id
def _set_id(self, value):
if isinstance(value, UUID):
value = JUUID.fromString( str(value) )
return self.impl.setMessageId(value)
id = property(_get_id, _set_id)
def _get_correlation_id(self):
id = self.impl.getCorrelationId()
if isinstance(id, JUUID):
id = UUID( id.toString() )
return id
def _set_correlation_id(self, value):
if isinstance(value, UUID):
value = JUUID.fromString( str(value) )
return self.impl.setCorrelationId(value)
correlation_id = property(_get_correlation_id, _set_correlation_id)
def _get_ttl(self):
return self.impl.getTtl()
def _set_ttl(self, ttl):
self.impl.setTtl(ttl)
ttl = property(_get_ttl, _set_ttl)
def _get_priority(self):
return self.impl.getPriority()
def _set_priority(self, priority):
self.impl.setPriority(priority)
priority = property(_get_priority, _set_priority)
def _get_address(self):
return self.impl.getAddress()
def _set_address(self, address):
self.impl.setAddress(address)
address = property(_get_address, _set_address)
def _get_subject(self):
return self.impl.getSubject()
def _set_subject(self, subject):
self.impl.setSubject(subject)
subject = property(_get_subject, _set_subject)
def _get_user_id(self):
u = self.impl.getUserId()
if u is None: return ""
else: return u.tostring()
def _set_user_id(self, user_id):
self.impl.setUserId(user_id)
user_id = property(_get_user_id, _set_user_id)
def _get_reply_to(self):
return self.impl.getReplyTo()
def _set_reply_to(self, reply_to):
self.impl.setReplyTo(reply_to)
reply_to = property(_get_reply_to, _set_reply_to)
def _get_reply_to_group_id(self):
return self.impl.getReplyToGroupId()
def _set_reply_to_group_id(self, reply_to_group_id):
self.impl.setReplyToGroupId(reply_to_group_id)
reply_to_group_id = property(_get_reply_to_group_id, _set_reply_to_group_id)
def _get_group_id(self):
return self.impl.getGroupId()
def _set_group_id(self, group_id):
self.impl.setGroupId(group_id)
group_id = property(_get_group_id, _set_group_id)
def _get_group_sequence(self):
return self.impl.getGroupSequence()
def _set_group_sequence(self, group_sequence):
self.impl.setGroupSequence(group_sequence)
group_sequence = property(_get_group_sequence, _set_group_sequence)
def _is_first_acquirer(self):
return self.impl.isFirstAcquirer()
def _set_first_acquirer(self, b):
self.impl.setFirstAcquirer(b)
first_acquirer = property(_is_first_acquirer, _set_first_acquirer)
def _get_expiry_time(self):
return self.impl.getExpiryTime()
def _set_expiry_time(self, expiry_time):
self.impl.setExpiryTime(expiry_time)
expiry_time = property(_get_expiry_time, _set_expiry_time)
def _is_durable(self):
return self.impl.isDurable()
def _set_durable(self, durable):
self.impl.setDurable(durable)
durable = property(_is_durable, _set_durable)
def _get_delivery_count(self):
return self.impl.getDeliveryCount()
def _set_delivery_count(self, delivery_count):
self.impl.setDeliveryCount(delivery_count)
delivery_count = property(_get_delivery_count, _set_delivery_count)
def _get_creation_time(self):
return self.impl.getCreationTime()
def _set_creation_time(self, creation_time):
self.impl.setCreationTime(creation_time)
creation_time = property(_get_creation_time, _set_creation_time)
def _get_content_type(self):
return self.impl.getContentType()
def _set_content_type(self, content_type):
self.impl.setContentType(content_type)
content_type = property(_get_content_type, _set_content_type)
def _get_content_encoding(self):
return self.impl.getContentEncoding()
def _set_content_encoding(self, content_encoding):
self.impl.setContentEncoding(content_encoding)
content_encoding = property(_get_content_encoding, _set_content_encoding)
def _get_format(self):
return self.impl.getFormat()
def _set_format(self, format):
self.impl.setMessageFormat(format)
format = property(_get_format, _set_format)
def _get_body(self):
body = self.impl.getBody()
if isinstance(body, AmqpValue):
return body.getValue()
else:
return body
def _set_body(self, body):
self.impl.setBody(AmqpValue(body))
body = property(_get_body, _set_body)
class SASL(object):
OK = Sasl.PN_SASL_OK
AUTH = Sasl.PN_SASL_AUTH
def __init__(self,transport):
self._sasl = transport.impl.sasl()
def mechanisms(self, mechanisms):
self._sasl.setMechanisms(mechanisms.split())
def client(self):
self._sasl.client()
def server(self):
self._sasl.server()
def send(self, data):
self._sasl.send(data, 0, len(data))
def recv(self):
size = 4096
output = zeros(size, "b")
n = self._sasl.recv(output, 0, size)
if n >= 0:
return output.tostring()[:n]
elif n == TransportImpl.END_OF_STREAM:
return None
else:
raise Exception(n)
def _get_outcome(self):
value = self._sasl.getOutcome()
if value == Sasl.PN_SASL_NONE:
return None
else:
return value
    def _set_outcome(self, outcome):
        # The wrapped Java object is stored as _sasl; its done() call records
        # the outcome.
        self._sasl.done(outcome)
outcome = property(_get_outcome, _set_outcome)
def done(self, outcome):
self._sasl.done(outcome)
def plain(self, user, password):
self._sasl.plain(user,password)
class SSLException(Exception):
pass
class SSLUnavailable(SSLException):
pass
class SSLDomain(object):
MODE_SERVER = SslDomain.Mode.SERVER
MODE_CLIENT = SslDomain.Mode.CLIENT
VERIFY_PEER = SslDomain.VerifyMode.VERIFY_PEER
ANONYMOUS_PEER = SslDomain.VerifyMode.ANONYMOUS_PEER
VERIFY_PEER_NAME = None # TBD
def __init__(self, mode):
self._domain = SslDomainImpl()
self._domain.init(mode)
def set_credentials(self, cert_file, key_file, password):
self._domain.setCredentials(cert_file, key_file, password)
def set_trusted_ca_db(self, certificate_db):
self._domain.setTrustedCaDb(certificate_db)
def set_peer_authentication(self, verify_mode, trusted_CAs=None):
self._domain.setPeerAuthentication(verify_mode)
if trusted_CAs is not None:
self._domain.setTrustedCaDb(trusted_CAs)
def allow_unsecured_client(self, allow_unsecured = True):
self._domain.allowUnsecuredClient(allow_unsecured)
class SSLSessionDetails(object):
def __init__(self, session_id):
self._session_details = SslPeerDetailsImpl(session_id, 1)
class SSL(object):
def __init__(self, transport, domain, session_details=None):
internal_session_details = None
if session_details:
internal_session_details = session_details._session_details
self._ssl = transport.impl.ssl(domain._domain, internal_session_details)
self._session_details = session_details
def get_session_details(self):
return self._session_details
def cipher_name(self):
return self._ssl.getCipherName()
def protocol_name(self):
return self._ssl.getProtocolName()
def _set_peer_hostname(self, hostname):
raise Skipped()
def _get_peer_hostname(self):
raise Skipped()
peer_hostname = property(_get_peer_hostname, _set_peer_hostname)
__all__ = [
"ACCEPTED",
"LANGUAGE",
"MANUAL",
"PENDING",
"REJECTED",
"PN_SESSION_WINDOW",
"Condition",
"Connection",
"Data",
"Delivery",
"Endpoint",
"Link",
"Message",
"MessageException",
"Messenger",
"MessengerException",
"ProtonException",
"Receiver",
"SASL",
"Sender",
"Session",
"SSL",
"SSLDomain",
"SSLException",
"SSLSessionDetails",
"SSLUnavailable",
"symbol",
"Terminus",
"Timeout",
"Transport",
"TransportException"]
|
{
"content_hash": "3da96f45c0e70072b7f7fa1646eeda1d",
"timestamp": "",
"source": "github",
"line_count": 894,
"max_line_length": 92,
"avg_line_length": 25.184563758389263,
"alnum_prop": 0.6643126804352654,
"repo_name": "chirino/proton",
"id": "6dabb9175d719eca096e62349afaaa38d6df6144",
"size": "23303",
"binary": false,
"copies": "1",
"ref": "refs/heads/trunk",
"path": "proton-j/proton/src/main/scripts/proton.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "502370"
},
{
"name": "Java",
"bytes": "1085703"
},
{
"name": "PHP",
"bytes": "40840"
},
{
"name": "Perl",
"bytes": "28661"
},
{
"name": "Python",
"bytes": "265196"
},
{
"name": "Ruby",
"bytes": "89013"
},
{
"name": "Shell",
"bytes": "8364"
}
],
"symlink_target": ""
}
|
from mediapp.settings.base import *
from aws.conf import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
USE_TZ = True
ALLOWED_HOSTS = ['localhost','127.0.0.1','167.99.0.50','app.mediteccali.com']
#STATIC_ROOT = 'staticfiles'
INSTALLED_APPS += (
'storages',
)
|
{
"content_hash": "13f09b73d7b767c35b35f6d5d62ebb55",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 77,
"avg_line_length": 18.9375,
"alnum_prop": 0.6897689768976898,
"repo_name": "andresmauro17/mediapp",
"id": "9dd1d0b1033ca0c789246f472943231a50cd6417",
"size": "303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/mediapp/settings/prod.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "605428"
},
{
"name": "HTML",
"bytes": "357312"
},
{
"name": "JavaScript",
"bytes": "575186"
},
{
"name": "Python",
"bytes": "335982"
}
],
"symlink_target": ""
}
|
import unittest
from tf import tf
VALID_CONFIG = {
'project_root': '/tmp'
}
class CommandsTest(unittest.TestCase):
def test_terraform_command_with_arguments(self):
wrapper = tf.TF('test', 'qa', tfargs=['-target', 'test', '-Xdestroy', '-target=./mymodule'], config=VALID_CONFIG)
wrapper.build_path()
cmd = wrapper.get_tf_command('plan')
self.assertSequenceEqual(cmd, [
'terraform',
'plan',
'-var-file={0}'.format(wrapper.tfstate_file_path),
'-target',
'test',
'-Xdestroy',
'-target=./mymodule',
wrapper.stack_path
])
def test_terraform_command_without_arguments(self):
wrapper = tf.TF('test', 'plan', tfargs=[], config=VALID_CONFIG)
wrapper.build_path()
cmd = wrapper.get_tf_command('plan')
self.assertSequenceEqual(cmd, [
'terraform',
'plan',
'-var-file={0}'.format(wrapper.tfstate_file_path),
wrapper.stack_path
])
|
{
"content_hash": "e5d4770c5c53348a7914a7ce02cf7140",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 117,
"avg_line_length": 26.11111111111111,
"alnum_prop": 0.6180851063829788,
"repo_name": "micahlmartin/terraflow",
"id": "76861a0efdf67954528e21d694e4447144bb8b5e",
"size": "940",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_commands.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12250"
},
{
"name": "Shell",
"bytes": "124"
}
],
"symlink_target": ""
}
|
import argparse
import collections
from doctest import SKIP
import multiprocessing
import os
import re
import subprocess
import sys
import tempfile
# find our home
ROOT = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), '../..'))
os.chdir(ROOT)
vendors = collections.defaultdict(list)
scores = collections.defaultdict(int)
avoidness = collections.defaultdict(int)
consumes = {}
no_update = set()
buildozer_commands = []
needs_codegen_base_src = set()
original_deps = {}
original_external_deps = {}
skip_headers = collections.defaultdict(set)
# TODO(ctiller): ideally we wouldn't hardcode a bunch of paths here.
# We can likely parse out BUILD files from dependencies to generate this index.
EXTERNAL_DEPS = {
'absl/algorithm/container.h':
'absl/algorithm:container',
'absl/base/attributes.h':
'absl/base:core_headers',
'absl/base/call_once.h':
'absl/base',
# TODO(ctiller) remove this
'absl/base/internal/endian.h':
'absl/base',
'absl/base/thread_annotations.h':
'absl/base:core_headers',
'absl/container/flat_hash_map.h':
'absl/container:flat_hash_map',
'absl/container/flat_hash_set.h':
'absl/container:flat_hash_set',
'absl/container/inlined_vector.h':
'absl/container:inlined_vector',
'absl/cleanup/cleanup.h':
'absl/cleanup',
'absl/debugging/failure_signal_handler.h':
'absl/debugging:failure_signal_handler',
'absl/debugging/stacktrace.h':
'absl/debugging:stacktrace',
'absl/debugging/symbolize.h':
'absl/debugging:symbolize',
'absl/flags/flag.h':
'absl/flags:flag',
'absl/flags/parse.h':
'absl/flags:parse',
'absl/functional/any_invocable.h':
'absl/functional:any_invocable',
'absl/functional/bind_front.h':
'absl/functional:bind_front',
'absl/functional/function_ref.h':
'absl/functional:function_ref',
'absl/hash/hash.h':
'absl/hash',
'absl/memory/memory.h':
'absl/memory',
'absl/meta/type_traits.h':
'absl/meta:type_traits',
'absl/random/random.h':
'absl/random',
'absl/status/status.h':
'absl/status',
'absl/status/statusor.h':
'absl/status:statusor',
'absl/strings/ascii.h':
'absl/strings',
'absl/strings/cord.h':
'absl/strings:cord',
'absl/strings/escaping.h':
'absl/strings',
'absl/strings/match.h':
'absl/strings',
'absl/strings/numbers.h':
'absl/strings',
'absl/strings/str_cat.h':
'absl/strings',
'absl/strings/str_format.h':
'absl/strings:str_format',
'absl/strings/str_join.h':
'absl/strings',
'absl/strings/str_replace.h':
'absl/strings',
'absl/strings/str_split.h':
'absl/strings',
'absl/strings/string_view.h':
'absl/strings',
'absl/strings/strip.h':
'absl/strings',
'absl/strings/substitute.h':
'absl/strings',
'absl/synchronization/mutex.h':
'absl/synchronization',
'absl/synchronization/notification.h':
'absl/synchronization',
'absl/time/clock.h':
'absl/time',
'absl/time/time.h':
'absl/time',
'absl/types/optional.h':
'absl/types:optional',
'absl/types/span.h':
'absl/types:span',
'absl/types/variant.h':
'absl/types:variant',
'absl/utility/utility.h':
'absl/utility',
'address_sorting/address_sorting.h':
'address_sorting',
'ares.h':
'cares',
'gmock/gmock.h':
'gtest',
'gtest/gtest.h':
'gtest',
'opencensus/trace/context_util.h':
'opencensus-trace-context_util',
'opencensus/trace/propagation/grpc_trace_bin.h':
'opencensus-trace-propagation',
'opencensus/tags/context_util.h':
'opencensus-tags-context_util',
'openssl/base.h':
'libssl',
'openssl/bio.h':
'libssl',
'openssl/bn.h':
'libcrypto',
'openssl/buffer.h':
'libcrypto',
'openssl/crypto.h':
'libcrypto',
'openssl/digest.h':
'libssl',
'openssl/engine.h':
'libcrypto',
'openssl/err.h':
'libcrypto',
'openssl/evp.h':
'libcrypto',
'openssl/hmac.h':
'libcrypto',
'openssl/pem.h':
'libcrypto',
'openssl/rsa.h':
'libcrypto',
'openssl/sha.h':
'libcrypto',
'openssl/ssl.h':
'libssl',
'openssl/tls1.h':
'libssl',
'openssl/x509.h':
'libcrypto',
'openssl/x509v3.h':
'libcrypto',
're2/re2.h':
're2',
'upb/arena.h':
'upb_lib',
'upb/def.h':
'upb_lib',
'upb/json_encode.h':
'upb_json_lib',
'upb/text_encode.h':
'upb_textformat_lib',
'upb/def.hpp':
'upb_reflection',
'upb/upb.h':
'upb_lib',
'upb/upb.hpp':
'upb_lib',
'xxhash.h':
'xxhash',
'zlib.h':
'madler_zlib',
}
INTERNAL_DEPS = {
'google/api/expr/v1alpha1/syntax.upb.h':
'google_type_expr_upb',
'google/rpc/status.upb.h':
'google_rpc_status_upb',
'google/protobuf/any.upb.h':
'protobuf_any_upb',
'google/protobuf/duration.upb.h':
'protobuf_duration_upb',
'google/protobuf/struct.upb.h':
'protobuf_struct_upb',
'google/protobuf/timestamp.upb.h':
'protobuf_timestamp_upb',
'google/protobuf/wrappers.upb.h':
'protobuf_wrappers_upb',
'grpc/status.h':
'grpc_public_hdrs',
'src/proto/grpc/channelz/channelz.grpc.pb.h':
'//src/proto/grpc/channelz:channelz_proto',
'src/proto/grpc/core/stats.pb.h':
'//src/proto/grpc/core:stats_proto',
'src/proto/grpc/health/v1/health.upb.h':
'grpc_health_upb',
'src/proto/grpc/lb/v1/load_reporter.grpc.pb.h':
'//src/proto/grpc/lb/v1:load_reporter_proto',
'src/proto/grpc/lb/v1/load_balancer.upb.h':
'grpc_lb_upb',
'src/proto/grpc/reflection/v1alpha/reflection.grpc.pb.h':
'//src/proto/grpc/reflection/v1alpha:reflection_proto',
'src/proto/grpc/gcp/transport_security_common.upb.h':
'alts_upb',
'src/proto/grpc/gcp/altscontext.upb.h':
'alts_upb',
'src/proto/grpc/lookup/v1/rls.upb.h':
'rls_upb',
'src/proto/grpc/lookup/v1/rls_config.upb.h':
'rls_config_upb',
'src/proto/grpc/lookup/v1/rls_config.upbdefs.h':
'rls_config_upbdefs',
'src/proto/grpc/testing/xds/v3/csds.grpc.pb.h':
'//src/proto/grpc/testing/xds/v3:csds_proto',
'xds/data/orca/v3/orca_load_report.upb.h':
'xds_orca_upb',
'xds/service/orca/v3/orca.upb.h':
'xds_orca_service_upb',
'xds/type/v3/typed_struct.upb.h':
'xds_type_upb',
}
class FakeSelects:
def config_setting_group(self, **kwargs):
pass
num_cc_libraries = 0
num_opted_out_cc_libraries = 0
parsing_path = None
def grpc_cc_library(name,
hdrs=[],
public_hdrs=[],
srcs=[],
select_deps=None,
tags=[],
deps=[],
external_deps=[],
proto=None,
**kwargs):
global args
global num_cc_libraries
global num_opted_out_cc_libraries
global parsing_path
assert (parsing_path is not None)
name = '//%s:%s' % (parsing_path, name)
num_cc_libraries += 1
if select_deps or 'nofixdeps' in tags:
if args.whats_left and not select_deps and 'nofixdeps' not in tags:
num_opted_out_cc_libraries += 1
print("Not opted in: {}".format(name))
no_update.add(name)
scores[name] = len(public_hdrs + hdrs)
# avoid_dep is the internal way of saying prefer something else
# we add grpc_avoid_dep to allow internal grpc-only stuff to avoid each
# other, whilst not biasing dependent projects
if 'avoid_dep' in tags or 'grpc_avoid_dep' in tags:
avoidness[name] += 10
if proto:
proto_hdr = '%s%s' % ((parsing_path + '/' if parsing_path else ''),
proto.replace('.proto', '.pb.h'))
skip_headers[name].add(proto_hdr)
for hdr in hdrs + public_hdrs:
filename = '%s%s' % ((parsing_path + '/' if parsing_path else ''), hdr)
vendors[filename].append(name)
inc = set()
original_deps[name] = frozenset(deps)
original_external_deps[name] = frozenset(external_deps)
for src in hdrs + public_hdrs + srcs:
filename = '%s%s' % ((parsing_path + '/' if parsing_path else ''), src)
for line in open(filename):
m = re.search(r'#include <(.*)>', line)
if m:
inc.add(m.group(1))
m = re.search(r'#include "(.*)"', line)
if m:
inc.add(m.group(1))
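            # Files that reference these globals also need the codegen base
            # sources; see the needs_codegen_base_src handling in make_library.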
if 'grpc::g_glip' in line or 'grpc::g_core_codegen_interface' in line:
needs_codegen_base_src.add(name)
consumes[name] = list(inc)
def buildozer(cmd, target):
buildozer_commands.append('%s|%s' % (cmd, target))
def buildozer_set_list(name, values, target, via=""):
if not values:
buildozer('remove %s' % name, target)
return
adjust = via if via else name
buildozer('set %s %s' % (adjust, ' '.join('"%s"' % s for s in values)),
target)
if via:
buildozer('remove %s' % name, target)
buildozer('rename %s %s' % (via, name), target)
def score_edit_distance(proposed, existing):
"""Score a proposed change primarily by edit distance"""
sum = 0
for p in proposed:
if p not in existing:
sum += 1
for e in existing:
if e not in proposed:
sum += 1
return sum
def total_score(proposal):
return sum(scores[dep] for dep in proposal)
def total_avoidness(proposal):
return sum(avoidness[dep] for dep in proposal)
def score_list_size(proposed, existing):
"""Score a proposed change primarily by number of dependencies"""
return len(proposed)
def score_best(proposed, existing):
"""Score a proposed change primarily by dependency score"""
return 0
SCORERS = {
'edit_distance': score_edit_distance,
'list_size': score_list_size,
'best': score_best,
}
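# Illustrative sketch (not part of the original tool): how the scorers above
# rank alternative dependency lists against an existing one. The target names
# are made up, and this helper is never called by the script.
def _demo_scorers():
    existing = ['//:gpr', '//:grpc_base']
    proposals = [
        ['//:gpr', '//:grpc_base'],                   # unchanged: edit distance 0
        ['//:gpr'],                                   # one removal: edit distance 1
        ['//:gpr', '//:grpc_base', '//:grpc_trace'],  # one addition: edit distance 1
    ]
    for proposed in proposals:
        print('%s edit_distance=%d list_size=%d' %
              (proposed, score_edit_distance(proposed, existing),
               score_list_size(proposed, existing)))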
parser = argparse.ArgumentParser(description='Fix build dependencies')
parser.add_argument('targets',
nargs='*',
default=[],
help='targets to fix (empty => all)')
parser.add_argument('--score',
type=str,
default='edit_distance',
help='scoring function to use: one of ' +
', '.join(SCORERS.keys()))
parser.add_argument('--whats_left',
action='store_true',
default=False,
help='show what is left to opt in')
parser.add_argument('--explain',
action='store_true',
default=False,
help='try to explain some decisions')
parser.add_argument(
'--why',
type=str,
default=None,
help='with --explain, target why a given dependency is needed')
args = parser.parse_args()
for dirname in [
"",
"test/core/uri",
"test/core/util",
"test/core/end2end",
"test/core/event_engine",
"test/core/resource_quota",
]:
parsing_path = dirname
exec(
open('%sBUILD' % (dirname + '/' if dirname else ''), 'r').read(), {
'load': lambda filename, *args: None,
'licenses': lambda licenses: None,
'package': lambda **kwargs: None,
'exports_files': lambda files: None,
'config_setting': lambda **kwargs: None,
'selects': FakeSelects(),
'python_config_settings': lambda **kwargs: None,
'grpc_cc_binary': grpc_cc_library,
'grpc_cc_library': grpc_cc_library,
'grpc_cc_test': grpc_cc_library,
'grpc_fuzzer': grpc_cc_library,
'grpc_proto_fuzzer': grpc_cc_library,
'select': lambda d: d["//conditions:default"],
'grpc_end2end_tests': lambda: None,
'grpc_upb_proto_library': lambda name, **kwargs: None,
'grpc_upb_proto_reflection_library': lambda name, **kwargs: None,
'grpc_generate_one_off_targets': lambda: None,
'grpc_package': lambda **kwargs: None,
'filegroup': lambda name, **kwargs: None,
'sh_library': lambda name, **kwargs: None,
}, {})
parsing_path = None
if args.whats_left:
print("{}/{} libraries are opted in".format(
num_cc_libraries - num_opted_out_cc_libraries, num_cc_libraries))
def make_relative_path(dep, lib):
if lib is None:
return dep
lib_path = lib[:lib.rfind(':') + 1]
if dep.startswith(lib_path):
return dep[len(lib_path):]
return dep
# Keeps track of all possible sets of dependencies that could satisfy the
# problem. (models the list monad in Haskell!)
class Choices:
def __init__(self, library, substitutions):
self.library = library
self.to_add = []
self.to_remove = []
self.substitutions = substitutions
def add_one_of(self, choices, trigger):
if not choices:
return
choices = sum([self.apply_substitutions(choice) for choice in choices],
[])
if args.explain and (args.why is None or args.why in choices):
print("{}: Adding one of {} for {}".format(self.library, choices,
trigger))
self.to_add.append(
tuple(
make_relative_path(choice, self.library) for choice in choices))
def add(self, choice, trigger):
self.add_one_of([choice], trigger)
def remove(self, remove):
for remove in self.apply_substitutions(remove):
self.to_remove.append(make_relative_path(remove, self.library))
def apply_substitutions(self, dep):
if dep in self.substitutions:
return self.substitutions[dep]
return [dep]
def best(self, scorer):
choices = set()
choices.add(frozenset())
for add in sorted(set(self.to_add), key=lambda x: (len(x), x)):
new_choices = set()
for append_choice in add:
for choice in choices:
new_choices.add(choice.union([append_choice]))
choices = new_choices
for remove in sorted(set(self.to_remove)):
new_choices = set()
for choice in choices:
new_choices.add(choice.difference([remove]))
choices = new_choices
best = None
final_scorer = lambda x: (total_avoidness(x), scorer(x), total_score(x))
for choice in choices:
if best is None or final_scorer(choice) < final_scorer(best):
best = choice
return best
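# Illustrative sketch (not part of the original tool): exercising the Choices
# helper above. The target and header names are fabricated, and nothing here
# is invoked by the script (it would also need args to be parsed first).
def _demo_choices():
    c = Choices('//demo:lib', {})
    c.add_one_of(['//:grpc', '//:grpc_unsecure'], 'grpc/grpc.h')  # either vendor works
    c.add('//:gpr', 'grpc/support/log.h')                         # always required
    c.remove('//demo:lib')                                        # never self-depend
    # With the list_size scorer the smallest satisfying set wins, so this
    # returns one of the two vendors plus //:gpr.
    return sorted(c.best(lambda deps: score_list_size(deps, [])))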
def make_library(library):
error = False
hdrs = sorted(consumes[library])
# we need a little trickery here since grpc_base has channel.cc, which calls grpc_init
# which is in grpc, which is illegal but hard to change
# once event engine lands we can clean this up
deps = Choices(library, {'//:grpc_base': ['//:grpc', '//:grpc_unsecure']}
if library.startswith('//test/') else {})
external_deps = Choices(None, {})
for hdr in hdrs:
if hdr in skip_headers[library]:
continue
if hdr == 'src/core/lib/profiling/stap_probes.h':
continue
if hdr.startswith('src/libfuzzer/'):
continue
if hdr == 'grpc/grpc.h' and not library.startswith('//:'):
# not the root build including grpc.h ==> //:grpc
deps.add_one_of(['//:grpc', '//:grpc_unsecure'], hdr)
continue
if hdr in INTERNAL_DEPS:
dep = INTERNAL_DEPS[hdr]
if not dep.startswith('//'):
dep = '//:' + dep
deps.add(dep, hdr)
continue
if hdr in vendors:
deps.add_one_of(vendors[hdr], hdr)
continue
if 'include/' + hdr in vendors:
deps.add_one_of(vendors['include/' + hdr], hdr)
continue
if '.' not in hdr:
# assume a c++ system include
continue
if hdr in EXTERNAL_DEPS:
external_deps.add(EXTERNAL_DEPS[hdr], hdr)
continue
if hdr.startswith('opencensus/'):
trail = hdr[len('opencensus/'):]
trail = trail[:trail.find('/')]
external_deps.add('opencensus-' + trail, hdr)
continue
if hdr.startswith('envoy/'):
path, file = os.path.split(hdr)
file = file.split('.')
path = path.split('/')
dep = '_'.join(path[:-1] + [file[1]])
deps.add(dep, hdr)
continue
if hdr.startswith('google/protobuf/') and not hdr.endswith('.upb.h'):
external_deps.add('protobuf_headers', hdr)
continue
if '/' not in hdr:
# assume a system include
continue
is_sys_include = False
for sys_path in [
'sys',
'arpa',
'gperftools',
'netinet',
'linux',
'android',
'mach',
'net',
'CoreFoundation',
]:
if hdr.startswith(sys_path + '/'):
is_sys_include = True
break
if is_sys_include:
# assume a system include
continue
print("# ERROR: can't categorize header: %s used by %s" %
(hdr, library))
error = True
if library in needs_codegen_base_src:
deps.add('grpc++_codegen_base_src', '#needs_codegen_base_src')
deps.remove(library)
deps = sorted(
deps.best(lambda x: SCORERS[args.score](x, original_deps[library])))
external_deps = sorted(
external_deps.best(lambda x: SCORERS[args.score]
(x, original_external_deps[library])))
return (library, error, deps, external_deps)
update_libraries = []
for library in sorted(consumes.keys()):
if library in no_update:
continue
if args.targets and library not in args.targets:
continue
update_libraries.append(library)
with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as p:
updated_libraries = p.map(make_library, update_libraries, 1)
error = False
for library, lib_error, deps, external_deps in updated_libraries:
if lib_error:
error = True
continue
buildozer_set_list('external_deps', external_deps, library, via='deps')
buildozer_set_list('deps', deps, library)
if buildozer_commands:
ok_statuses = (0, 3)
temp = tempfile.NamedTemporaryFile()
open(temp.name, 'w').write('\n'.join(buildozer_commands))
c = ['tools/distrib/buildozer.sh', '-f', temp.name]
r = subprocess.call(c)
if r not in ok_statuses:
print('{} failed with status {}'.format(c, r))
sys.exit(1)
if error:
sys.exit(1)
|
{
"content_hash": "1315815b2b37d928b7ad78e31c2607f3",
"timestamp": "",
"source": "github",
"line_count": 624,
"max_line_length": 90,
"avg_line_length": 31.173076923076923,
"alnum_prop": 0.5653403249023237,
"repo_name": "ejona86/grpc",
"id": "cff53e5dc1537e4d72d5443ebe007580005fa1d7",
"size": "20054",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tools/distrib/fix_build_deps.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "5444"
},
{
"name": "Batchfile",
"bytes": "38831"
},
{
"name": "C",
"bytes": "1377708"
},
{
"name": "C#",
"bytes": "106367"
},
{
"name": "C++",
"bytes": "16353334"
},
{
"name": "CMake",
"bytes": "29311"
},
{
"name": "CSS",
"bytes": "1519"
},
{
"name": "Cython",
"bytes": "258768"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "Dockerfile",
"bytes": "179860"
},
{
"name": "Go",
"bytes": "34794"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "Java",
"bytes": "13923"
},
{
"name": "JavaScript",
"bytes": "5572"
},
{
"name": "Objective-C",
"bytes": "724357"
},
{
"name": "Objective-C++",
"bytes": "79351"
},
{
"name": "PHP",
"bytes": "486781"
},
{
"name": "PowerShell",
"bytes": "4516"
},
{
"name": "Python",
"bytes": "3814860"
},
{
"name": "Ruby",
"bytes": "650063"
},
{
"name": "Shell",
"bytes": "766652"
},
{
"name": "Starlark",
"bytes": "805915"
},
{
"name": "Swift",
"bytes": "7487"
},
{
"name": "XSLT",
"bytes": "9846"
}
],
"symlink_target": ""
}
|
class Solution:
    def minFlips(self, target: str) -> int:
        # Count the number of value changes while scanning target, starting
        # from an implicit leading '0'; each change requires one flip.
        answer = 0
        last = '0'
        for x in target:
            if x != last:
                answer += 1
                last = x
        return answer
|
{
"content_hash": "d36e8d64f87e4ce45e569bbe6159bf6c",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 43,
"avg_line_length": 23.5,
"alnum_prop": 0.40425531914893614,
"repo_name": "Magic07/online-judge-solutions",
"id": "9f54758bd8ef792701ec65b82982a1b123312c29",
"size": "235",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "leetcode/1652-bulb-switcher-iv.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "34617"
}
],
"symlink_target": ""
}
|
import re
from os import path
from setuptools import setup
ROOT_DIR = path.abspath(path.dirname(__file__))
DESCRIPTION = 'Flask-Diced - CRUD views generator for Flask'
LONG_DESCRIPTION = open(path.join(ROOT_DIR, 'README.rst')).read()
VERSION = re.search(
"__version__ = '([^']+)'",
open(path.join(ROOT_DIR, 'flask_diced.py')).read()
).group(1)
setup(
name='Flask-Diced',
version=VERSION,
url='https://github.com/pyx/flask-diced/',
license='BSD-New',
author='Philip Xu',
author_email='pyx@xrefactor.com',
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
py_modules=['flask_diced'],
zip_safe=False,
platforms='any',
install_requires=[
'Flask>=0.10',
],
extras_require={
'test': [
'pytest>=2.8.2',
'Flask-SQLAlchemy',
'Flask-WTF',
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
|
{
"content_hash": "33734cb109f87bc4c4121a23c89fb4c1",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 71,
"avg_line_length": 30.703703703703702,
"alnum_prop": 0.5850422195416164,
"repo_name": "pyx/flask-diced",
"id": "a4f3e4242ae2cbf49bedf64a44ba813fd30b363a",
"size": "1682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1595"
},
{
"name": "Python",
"bytes": "18122"
}
],
"symlink_target": ""
}
|
import os
import sys
import resource
__all__ = ['release_syslim', 'tune_opencv', 'tune_tensorflow', 'initialize_main']
def release_syslim():
sys.setrecursionlimit(1000000)
try:
slim = 65536 * 1024
resource.setrlimit(resource.RLIMIT_STACK, (slim, slim))
except ValueError:
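        # setrlimit can refuse the new limit (e.g. above the hard limit or in
        # a restricted container); the raised recursion limit still applies.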
pass
def tune_tensorflow():
os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1' # issue#9339
os.environ['TF_AUTOTUNE_THRESHOLD'] = '3' # use more warm-up
def tune_opencv():
os.environ['OPENCV_OPENCL_RUNTIME'] = ''
def initialize_main():
release_syslim()
tune_tensorflow()
|
{
"content_hash": "68642e3404d608b3e1c7fc93a03bf000",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 81,
"avg_line_length": 21.714285714285715,
"alnum_prop": 0.649671052631579,
"repo_name": "vacancy/TensorArtist",
"id": "4b024b0f50a8187c6938ba60ab710cf06cbe3ad1",
"size": "764",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tartist/core/utils/init.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "497134"
},
{
"name": "Shell",
"bytes": "630"
}
],
"symlink_target": ""
}
|
"""NginxParser is a member object of the NginxConfigurator class."""
import copy
import glob
import logging
import os
import pyparsing
import re
from certbot import errors
from certbot_nginx import obj
from certbot_nginx import nginxparser
logger = logging.getLogger(__name__)
class NginxParser(object):
"""Class handles the fine details of parsing the Nginx Configuration.
:ivar str root: Normalized absolute path to the server root
directory. Without trailing slash.
:ivar dict parsed: Mapping of file paths to parsed trees
"""
def __init__(self, root, ssl_options):
self.parsed = {}
self.root = os.path.abspath(root)
self.loc = self._set_locations(ssl_options)
# Parse nginx.conf and included files.
# TODO: Check sites-available/ as well. For now, the configurator does
# not enable sites from there.
self.load()
def load(self):
"""Loads Nginx files into a parsed tree.
"""
self.parsed = {}
self._parse_recursively(self.loc["root"])
def _parse_recursively(self, filepath):
"""Parses nginx config files recursively by looking at 'include'
directives inside 'http' and 'server' blocks. Note that this only
reads Nginx files that potentially declare a virtual host.
:param str filepath: The path to the files to parse, as a glob
"""
filepath = self.abs_path(filepath)
trees = self._parse_files(filepath)
for tree in trees:
for entry in tree:
if _is_include_directive(entry):
# Parse the top-level included file
self._parse_recursively(entry[1])
elif entry[0] == ['http'] or entry[0] == ['server']:
# Look for includes in the top-level 'http'/'server' context
for subentry in entry[1]:
if _is_include_directive(subentry):
self._parse_recursively(subentry[1])
elif entry[0] == ['http'] and subentry[0] == ['server']:
# Look for includes in a 'server' context within
# an 'http' context
for server_entry in subentry[1]:
if _is_include_directive(server_entry):
self._parse_recursively(server_entry[1])
def abs_path(self, path):
"""Converts a relative path to an absolute path relative to the root.
Does nothing for paths that are already absolute.
:param str path: The path
:returns: The absolute path
:rtype: str
"""
if not os.path.isabs(path):
return os.path.join(self.root, path)
else:
return path
def get_vhosts(self):
# pylint: disable=cell-var-from-loop
"""Gets list of all 'virtual hosts' found in Nginx configuration.
Technically this is a misnomer because Nginx does not have virtual
hosts, it has 'server blocks'.
:returns: List of :class:`~certbot_nginx.obj.VirtualHost`
objects found in configuration
:rtype: list
"""
enabled = True # We only look at enabled vhosts for now
vhosts = []
servers = {}
for filename in self.parsed:
tree = self.parsed[filename]
servers[filename] = []
srv = servers[filename] # workaround undefined loop var in lambdas
# Find all the server blocks
_do_for_subarray(tree, lambda x: x[0] == ['server'],
lambda x, y: srv.append((x[1], y)))
# Find 'include' statements in server blocks and append their trees
for i, (server, path) in enumerate(servers[filename]):
new_server = self._get_included_directives(server)
servers[filename][i] = (new_server, path)
for filename in servers:
for server, path in servers[filename]:
# Parse the server block into a VirtualHost object
parsed_server = parse_server(server)
vhost = obj.VirtualHost(filename,
parsed_server['addrs'],
parsed_server['ssl'],
enabled,
parsed_server['names'],
server,
path)
vhosts.append(vhost)
return vhosts
def _get_included_directives(self, block):
"""Returns array with the "include" directives expanded out by
concatenating the contents of the included file to the block.
:param list block:
:rtype: list
"""
result = copy.deepcopy(block) # Copy the list to keep self.parsed idempotent
for directive in block:
if _is_include_directive(directive):
included_files = glob.glob(
self.abs_path(directive[1]))
for incl in included_files:
try:
result.extend(self.parsed[incl])
except KeyError:
pass
return result
def _parse_files(self, filepath, override=False):
"""Parse files from a glob
:param str filepath: Nginx config file path
:param bool override: Whether to parse a file that has been parsed
:returns: list of parsed tree structures
:rtype: list
"""
files = glob.glob(filepath) # nginx on unix calls glob(3) for this
# XXX Windows nginx uses FindFirstFile, and
# should have a narrower call here
trees = []
for item in files:
if item in self.parsed and not override:
continue
try:
with open(item) as _file:
parsed = nginxparser.load(_file)
self.parsed[item] = parsed
trees.append(parsed)
except IOError:
logger.warning("Could not open file: %s", item)
except pyparsing.ParseException:
logger.debug("Could not parse file: %s", item)
return trees
def _parse_ssl_options(self, ssl_options):
if ssl_options is not None:
try:
with open(ssl_options) as _file:
return nginxparser.load(_file).spaced
except IOError:
                logger.warning("Missing NGINX TLS options file: %s", ssl_options)
except pyparsing.ParseBaseException:
logger.debug("Could not parse file: %s", ssl_options)
return []
def _set_locations(self, ssl_options):
"""Set default location for directives.
Locations are given as file_paths
.. todo:: Make sure that files are included
"""
root = self._find_config_root()
default = root
nginx_temp = os.path.join(self.root, "nginx_ports.conf")
if os.path.isfile(nginx_temp):
listen = nginx_temp
name = nginx_temp
else:
listen = default
name = default
return {"root": root, "default": default, "listen": listen,
"name": name, "ssl_options": self._parse_ssl_options(ssl_options)}
def _find_config_root(self):
"""Find the Nginx Configuration Root file."""
location = ['nginx.conf']
for name in location:
if os.path.isfile(os.path.join(self.root, name)):
return os.path.join(self.root, name)
raise errors.NoInstallationError(
"Could not find configuration root")
def filedump(self, ext='tmp', lazy=True):
"""Dumps parsed configurations into files.
:param str ext: The file extension to use for the dumped files. If
empty, this overrides the existing conf files.
:param bool lazy: Only write files that have been modified
"""
# Best-effort atomicity is enforced above us by reverter.py
for filename in self.parsed:
tree = self.parsed[filename]
if ext:
filename = filename + os.path.extsep + ext
try:
if lazy and not tree.is_dirty():
continue
out = nginxparser.dumps(tree)
logger.debug('Writing nginx conf tree to %s:\n%s', filename, out)
with open(filename, 'w') as _file:
_file.write(out)
except IOError:
logger.error("Could not open file for writing: %s", filename)
def has_ssl_on_directive(self, vhost):
"""Does vhost have ssl on for all ports?
:param :class:`~certbot_nginx.obj.VirtualHost` vhost: The vhost in question
:returns: True if 'ssl on' directive is included
:rtype: bool
"""
server = vhost.raw
for directive in server:
if not directive or len(directive) < 2:
continue
elif directive[0] == 'ssl' and directive[1] == 'on':
return True
return False
def add_server_directives(self, vhost, directives, replace):
"""Add or replace directives in the server block identified by vhost.
This method modifies vhost to be fully consistent with the new directives.
..note :: If replace is True, this raises a misconfiguration error
if the directive does not already exist.
..note :: If replace is False nothing gets added if an identical
block exists already.
..todo :: Doesn't match server blocks whose server_name directives are
split across multiple conf files.
:param :class:`~certbot_nginx.obj.VirtualHost` vhost: The vhost
whose information we use to match on
:param list directives: The directives to add
:param bool replace: Whether to only replace existing directives
"""
filename = vhost.filep
try:
result = self.parsed[filename]
for index in vhost.path:
result = result[index]
if not isinstance(result, list) or len(result) != 2:
raise errors.MisconfigurationError("Not a server block.")
result = result[1]
_add_directives(result, directives, replace)
# update vhost based on new directives
new_server = self._get_included_directives(result)
parsed_server = parse_server(new_server)
vhost.addrs = parsed_server['addrs']
vhost.ssl = parsed_server['ssl']
vhost.names = parsed_server['names']
vhost.raw = new_server
except errors.MisconfigurationError as err:
raise errors.MisconfigurationError("Problem in %s: %s" % (filename, err.message))
def get_all_certs_keys(self):
"""Gets all certs and keys in the nginx config.
:returns: list of tuples with form [(cert, key, path)]
cert - str path to certificate file
key - str path to associated key file
path - File path to configuration file.
:rtype: set
"""
c_k = set()
vhosts = self.get_vhosts()
for vhost in vhosts:
tup = [None, None, vhost.filep]
if vhost.ssl:
for directive in vhost.raw:
# A directive can be an empty list to preserve whitespace
if not directive:
continue
if directive[0] == 'ssl_certificate':
tup[0] = directive[1]
elif directive[0] == 'ssl_certificate_key':
tup[1] = directive[1]
if tup[0] is not None and tup[1] is not None:
c_k.add(tuple(tup))
return c_k
def _do_for_subarray(entry, condition, func, path=None):
"""Executes a function for a subarray of a nested array if it matches
the given condition.
:param list entry: The list to iterate over
:param function condition: Returns true iff func should be executed on item
:param function func: The function to call for each matching item
"""
if path is None:
path = []
if isinstance(entry, list):
if condition(entry):
func(entry, path)
else:
for index, item in enumerate(entry):
_do_for_subarray(item, condition, func, path + [index])
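# Illustrative sketch (not part of the original module): _do_for_subarray()
# walks a nested list and calls func on every sub-list matching the condition,
# passing the index path of the match. The tree below is made up.
def _demo_do_for_subarray():
    tree = [['server'], [['listen', '80']], [['server'], [['listen', '443']]]]
    found = []
    _do_for_subarray(tree, lambda x: x and x[0] == 'listen',
                     lambda entry, path: found.append((entry[1], path)))
    return found  # [('80', [1, 0]), ('443', [2, 1, 0])]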
def get_best_match(target_name, names):
"""Finds the best match for target_name out of names using the Nginx
name-matching rules (exact > longest wildcard starting with * >
longest wildcard ending with * > regex).
:param str target_name: The name to match
:param set names: The candidate server names
:returns: Tuple of (type of match, the name that matched)
:rtype: tuple
"""
exact = []
wildcard_start = []
wildcard_end = []
regex = []
for name in names:
if _exact_match(target_name, name):
exact.append(name)
elif _wildcard_match(target_name, name, True):
wildcard_start.append(name)
elif _wildcard_match(target_name, name, False):
wildcard_end.append(name)
elif _regex_match(target_name, name):
regex.append(name)
if len(exact) > 0:
# There can be more than one exact match; e.g. eff.org, .eff.org
match = min(exact, key=len)
return ('exact', match)
if len(wildcard_start) > 0:
# Return the longest wildcard
match = max(wildcard_start, key=len)
return ('wildcard_start', match)
if len(wildcard_end) > 0:
# Return the longest wildcard
match = max(wildcard_end, key=len)
return ('wildcard_end', match)
if len(regex) > 0:
# Just return the first one for now
match = regex[0]
return ('regex', match)
return (None, None)
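# Illustrative sketch (not part of the original module): the precedence that
# get_best_match() applies, shown with made-up server names.
def _demo_get_best_match():
    names = set(['www.eff.org', '*.eff.org', r'~^www\d+\.eff\.org$'])
    # An exact name beats every wildcard and regex form.
    assert get_best_match('www.eff.org', names) == ('exact', 'www.eff.org')
    # Without an exact match, a leading wildcard is preferred over a regex.
    assert get_best_match('maps.eff.org', names) == ('wildcard_start', '*.eff.org')
    # Nothing matches an unrelated domain.
    assert get_best_match('example.com', names) == (None, None)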
def _exact_match(target_name, name):
return target_name == name or '.' + target_name == name
def _wildcard_match(target_name, name, start):
# Degenerate case
if name == '*':
return True
parts = target_name.split('.')
match_parts = name.split('.')
# If the domain ends in a wildcard, do the match procedure in reverse
if not start:
parts.reverse()
match_parts.reverse()
# The first part must be a wildcard or blank, e.g. '.eff.org'
first = match_parts.pop(0)
if first != '*' and first != '':
return False
target_name = '.'.join(parts)
name = '.'.join(match_parts)
# Ex: www.eff.org matches *.eff.org, eff.org does not match *.eff.org
return target_name.endswith('.' + name)
def _regex_match(target_name, name):
# Must start with a tilde
if len(name) < 2 or name[0] != '~':
return False
# After tilde is a perl-compatible regex
try:
regex = re.compile(name[1:])
if re.match(regex, target_name):
return True
else:
return False
except re.error: # pragma: no cover
# perl-compatible regexes are sometimes not recognized by python
return False
def _is_include_directive(entry):
"""Checks if an nginx parsed entry is an 'include' directive.
:param list entry: the parsed entry
:returns: Whether it's an 'include' directive
:rtype: bool
"""
return (isinstance(entry, list) and
len(entry) == 2 and entry[0] == 'include' and
isinstance(entry[1], str))
def _get_servernames(names):
"""Turns a server_name string into a list of server names
:param str names: server names
:rtype: list
"""
whitespace_re = re.compile(r'\s+')
names = re.sub(whitespace_re, ' ', names)
return names.split(' ')
def parse_server(server):
"""Parses a list of server directives.
:param list server: list of directives in a server block
:rtype: dict
"""
parsed_server = {'addrs': set(),
'ssl': False,
'names': set()}
for directive in server:
if not directive:
continue
if directive[0] == 'listen':
addr = obj.Addr.fromstring(directive[1])
parsed_server['addrs'].add(addr)
if not parsed_server['ssl'] and addr.ssl:
parsed_server['ssl'] = True
elif directive[0] == 'server_name':
parsed_server['names'].update(
_get_servernames(directive[1]))
elif directive[0] == 'ssl' and directive[1] == 'on':
parsed_server['ssl'] = True
return parsed_server
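# Illustrative sketch (not part of the original module): parse_server() reduces
# a server block to its addresses, names and ssl flag. The block below is a
# made-up minimal example without a 'listen' directive, so no address parsing
# is involved.
def _demo_parse_server():
    block = [[], ['server_name', 'example.com www.example.com'], ['ssl', 'on']]
    parsed = parse_server(block)
    # parsed['names'] == set(['example.com', 'www.example.com'])
    # parsed['ssl'] is True and parsed['addrs'] stays empty
    return parsed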
def _add_directives(block, directives, replace):
"""Adds or replaces directives in a config block.
When replace=False, it's an error to try and add a directive that already
exists in the config block with a conflicting value.
When replace=True, a directive with the same name MUST already exist in the
config block, and the first instance will be replaced.
..todo :: Find directives that are in included files.
:param list block: The block to replace in
:param list directives: The new directives.
"""
for directive in directives:
_add_directive(block, directive, replace)
if block and '\n' not in block[-1]: # could be " \n " or ["\n"] !
block.append(nginxparser.UnspacedList('\n'))
REPEATABLE_DIRECTIVES = set(['server_name', 'listen', 'include'])
COMMENT = ' managed by Certbot'
COMMENT_BLOCK = [' ', '#', COMMENT]
def _comment_directive(block, location):
"""Add a comment to the end of the line at location."""
next_entry = block[location + 1] if location + 1 < len(block) else None
if isinstance(next_entry, list) and next_entry:
if len(next_entry) >= 2 and next_entry[-2] == "#" and COMMENT in next_entry[-1]:
return
elif isinstance(next_entry, nginxparser.UnspacedList):
next_entry = next_entry.spaced[0]
else:
next_entry = next_entry[0]
block.insert(location + 1, COMMENT_BLOCK[:])
if next_entry is not None and "\n" not in next_entry:
block.insert(location + 2, '\n')
def _add_directive(block, directive, replace):
"""Adds or replaces a single directive in a config block.
See _add_directives for more documentation.
"""
directive = nginxparser.UnspacedList(directive)
if len(directive) == 0 or directive[0] == '#':
# whitespace or comment
block.append(directive)
return
# Find the index of a config line where the name of the directive matches
# the name of the directive we want to add. If no line exists, use None.
location = next((index for index, line in enumerate(block)
if line and line[0] == directive[0]), None)
if replace:
if location is None:
raise errors.MisconfigurationError(
'expected directive for {0} in the Nginx '
'config but did not find it.'.format(directive[0]))
block[location] = directive
_comment_directive(block, location)
else:
# Append directive. Fail if the name is not a repeatable directive name,
# and there is already a copy of that directive with a different value
# in the config file.
directive_name = directive[0]
directive_value = directive[1]
if location is None or (isinstance(directive_name, str) and
directive_name in REPEATABLE_DIRECTIVES):
block.append(directive)
_comment_directive(block, len(block) - 1)
elif block[location][1] != directive_value:
raise errors.MisconfigurationError(
'tried to insert directive "{0}" but found '
'conflicting "{1}".'.format(directive, block[location]))
|
{
"content_hash": "7b416278ef5844bcdd83c24c7d65200c",
"timestamp": "",
"source": "github",
"line_count": 572,
"max_line_length": 93,
"avg_line_length": 35.21503496503497,
"alnum_prop": 0.5741945092587996,
"repo_name": "jtl999/certbot",
"id": "a9ef21f2ec2de99159729b8ee34f027f90db8276",
"size": "20143",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "certbot-nginx/certbot_nginx/parser.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "62302"
},
{
"name": "Augeas",
"bytes": "5245"
},
{
"name": "Batchfile",
"bytes": "35005"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Makefile",
"bytes": "37245"
},
{
"name": "Nginx",
"bytes": "118585"
},
{
"name": "Python",
"bytes": "1477643"
},
{
"name": "Shell",
"bytes": "176838"
},
{
"name": "Standard ML",
"bytes": "256"
}
],
"symlink_target": ""
}
|
from ..testutil import eq_
from ..notify import Broadcaster, Listener, Repeater
class HelloListener(Listener):
def __init__(self, broadcaster):
Listener.__init__(self, broadcaster)
self.hello_count = 0
def hello(self):
self.hello_count += 1
class HelloRepeater(Repeater):
def __init__(self, broadcaster):
Repeater.__init__(self, broadcaster)
self.hello_count = 0
def hello(self):
self.hello_count += 1
def create_pair():
b = Broadcaster()
l = HelloListener(b)
return b, l
def test_disconnect_during_notification():
    # When a listener disconnects another listener during a notification, the
    # disconnected listener will not receive that notification.
    # This whole complicated scheme below exists because the order of notification
    # is not guaranteed. We could disconnect everything from
    # self.broadcaster.listeners, but that member is supposed to be private.
    # Hence, the '.other' scheme.
class Disconnecter(Listener):
def __init__(self, broadcaster):
Listener.__init__(self, broadcaster)
self.hello_count = 0
def hello(self):
self.hello_count += 1
self.other.disconnect()
broadcaster = Broadcaster()
first = Disconnecter(broadcaster)
second = Disconnecter(broadcaster)
first.other, second.other = second, first
first.connect()
second.connect()
broadcaster.notify('hello')
# only one of them was notified
eq_(first.hello_count + second.hello_count, 1)
def test_disconnect():
# After a disconnect, the listener doesn't hear anything.
b, l = create_pair()
l.connect()
l.disconnect()
b.notify('hello')
eq_(l.hello_count, 0)
def test_disconnect_when_not_connected():
# When disconnecting an already disconnected listener, nothing happens.
b, l = create_pair()
l.disconnect()
def test_not_connected_on_init():
# A listener is not initialized connected.
b, l = create_pair()
b.notify('hello')
eq_(l.hello_count, 0)
def test_notify():
# The listener listens to the broadcaster.
b, l = create_pair()
l.connect()
b.notify('hello')
eq_(l.hello_count, 1)
def test_reconnect():
# It's possible to reconnect a listener after disconnection.
b, l = create_pair()
l.connect()
l.disconnect()
l.connect()
b.notify('hello')
eq_(l.hello_count, 1)
def test_repeater():
b = Broadcaster()
r = HelloRepeater(b)
l = HelloListener(r)
r.connect()
l.connect()
b.notify('hello')
eq_(r.hello_count, 1)
eq_(l.hello_count, 1)
def test_repeater_with_repeated_notifications():
# If REPEATED_NOTIFICATIONS is not empty, only notifs in this set are repeated (but they're
# still dispatched locally).
class MyRepeater(HelloRepeater):
REPEATED_NOTIFICATIONS = set(['hello'])
def __init__(self, broadcaster):
HelloRepeater.__init__(self, broadcaster)
self.foo_count = 0
def foo(self):
self.foo_count += 1
b = Broadcaster()
r = MyRepeater(b)
l = HelloListener(r)
r.connect()
l.connect()
b.notify('hello')
b.notify('foo') # if the repeater repeated this notif, we'd get a crash on HelloListener
eq_(r.hello_count, 1)
eq_(l.hello_count, 1)
eq_(r.foo_count, 1)
def test_repeater_doesnt_try_to_dispatch_to_self_if_it_cant():
# if a repeater doesn't handle a particular message, it doesn't crash and simply repeats it.
b = Broadcaster()
r = Repeater(b) # doesnt handle hello
l = HelloListener(r)
r.connect()
l.connect()
b.notify('hello') # no crash
eq_(l.hello_count, 1)
def test_bind_messages():
b, l = create_pair()
l.bind_messages({'foo', 'bar'}, l.hello)
l.connect()
b.notify('foo')
b.notify('bar')
b.notify('hello') # Normal dispatching still work
eq_(l.hello_count, 3)
|
{
"content_hash": "64c3467c283bfac072bb0660134849f0",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 96,
"avg_line_length": 29.380597014925375,
"alnum_prop": 0.6309372618745237,
"repo_name": "hsoft/currency_server",
"id": "336be66b328948e24cacebdb801768eebfa857e9",
"size": "4218",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hscommon/tests/notify_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "182869"
}
],
"symlink_target": ""
}
|
"""
This script computes the absolute trajectory error from the ground truth
trajectory and the estimated trajectory.
"""
import sys
import numpy
import argparse
import associate
def align(model,data):
"""Align two trajectories using the method of Horn (closed-form).
Input:
model -- first trajectory (3xn)
data -- second trajectory (3xn)
Output:
rot -- rotation matrix (3x3)
trans -- translation vector (3x1)
trans_error -- translational error per point (1xn)
"""
numpy.set_printoptions(precision=3,suppress=True)
model_zerocentered = model - model.mean(1)
data_zerocentered = data - data.mean(1)
W = numpy.zeros( (3,3) )
for column in range(model.shape[1]):
W += numpy.outer(model_zerocentered[:,column],data_zerocentered[:,column])
U,d,Vh = numpy.linalg.linalg.svd(W.transpose())
S = numpy.matrix(numpy.identity( 3 ))
if(numpy.linalg.det(U) * numpy.linalg.det(Vh)<0):
S[2,2] = -1
rot = U*S*Vh
trans = data.mean(1) - rot * model.mean(1)
model_aligned = rot * model + trans
alignment_error = model_aligned - data
trans_error = numpy.sqrt(numpy.sum(numpy.multiply(alignment_error,alignment_error),0)).A[0]
return rot,trans,trans_error
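# Illustrative sketch (not part of the original script): align() recovers a
# known rigid transform between two synthetic 3xn trajectories. The rotation
# and translation below are fabricated, and this helper is never called.
def _demo_align():
    model = numpy.matrix(numpy.random.rand(3, 20))
    true_rot = numpy.matrix([[0., -1., 0.], [1., 0., 0.], [0., 0., 1.]])
    true_trans = numpy.matrix([[1.0], [2.0], [3.0]])
    data = true_rot * model + true_trans
    rot, trans, trans_error = align(model, data)
    # The recovered transform maps model onto data, so the per-point
    # translational error is numerically zero.
    assert numpy.allclose(trans_error, 0.0)
    return rot, trans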
def plot_traj(ax,stamps,traj,style,color,label):
"""
Plot a trajectory using matplotlib.
Input:
ax -- the plot
stamps -- time stamps (1xn)
traj -- trajectory (3xn)
style -- line style
color -- line color
label -- plot legend
"""
stamps.sort()
interval = numpy.median([s-t for s,t in zip(stamps[1:],stamps[:-1])])
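    # Samples separated by more than twice the median time step start a new
    # line segment, so gaps in the trajectory are not bridged by the plot.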
x = []
y = []
last = stamps[0]
for i in range(len(stamps)):
if stamps[i]-last < 2*interval:
x.append(traj[i][0])
y.append(traj[i][1])
elif len(x)>0:
ax.plot(x,y,style,color=color,label=label)
label=""
x=[]
y=[]
last= stamps[i]
if len(x)>0:
ax.plot(x,y,style,color=color,label=label)
if __name__=="__main__":
# parse command line
parser = argparse.ArgumentParser(description='''
This script computes the absolute trajectory error from the ground truth trajectory and the estimated trajectory.
''')
parser.add_argument('first_file', help='ground truth trajectory (format: timestamp tx ty tz qx qy qz qw)')
parser.add_argument('second_file', help='estimated trajectory (format: timestamp tx ty tz qx qy qz qw)')
parser.add_argument('--offset', help='time offset added to the timestamps of the second file (default: 0.0)',default=0.0)
parser.add_argument('--scale', help='scaling factor for the second trajectory (default: 1.0)',default=1.0)
parser.add_argument('--max_difference', help='maximally allowed time difference for matching entries (default: 0.02)',default=0.02)
parser.add_argument('--save', help='save aligned second trajectory to disk (format: stamp2 x2 y2 z2)')
parser.add_argument('--save_associations', help='save associated first and aligned second trajectory to disk (format: stamp1 x1 y1 z1 stamp2 x2 y2 z2)')
    parser.add_argument('--plot', help='plot the first and the aligned second trajectory to an image (format: pdf)')
parser.add_argument('--verbose', help='print all evaluation data (otherwise, only the RMSE absolute translational error in meters after alignment will be printed)', action='store_true')
args = parser.parse_args()
first_list = associate.read_file_list(args.first_file)
second_list = associate.read_file_list(args.second_file)
matches = associate.associate(first_list, second_list,float(args.offset),float(args.max_difference))
if len(matches)<2:
sys.exit("Couldn't find matching timestamp pairs between groundtruth and estimated trajectory! Did you choose the correct sequence?")
first_xyz = numpy.matrix([[float(value) for value in first_list[a][0:3]] for a,b in matches]).transpose()
second_xyz = numpy.matrix([[float(value)*float(args.scale) for value in second_list[b][0:3]] for a,b in matches]).transpose()
rot,trans,trans_error = align(second_xyz,first_xyz)
second_xyz_aligned = rot * second_xyz + trans
first_stamps = first_list.keys()
first_stamps.sort()
first_xyz_full = numpy.matrix([[float(value) for value in first_list[b][0:3]] for b in first_stamps]).transpose()
second_stamps = second_list.keys()
second_stamps.sort()
second_xyz_full = numpy.matrix([[float(value)*float(args.scale) for value in second_list[b][0:3]] for b in second_stamps]).transpose()
second_xyz_full_aligned = rot * second_xyz_full + trans
if args.verbose:
print "compared_pose_pairs %d pairs"%(len(trans_error))
print "absolute_translational_error.rmse %f m"%numpy.sqrt(numpy.dot(trans_error,trans_error) / len(trans_error))
print "absolute_translational_error.mean %f m"%numpy.mean(trans_error)
print "absolute_translational_error.median %f m"%numpy.median(trans_error)
print "absolute_translational_error.std %f m"%numpy.std(trans_error)
print "absolute_translational_error.min %f m"%numpy.min(trans_error)
print "absolute_translational_error.max %f m"%numpy.max(trans_error)
else:
print "%f"%numpy.sqrt(numpy.dot(trans_error,trans_error) / len(trans_error))
if args.save_associations:
file = open(args.save_associations,"w")
file.write("\n".join(["%f %f %f %f %f %f %f %f"%(a,x1,y1,z1,b,x2,y2,z2) for (a,b),(x1,y1,z1),(x2,y2,z2) in zip(matches,first_xyz.transpose().A,second_xyz_aligned.transpose().A)]))
file.close()
if args.save:
file = open(args.save,"w")
file.write("\n".join(["%f "%stamp+" ".join(["%f"%d for d in line]) for stamp,line in zip(second_stamps,second_xyz_full_aligned.transpose().A)]))
file.close()
if args.plot:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
from matplotlib.patches import Ellipse
fig = plt.figure()
ax = fig.add_subplot(111)
plot_traj(ax,first_stamps,first_xyz_full.transpose().A,'-',"black","ground truth")
plot_traj(ax,second_stamps,second_xyz_full_aligned.transpose().A,'-',"blue","estimated")
#label="difference"
#for (a,b),(x1,y1,z1),(x2,y2,z2) in zip(matches,first_xyz.transpose().A,second_xyz_aligned.transpose().A):
# ax.plot([x1,x2],[y1,y2],'-',color="red",label=label)
# label=""
ax.legend()
ax.set_xlabel('x [m]')
ax.set_ylabel('y [m]')
plt.savefig(args.plot,dpi=300, format='pdf')
|
{
"content_hash": "893452c7f4df40ee13af98ad6be65269",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 189,
"avg_line_length": 42.716981132075475,
"alnum_prop": 0.6409010600706714,
"repo_name": "introlab/rtabmap_ros",
"id": "6b23f42f2650d9fdaf6a04201159a032602f3d50",
"size": "8456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "launch/jfr2018/evaluate_ate.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "14954"
},
{
"name": "C++",
"bytes": "1308634"
},
{
"name": "CMake",
"bytes": "23532"
},
{
"name": "Dockerfile",
"bytes": "2814"
},
{
"name": "Lua",
"bytes": "1599"
},
{
"name": "Makefile",
"bytes": "43"
},
{
"name": "Python",
"bytes": "56169"
},
{
"name": "Shell",
"bytes": "1160"
}
],
"symlink_target": ""
}
|
"""
Unit Tests for the --Problem Name-- problem
for Google Code Jam --Year--
--Round--
Link to problem description:
--Link--
Author:
Chris Nitsas
(nitsas)
Language:
Python 3(.4)
Date:
--Date--
Usage:
python3 test_runme.py
"""
import io
import os
import sys
import unittest
# modules I've written:
import runme
def are_extra_samples_present():
return os.path.isfile('extra_sample.in') and os.path.isfile('extra_sample.out')
def contents_of(output_file):
with open(output_file, 'r', encoding='utf-8') as f:
return f.read()
def output_of_runme_on(input_file):
# call runme.main and get its output into from_main
with io.StringIO() as target_output_stream:
# redirect stdout to an io.StringIO object to run main
sys.stdout, old_stdout = target_output_stream, sys.stdout
runme.main(input_file)
from_main = target_output_stream.getvalue()
# get original stdout back
sys.stdout = old_stdout
return from_main
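# Illustrative alternative (not used by the tests below): the same capture can
# be written with contextlib.redirect_stdout, available since Python 3.4.
def output_of_runme_on_with_redirect(input_file):
    import contextlib
    with io.StringIO() as target_output_stream:
        with contextlib.redirect_stdout(target_output_stream):
            runme.main(input_file)
        return target_output_stream.getvalue()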
class TestRunme(unittest.TestCase):
"""
Simple tests for the --Problem Name-- problem
for Google Code Jam --Year--
--Round--
"""
# define if needed
# def setUp(self):
# pass
#
# define if needed
# def tearDown(self):
# pass
#
# def test_something(self):
# # use self.assertEqual(), self.assertTrue() or self.assertRaises()
# pass
#
def test_main_on_sample_in(self):
input_file, output_file = 'sample.in', 'sample.out'
# compare runme.main's results with sample.out's contents
self.assertEqual(output_of_runme_on(input_file),
contents_of(output_file))
@unittest.skipIf(not are_extra_samples_present(), 'no extra samples')
def test_main_on_extra_sample_in(self):
input_file, output_file = 'extra_sample.in', 'extra_sample.out'
# compare runme.main's results with extra_sample.out's contents
self.assertEqual(output_of_runme_on(input_file),
contents_of(output_file))
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "adcd6eef668efa2165cf98d64068f2df",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 83,
"avg_line_length": 24.58139534883721,
"alnum_prop": 0.6263008514664143,
"repo_name": "nitsas/codejamsolutions",
"id": "7b9f3451bab2ef7d26d39e22b38ab08ebfd3f1f9",
"size": "2137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_runme_template.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "143501"
},
{
"name": "Shell",
"bytes": "1002"
}
],
"symlink_target": ""
}
|
def extractRosetranslatorWordpressCom(item):
'''
Parser for 'rosetranslator.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
{
"content_hash": "381711d1d075529fc2cd2ff3906f2dd2",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 27,
"alnum_prop": 0.6402116402116402,
"repo_name": "fake-name/ReadableWebProxy",
"id": "0130dbe2d576ab2963989cc6ed44d6cfa1f8f88a",
"size": "568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractRosetranslatorWordpressCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
}
|
import os
from django.db import models
from django import forms
from .files import ProcessorFieldFile
from .widgets import ClearableProcessedFileInput
class FileFieldProcessor(forms.FileField):
widget = ClearableProcessedFileInput
class FileProcessorField(models.FileField):
attr_class = ProcessorFieldFile
def __init__(self, *args, **kwargs):
self._processors = {}
processors = kwargs.pop('processors', None)
if processors:
for alias, processor in processors.iteritems():
self[alias] = processor
super(FileProcessorField, self).__init__(*args, **kwargs)
def __getitem__(self, alias):
return self._processors[alias]
def __setitem__(self, alias, value):
self._processors[alias] = value
def iterprocessors(self):
return self._processors.iteritems()
def generate_filename(self, instance, filename, subdir=None):
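        # Same as FileField.generate_filename, but accepts an optional
        # subdirectory under the upload directory.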
return os.path.join(self.get_directory_name(), subdir or '', self.get_filename(filename))
def formfield(self, **kwargs):
defaults = {'form_class': FileFieldProcessor}
defaults.update(kwargs)
field = super(FileProcessorField, self).formfield(**defaults)
field.widget = ClearableProcessedFileInput()
return field
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^processorfield\.fields\.FileProcessorField"])
except ImportError:
pass
|
{
"content_hash": "30a10cabc5a7a868a24f7bd96677b662",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 97,
"avg_line_length": 33.40909090909091,
"alnum_prop": 0.6863945578231293,
"repo_name": "syrusakbary/django-processorfield",
"id": "737db8d8dee320c2f5f084e5fcf723a0767cbc1c",
"size": "1470",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "processorfield/fields.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "13341"
}
],
"symlink_target": ""
}
|
from logging import getLogger
from cornice.resource import resource, view
from openprocurement.api.models import Bid, get_now
from openprocurement.api.utils import (
save_tender,
set_ownership,
apply_patch,
error_handler,
update_journal_handler_params,
)
from openprocurement.api.validation import (
validate_bid_data,
validate_patch_bid_data,
)
LOGGER = getLogger(__name__)
@resource(name='Tender Bids',
collection_path='/tenders/{tender_id}/bids',
path='/tenders/{tender_id}/bids/{bid_id}',
description="Tender bids",
error_handler=error_handler)
class TenderBidResource(object):
def __init__(self, request):
self.request = request
self.db = request.registry.db
@view(content_type="application/json", permission='create_bid', validators=(validate_bid_data,), renderer='json')
def collection_post(self):
"""Registration of new bid proposal
Creating new Bid proposal
-------------------------
Example request to create bid proposal:
.. sourcecode:: http
POST /tenders/4879d3f8ee2443169b5fbbc9f89fa607/bids HTTP/1.1
Host: example.com
Accept: application/json
{
"data": {
"tenderers": [
{
"id": {
"name": "Державне управління справами",
"scheme": "https://ns.openprocurement.org/ua/edrpou",
"uid": "00037256",
"uri": "http://www.dus.gov.ua/"
},
"address": {
"countryName": "Україна",
"postalCode": "01220",
"region": "м. Київ",
"locality": "м. Київ",
"streetAddress": "вул. Банкова, 11, корпус 1"
}
}
],
"value": {
"amount": 489,
"currency": "UAH",
"valueAddedTaxIncluded": true
}
}
}
This is what one should expect in response:
.. sourcecode:: http
HTTP/1.1 201 Created
Content-Type: application/json
{
"data": {
"id": "4879d3f8ee2443169b5fbbc9f89fa607",
"status": "registration",
"date": "2014-10-28T11:44:17.947Z",
"tenderers": [
{
"id": {
"name": "Державне управління справами",
"scheme": "https://ns.openprocurement.org/ua/edrpou",
"uid": "00037256",
"uri": "http://www.dus.gov.ua/"
},
"address": {
"countryName": "Україна",
"postalCode": "01220",
"region": "м. Київ",
"locality": "м. Київ",
"streetAddress": "вул. Банкова, 11, корпус 1"
}
}
],
"value": {
"amount": 489,
"currency": "UAH",
"valueAddedTaxIncluded": true
}
}
}
"""
# See https://github.com/open-contracting/standard/issues/78#issuecomment-59830415
# for more info upon schema
tender = self.request.validated['tender']
if self.request.validated['tender_status'] != 'active.tendering':
self.request.errors.add('body', 'data', 'Can\'t add bid in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
bid_data = self.request.validated['data']
bid = Bid(bid_data)
set_ownership(bid, self.request)
tender.bids.append(bid)
if save_tender(self.request):
update_journal_handler_params({'bid_id': bid.id})
LOGGER.info('Created tender bid {}'.format(bid.id), extra={'MESSAGE_ID': 'tender_bid_create'})
self.request.response.status = 201
self.request.response.headers['Location'] = self.request.route_url('Tender Bids', tender_id=tender.id, bid_id=bid['id'])
return {
'data': bid.serialize('view'),
'access': {
'token': bid.owner_token
}
}
@view(renderer='json', permission='view_tender')
def collection_get(self):
"""Bids Listing
Get Bids List
-------------
Example request to get bids list:
.. sourcecode:: http
GET /tenders/4879d3f8ee2443169b5fbbc9f89fa607/bids HTTP/1.1
Host: example.com
Accept: application/json
This is what one should expect in response:
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
{
"data": [
{
"value": {
"amount": 489,
"currency": "UAH",
"valueAddedTaxIncluded": true
}
}
]
}
"""
tender = self.request.validated['tender']
if self.request.validated['tender_status'] in ['active.tendering', 'active.auction']:
self.request.errors.add('body', 'data', 'Can\'t view bids in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
return {'data': [i.serialize(self.request.validated['tender_status']) for i in tender.bids]}
@view(renderer='json', permission='view_tender')
def get(self):
"""Retrieving the proposal
Example request for retrieving the proposal:
.. sourcecode:: http
GET /tenders/4879d3f8ee2443169b5fbbc9f89fa607/bids/71b6c23ed8944d688e92a31ec8c3f61a HTTP/1.1
Host: example.com
Accept: application/json
And here is the response to be expected:
.. sourcecode:: http
HTTP/1.0 200 OK
Content-Type: application/json
{
"data": {
"value": {
"amount": 600,
"currency": "UAH",
"valueAddedTaxIncluded": true
}
}
}
"""
if self.request.authenticated_role == 'bid_owner':
return {'data': self.request.context.serialize('view')}
if self.request.validated['tender_status'] in ['active.tendering', 'active.auction']:
self.request.errors.add('body', 'data', 'Can\'t view bid in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
return {'data': self.request.context.serialize(self.request.validated['tender_status'])}
@view(content_type="application/json", permission='edit_bid', validators=(validate_patch_bid_data,), renderer='json')
def patch(self):
"""Update of proposal
Example request to change bid proposal:
.. sourcecode:: http
PATCH /tenders/4879d3f8ee2443169b5fbbc9f89fa607/bids/71b6c23ed8944d688e92a31ec8c3f61a HTTP/1.1
Host: example.com
Accept: application/json
{
"data": {
"value": {
"amount": 600
}
}
}
And here is the response to be expected:
.. sourcecode:: http
HTTP/1.0 200 OK
Content-Type: application/json
{
"data": {
"value": {
"amount": 600,
"currency": "UAH",
"valueAddedTaxIncluded": true
}
}
}
"""
if self.request.validated['tender_status'] != 'active.tendering':
self.request.errors.add('body', 'data', 'Can\'t update bid in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
value = self.request.validated['data'].get("value", {}).get("amount")
if value and value != self.request.context.get("value", {}).get("amount"):
self.request.validated['data']['date'] = get_now().isoformat()
if apply_patch(self.request, src=self.request.context.serialize()):
LOGGER.info('Updated tender bid {}'.format(self.request.context.id), extra={'MESSAGE_ID': 'tender_bid_patch'})
return {'data': self.request.context.serialize("view")}
@view(renderer='json', permission='edit_bid')
def delete(self):
"""Cancelling the proposal
Example request for cancelling the proposal:
.. sourcecode:: http
DELETE /tenders/4879d3f8ee2443169b5fbbc9f89fa607/bids/71b6c23ed8944d688e92a31ec8c3f61a HTTP/1.1
Host: example.com
Accept: application/json
And here is the response to be expected:
.. sourcecode:: http
HTTP/1.0 200 OK
Content-Type: application/json
{
"data": {
"value": {
"amount": 489,
"currency": "UAH",
"valueAddedTaxIncluded": true
}
}
}
"""
bid = self.request.context
if self.request.validated['tender_status'] != 'active.tendering':
self.request.errors.add('body', 'data', 'Can\'t delete bid in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
res = bid.serialize("view")
self.request.validated['tender'].bids.remove(bid)
if save_tender(self.request):
LOGGER.info('Deleted tender bid {}'.format(self.request.context.id), extra={'MESSAGE_ID': 'tender_bid_delete'})
return {'data': res}
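# --- Illustrative client sketch (not part of the original module) ---
# Mirrors the bid registration request documented in collection_post above.
# The host, tender id and payload values are placeholders copied from the
# docstring example ("tenderers" omitted for brevity), and `requests` is just
# one possible HTTP client.
#
#     import requests
#
#     payload = {"data": {"value": {"amount": 489,
#                                   "currency": "UAH",
#                                   "valueAddedTaxIncluded": True}}}
#     resp = requests.post(
#         "https://example.com/tenders/4879d3f8ee2443169b5fbbc9f89fa607/bids",
#         json=payload)
#     bid = resp.json()["data"]                # created bid, 201 expected
#     token = resp.json()["access"]["token"]   # owner token for PATCH/DELETE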
|
{
"content_hash": "1d4db7f67218f4730421b6a1d56b89f3",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 150,
"avg_line_length": 35.682119205298015,
"alnum_prop": 0.4843170007423905,
"repo_name": "selurvedu/openprocurement.api",
"id": "cbe35d8ada1e7b32e62da62105327add835d8451",
"size": "10918",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/openprocurement/api/views/bid.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "563874"
}
],
"symlink_target": ""
}
|
"""
Copyright 2020 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
This module contains tests for the functions in aspects.py
Currently the tests are only for the slicing aspect - to
check its time complexity.
"""
import sys
sys.path.append(".")
import pandas
import time
import randstr
import random
from util import aspects, enums
def generate_1():
"""
    This function generates a pandas dataframe containing 1 Million rows &
    3 columns, and stores it as a csv file in data/data_for_test_aspects/.
Columns = [Name, Age, Gender]
Around half of the rows would be Gender = Male, and such rows will be placed
at odd row indices.
Args:
Returns:
Raises:
"""
number_of_rows = 1000000
map_gender = {0 : 'Female', 1: 'Male'}
# Generating a list of random strings as Names
list_names = [randstr.randstr(16) for row in range(number_of_rows)]
# Generating a list of random integers between 1 - 100 as Ages
    list_age = [random.randint(1, 100) for row in range(number_of_rows)]
    # Generating a list of alternating 'Male' / 'Female' values as Genders
    list_gender = [map_gender[row % 2] for row in range(number_of_rows)]
table = pandas.DataFrame({'Name' : list_names,
'Age' : list_age,
'Gender' : list_gender})
    table.to_csv('data/data_for_test_aspects/test_1.csv', index=False)
def test_1():
"""
Situation : This test will check the time complexity of the drop
aspect.
Alternate rows are dropped in this test case.
The drop aspect should work in O(number_of_rows *
average_bytes_per_column). And not in O(number_of_rows *
number_of_rows * average_bytes_per_column).
This test checks if the slice_table aspect actually works in
                the desired time complexity.
Args:
Returns:
"""
table = pandas.read_csv('data/data_for_test_aspects/test_1.csv')
# noting the calling time of the slice function
start_time = time.time()
table = aspects.slice_table(table, [('Gender', enums.Filters.EQUAL_TO,
'Female')])
# noting the end return time of the slice function
end_time = time.time()
time_taken = end_time - start_time
print('Execution Time ', time_taken)
assert(time_taken <= 20)
def test_2():
"""
Situation : This test will check the time complexity of the drop
aspect.
Rows with age > 50 will be dropped, so around half of the
rows will be dropped.
The drop aspect should work in O(number_of_rows *
average_bytes_per_column). And not in O(number_of_rows *
number_of_rows * average_bytes_per_column).
This test checks if the slice_table aspect actually works in
                the desired time complexity.
Args:
Returns:
"""
table = pandas.read_csv('data/data_for_test_aspects/test_1.csv')
# noting the calling time of the slice function
start_time = time.time()
table = aspects.slice_table(table, [('Age', enums.Filters.LESS_THAN, 51)])
# noting the end return time of the slice function
end_time = time.time()
time_taken = end_time - start_time
print('Execution Time ', time_taken)
assert(time_taken <= 20)
def test_3():
"""
Situation : This tests the median aspect.
In the same randomly generated dataset calculate the median
age group by gender
Args:
Returns:
"""
table = pandas.read_csv('data/data_for_test_aspects/test_1.csv')
result = aspects.group_by(table, ['Gender'], enums.SummaryOperators.MEDIAN)
result_table = result['table']
result_suggestions = result['suggestions']
print(result_table)
expected_result = """ Gender Age
0 Female 50
1 Male 51"""
expected_suggestions = "[]"
assert(result_table.to_string() == expected_result)
assert(str(result_suggestions) == expected_suggestions)
def test_4():
""" Test for summary operator = PROPORTION_OF_COUNT
Proportion of count of gender for each race/ethnicity
Dataset used : https://www.kaggle.com/spscientist/students-performance-in-exams
Args:
Returns:
"""
table = pandas.read_csv('data/data_for_test_aspects/student_performance.csv')
result = aspects.group_by(table, ['race/ethnicity'],
enums.SummaryOperators.PROPORTION_OF_COUNT)
result_table = result['table']
result_table = aspects.crop_other_columns(result_table, ['race/ethnicity', 'gender'])
result_suggestions = result['suggestions']
    # Sum of proportion column should be (close to) 1.0
assert(result_table['gender'].sum() == 1.0)
print(result_table)
expected_result_table = """ race/ethnicity gender
0 group A 0.089
1 group B 0.190
2 group C 0.319
3 group D 0.262
4 group E 0.140"""
expected_suggestions = "[]"
assert(expected_result_table == result_table.to_string())
assert(str(result_suggestions) == expected_suggestions)
def test_5():
""" Test for summary operator = PROPORTION_OF_SUM
Proportion of sum of reading score for each race/ethnicity
Dataset used : https://www.kaggle.com/spscientist/students-performance-in-exams
Args:
Returns:
"""
table = pandas.read_csv('data/data_for_test_aspects/student_performance.csv')
result = aspects.group_by(table, ['race/ethnicity'],
enums.SummaryOperators.PROPORTION_OF_SUM)
result_table = result['table']
result_table = aspects.crop_other_columns(result_table, ['race/ethnicity', 'reading score'])
result_suggestions = result['suggestions']
    # Sum of proportion column should be (close to) 1.0
assert(float(format(result_table['reading score'].sum(), '.5f')) == 1)
print(result_table)
expected_result_table = """ race/ethnicity reading score
0 group A 0.083216
1 group B 0.185011
2 group C 0.318698
3 group D 0.265263
4 group E 0.147812"""
expected_suggestions = "[]"
assert(expected_result_table == result_table.to_string())
assert(str(result_suggestions) == expected_suggestions)
def test_6():
""" Test for oversight : Attribution With Hidden Negative
Proportion of sum of reading score for each race/ethnicity
Dataset used : https://www.kaggle.com/spscientist/students-performance-in-exams
Args:
Returns:
"""
table = pandas.read_csv('data/data_for_test_aspects/student_performance_updated_to_create_attribution_with_hidden_negative_oversight.csv')
result = aspects.group_by(table, ['race/ethnicity'],
enums.SummaryOperators.PROPORTION_OF_SUM)
result_table = result['table']
result_table = aspects.crop_other_columns(result_table, ['race/ethnicity', 'reading score'])
result_suggestions = result['suggestions']
print(result_table)
expected_result_table = """ race/ethnicity reading score
0 group A 0.083434
1 group B 0.185493
2 group C 0.316920
3 group D 0.265955
4 group E 0.148198"""
expected_suggestions = "[{'suggestion': 'There exists negative values among the values on which proportion is being applied', 'oversight': <Oversights.ATTRIBUTION_WITH_HIDDEN_NEGATIVES: 11>, 'is_row_level_suggestion': True, 'confidence_score': 1, 'row_list': [{'row': 14, 'confidence_score': 1}]}]"
assert(expected_result_table == result_table.to_string())
assert(str(result_suggestions) == expected_suggestions)
# print(generate_1.__doc__)
# generate_1()
print(test_1.__doc__)
test_1()
print(test_2.__doc__)
test_2()
print(test_3.__doc__)
test_3()
print(test_4.__doc__)
test_4()
print(test_5.__doc__)
test_5()
print(test_6.__doc__)
test_6()
print('Test cases completed')
|
{
"content_hash": "64d17b42e9889ef09090a318a87cd8d4",
"timestamp": "",
"source": "github",
"line_count": 264,
"max_line_length": 304,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.6403467666354264,
"repo_name": "google/debaised-analysis",
"id": "f6186ab4a7f52bcc2af19ba7f4fa90ccfcd21462",
"size": "8536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "intents/util/test_aspects.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "129"
},
{
"name": "HTML",
"bytes": "584541"
},
{
"name": "JavaScript",
"bytes": "229958"
},
{
"name": "Python",
"bytes": "357832"
},
{
"name": "Shell",
"bytes": "7483"
}
],
"symlink_target": ""
}
|
import spidev
class mcp3208:
    # Initialize MCP3208 on the given SPI chip-select (device) number
    def __init__(self, device):
self.values = [-1] * 8
self.spi = spidev.SpiDev()
self.spi.open(0,device)
#Read MCP3208 and return array of value of each channel
def readADC(self):
for channel in range(0,8):
self.values[channel] = self.readChannel(channel)
return self.values
#Read single channel of MCP3208
def readChannel(self, channel):
if (channel > 7 or channel < 0):
return -1
        readBytes = self.spi.xfer2([6 + ((channel & 4) >> 2), (channel & 3) << 6, 0])
        # the MCP3208 returns the 12-bit result MSB first: the low 4 bits of the
        # second byte are the high bits, the third byte holds the low 8 bits
        self.values[channel] = ((readBytes[1] & 15) << 8) + readBytes[2]
return self.values[channel]
#Close the spi device
def __del__(self):
self.spi.close()
return 0
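# --- Illustrative usage sketch (not part of the original module) ---
# Assumes an MCP3208 wired to SPI bus 0, chip-select 0 (e.g. on a Raspberry
# Pi); returned values are raw 12-bit counts in the range 0-4095.
#
#     if __name__ == '__main__':
#         adc = mcp3208(0)             # chip-select 0
#         print(adc.readChannel(3))    # read a single channel
#         print(adc.readADC())         # read all eight channels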
|
{
"content_hash": "395466ae0adeb2140b3ea3ecb75deb21",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 73,
"avg_line_length": 25.387096774193548,
"alnum_prop": 0.6734434561626429,
"repo_name": "spencerdb/rpi-arena",
"id": "391ad41c194e01251ccee1f485f9be98efa7f8ef",
"size": "1009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mcp3208.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29711"
},
{
"name": "Shell",
"bytes": "27"
}
],
"symlink_target": ""
}
|