| text (string, lengths 4–1.02M) | meta (dict) |
|---|---|
import sys
import MkDict
print("Test1 - create a dictionary")
keys = ['Australia', 'Eire', 'France', 'Finland', 'UK', 'US']
values = ['Canberra', 'Dublin', 'Paris', 'Helsinki', 'London', 'Washington']
countries = MkDict.MkDict(keys = keys, values = values)
print(countries)
print("Ref count:", sys.getrefcount(countries))
print("\nTest 2 - assign None for missing values")
keys = ['Ford', 'Honda', 'Renault', 'Ferrari', 'Bentley']
values = ['Focus', 'CRV', 'Espace']
cars = MkDict.MkDict(keys, values)
print(cars)
print("\nTest 3 - ignore extra values")
keys = ['M42', 'C33', 'M8', 'M17']
values = ['Orion', 'Veil', 'Lagoon', 'Swan', 'Dumbell', 'Crab']
nebula = MkDict.MkDict(values = values, keys = keys)
print(nebula)
for key in nebula.keys():
print("Ref count:", sys.getrefcount(key))
|
{
"content_hash": "b1f83b63c2127184f9713e99b8e8f415",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 76,
"avg_line_length": 32.2,
"alnum_prop": 0.6509316770186335,
"repo_name": "rbprogrammer/advanced_python_topics",
"id": "fb683ad805b5c4c277963800c8177c4e387e1be1",
"size": "830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "course-material/py3/solutions/14 Extending Python 2/testit.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "429"
},
{
"name": "C",
"bytes": "163248"
},
{
"name": "C++",
"bytes": "25126"
},
{
"name": "Makefile",
"bytes": "1222"
},
{
"name": "Python",
"bytes": "292085"
},
{
"name": "Shell",
"bytes": "2515"
},
{
"name": "VimL",
"bytes": "10757"
}
],
"symlink_target": ""
}
|
from radish.stepregistry import step
from radish import given, when, then
@step("I have the following quote")
def have_quote(step):
step.context.quote = step.text
@when("I add it to the database")
def add_quote_to_db(step):
step.context.database.quotes.append(step.context.quote)
@then("I expect {number:g} quote in the database")
def expect_amount_of_quotes(step, number):
assert len(step.context.database.quotes) == number
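# For orientation, the kind of feature file these steps match (the quote text
# is illustrative; the step sentences come from the decorators above, and the
# triple-quoted block is radish "step text", consumed via step.text):
#
#   Scenario: Add a quote to the database
#     Given I have the following quote
#       """
#       An illustrative quote
#       """
#     When I add it to the database
#     Then I expect 1 quote in the database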
|
{
"content_hash": "0bac712b85bb9eb44c9534d3c23888b2",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 59,
"avg_line_length": 26.058823529411764,
"alnum_prop": 0.7336343115124153,
"repo_name": "radish-bdd/radish",
"id": "08457e88a8dd50dbcc45f49d77e8aecd2cd610d3",
"size": "468",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/exploratory/steptext/radish/steps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Gherkin",
"bytes": "44053"
},
{
"name": "Python",
"bytes": "340136"
},
{
"name": "Shell",
"bytes": "1839"
}
],
"symlink_target": ""
}
|
class GramFuzzError(Exception): pass
class OptGram(GramFuzzError): pass
class FlushGrams(GramFuzzError): pass
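# A hedged sketch of the usual pattern for exception subclasses like these:
# they act as control-flow signals rather than errors. gramfuzz's real call
# sites are not shown in this file, and expand_optional/rule are illustrative
# names, not part of the library.
def expand_optional(rule):
    try:
        return rule()
    except OptGram:
        # the optional grammar element chose not to generate anything
        return None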
|
{
"content_hash": "a4aade9413572a2a99ba98cb8976e2ed",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 37,
"avg_line_length": 22.4,
"alnum_prop": 0.8214285714285714,
"repo_name": "mseclab/PyJFuzz",
"id": "46e3d7d9f4a106d8e275c15db9058e7d88f83c81",
"size": "154",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gramfuzz/gramfuzz/errors.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "18"
},
{
"name": "HTML",
"bytes": "20812"
},
{
"name": "JavaScript",
"bytes": "28825"
},
{
"name": "Python",
"bytes": "196691"
}
],
"symlink_target": ""
}
|
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(739, 497)
self.gridLayout = QtWidgets.QGridLayout(Form)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setSpacing(0)
self.gridLayout.setObjectName("gridLayout")
self.splitter = QtWidgets.QSplitter(Form)
self.splitter.setOrientation(QtCore.Qt.Vertical)
self.splitter.setObjectName("splitter")
self.layoutWidget = QtWidgets.QWidget(self.splitter)
self.layoutWidget.setObjectName("layoutWidget")
self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget)
self.verticalLayout.setObjectName("verticalLayout")
self.output = QtWidgets.QPlainTextEdit(self.layoutWidget)
font = QtGui.QFont()
font.setFamily("Monospace")
self.output.setFont(font)
self.output.setReadOnly(True)
self.output.setObjectName("output")
self.verticalLayout.addWidget(self.output)
self.horizontalLayout = QtWidgets.QHBoxLayout()
self.horizontalLayout.setObjectName("horizontalLayout")
self.input = CmdInput(self.layoutWidget)
self.input.setObjectName("input")
self.horizontalLayout.addWidget(self.input)
self.historyBtn = QtWidgets.QPushButton(self.layoutWidget)
self.historyBtn.setCheckable(True)
self.historyBtn.setObjectName("historyBtn")
self.horizontalLayout.addWidget(self.historyBtn)
self.exceptionBtn = QtWidgets.QPushButton(self.layoutWidget)
self.exceptionBtn.setCheckable(True)
self.exceptionBtn.setObjectName("exceptionBtn")
self.horizontalLayout.addWidget(self.exceptionBtn)
self.verticalLayout.addLayout(self.horizontalLayout)
self.historyList = QtWidgets.QListWidget(self.splitter)
font = QtGui.QFont()
font.setFamily("Monospace")
self.historyList.setFont(font)
self.historyList.setObjectName("historyList")
self.exceptionGroup = QtWidgets.QGroupBox(self.splitter)
self.exceptionGroup.setObjectName("exceptionGroup")
self.gridLayout_2 = QtWidgets.QGridLayout(self.exceptionGroup)
self.gridLayout_2.setContentsMargins(-1, 0, -1, 0)
self.gridLayout_2.setHorizontalSpacing(2)
self.gridLayout_2.setVerticalSpacing(0)
self.gridLayout_2.setObjectName("gridLayout_2")
self.clearExceptionBtn = QtWidgets.QPushButton(self.exceptionGroup)
self.clearExceptionBtn.setEnabled(False)
self.clearExceptionBtn.setObjectName("clearExceptionBtn")
self.gridLayout_2.addWidget(self.clearExceptionBtn, 0, 6, 1, 1)
self.catchAllExceptionsBtn = QtWidgets.QPushButton(self.exceptionGroup)
self.catchAllExceptionsBtn.setCheckable(True)
self.catchAllExceptionsBtn.setObjectName("catchAllExceptionsBtn")
self.gridLayout_2.addWidget(self.catchAllExceptionsBtn, 0, 1, 1, 1)
self.catchNextExceptionBtn = QtWidgets.QPushButton(self.exceptionGroup)
self.catchNextExceptionBtn.setCheckable(True)
self.catchNextExceptionBtn.setObjectName("catchNextExceptionBtn")
self.gridLayout_2.addWidget(self.catchNextExceptionBtn, 0, 0, 1, 1)
self.onlyUncaughtCheck = QtWidgets.QCheckBox(self.exceptionGroup)
self.onlyUncaughtCheck.setChecked(True)
self.onlyUncaughtCheck.setObjectName("onlyUncaughtCheck")
self.gridLayout_2.addWidget(self.onlyUncaughtCheck, 0, 4, 1, 1)
self.exceptionStackList = QtWidgets.QListWidget(self.exceptionGroup)
self.exceptionStackList.setAlternatingRowColors(True)
self.exceptionStackList.setObjectName("exceptionStackList")
self.gridLayout_2.addWidget(self.exceptionStackList, 2, 0, 1, 7)
self.runSelectedFrameCheck = QtWidgets.QCheckBox(self.exceptionGroup)
self.runSelectedFrameCheck.setChecked(True)
self.runSelectedFrameCheck.setObjectName("runSelectedFrameCheck")
self.gridLayout_2.addWidget(self.runSelectedFrameCheck, 3, 0, 1, 7)
self.exceptionInfoLabel = QtWidgets.QLabel(self.exceptionGroup)
self.exceptionInfoLabel.setWordWrap(True)
self.exceptionInfoLabel.setObjectName("exceptionInfoLabel")
self.gridLayout_2.addWidget(self.exceptionInfoLabel, 1, 0, 1, 7)
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.gridLayout_2.addItem(spacerItem, 0, 5, 1, 1)
self.label = QtWidgets.QLabel(self.exceptionGroup)
self.label.setObjectName("label")
self.gridLayout_2.addWidget(self.label, 0, 2, 1, 1)
self.filterText = QtWidgets.QLineEdit(self.exceptionGroup)
self.filterText.setObjectName("filterText")
self.gridLayout_2.addWidget(self.filterText, 0, 3, 1, 1)
self.gridLayout.addWidget(self.splitter, 0, 0, 1, 1)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Console"))
self.historyBtn.setText(_translate("Form", "History.."))
self.exceptionBtn.setText(_translate("Form", "Exceptions.."))
self.exceptionGroup.setTitle(_translate("Form", "Exception Handling"))
self.clearExceptionBtn.setText(_translate("Form", "Clear Stack"))
self.catchAllExceptionsBtn.setText(_translate("Form", "Show All Exceptions"))
self.catchNextExceptionBtn.setText(_translate("Form", "Show Next Exception"))
self.onlyUncaughtCheck.setText(_translate("Form", "Only Uncaught Exceptions"))
self.runSelectedFrameCheck.setText(_translate("Form", "Run commands in selected stack frame"))
self.exceptionInfoLabel.setText(_translate("Form", "Stack Trace"))
self.label.setText(_translate("Form", "Filter (regex):"))
from .CmdInput import CmdInput
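# Minimal usage sketch for this generated template (illustrative only, not
# part of the pyuic5 output). Because of the relative CmdInput import above,
# the module must be imported from inside its package rather than executed
# directly; a caller would do roughly the following:
#
#     import sys
#     from PyQt5 import QtWidgets
#     from pyqtgraph.console.template_pyqt5 import Ui_Form
#     app = QtWidgets.QApplication(sys.argv)
#     form = QtWidgets.QWidget()
#     ui = Ui_Form()
#     ui.setupUi(form)  # builds the console UI onto the bare widget
#     form.show()
#     sys.exit(app.exec_())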
|
{
"content_hash": "a8fd026e5f86e3181ae6e6550342ddb6",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 114,
"avg_line_length": 56.75471698113208,
"alnum_prop": 0.7167553191489362,
"repo_name": "pmaunz/pyqtgraph",
"id": "c8c2cbacdba3643bbe95d80ffc7c0311b38d5396",
"size": "6230",
"binary": false,
"copies": "7",
"ref": "refs/heads/develop",
"path": "pyqtgraph/console/template_pyqt5.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Matlab",
"bytes": "1752"
},
{
"name": "Python",
"bytes": "2125387"
}
],
"symlink_target": ""
}
|
from google.cloud import aiplatform_v1
def sample_get_model_evaluation():
# Create a client
client = aiplatform_v1.ModelServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.GetModelEvaluationRequest(
name="name_value",
)
# Make the request
response = client.get_model_evaluation(request=request)
# Handle the response
print(response)
# [END aiplatform_v1_generated_ModelService_GetModelEvaluation_sync]
|
{
"content_hash": "17fa3a426f7ea6144e8cab6d3d790b89",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 68,
"avg_line_length": 24.894736842105264,
"alnum_prop": 0.718816067653277,
"repo_name": "googleapis/python-aiplatform",
"id": "2a365ca607e72834e70df7c3a6acfbf581304554",
"size": "1873",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/aiplatform_v1_generated_model_service_get_model_evaluation_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "23977004"
},
{
"name": "Shell",
"bytes": "30668"
}
],
"symlink_target": ""
}
|
import mock
from oslo.config import cfg
from neutron.agent.common import config as a_cfg
import neutron_fwaas.services.firewall.drivers.linux.iptables_fwaas as fwaas
from neutron.tests import base
from neutron.tests.unit import test_api_v2
_uuid = test_api_v2._uuid
FAKE_SRC_PREFIX = '10.0.0.0/24'
FAKE_DST_PREFIX = '20.0.0.0/24'
FAKE_PROTOCOL = 'tcp'
FAKE_SRC_PORT = 5000
FAKE_DST_PORT = 22
FAKE_FW_ID = 'fake-fw-uuid'
class IptablesFwaasTestCase(base.BaseTestCase):
def setUp(self):
super(IptablesFwaasTestCase, self).setUp()
cfg.CONF.register_opts(a_cfg.ROOT_HELPER_OPTS, 'AGENT')
self.utils_exec_p = mock.patch(
'neutron.agent.linux.utils.execute')
self.utils_exec = self.utils_exec_p.start()
self.iptables_cls_p = mock.patch(
'neutron.agent.linux.iptables_manager.IptablesManager')
self.iptables_cls_p.start()
self.firewall = fwaas.IptablesFwaasDriver()
def _fake_rules_v4(self, fwid, apply_list):
rule_list = []
rule1 = {'enabled': True,
'action': 'allow',
'ip_version': 4,
'protocol': 'tcp',
'destination_port': '80',
'source_ip_address': '10.24.4.2'}
rule2 = {'enabled': True,
'action': 'deny',
'ip_version': 4,
'protocol': 'tcp',
'destination_port': '22'}
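        # chain names keep only the first 11 chars ('iv4'/'ov4' plus 8 chars
        # of the fw id) -- presumably so they still fit iptables' chain-name
        # length limit once iptables_manager prepends its wrapper prefix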
ingress_chain = ('iv4%s' % fwid)[:11]
egress_chain = ('ov4%s' % fwid)[:11]
for router_info_inst in apply_list:
v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
v4filter_inst.chains.append(ingress_chain)
v4filter_inst.chains.append(egress_chain)
rule_list.append(rule1)
rule_list.append(rule2)
return rule_list
def _fake_firewall_no_rule(self):
rule_list = []
fw_inst = {'id': FAKE_FW_ID,
'admin_state_up': True,
'tenant_id': 'tenant-uuid',
'firewall_rule_list': rule_list}
return fw_inst
def _fake_firewall(self, rule_list):
fw_inst = {'id': FAKE_FW_ID,
'admin_state_up': True,
'tenant_id': 'tenant-uuid',
'firewall_rule_list': rule_list}
return fw_inst
def _fake_firewall_with_admin_down(self, rule_list):
fw_inst = {'id': FAKE_FW_ID,
'admin_state_up': False,
'tenant_id': 'tenant-uuid',
'firewall_rule_list': rule_list}
return fw_inst
def _fake_apply_list(self, router_count=1, distributed=False,
distributed_mode=None):
apply_list = []
while router_count > 0:
iptables_inst = mock.Mock()
router_inst = {'distributed': distributed}
v4filter_inst = mock.Mock()
v6filter_inst = mock.Mock()
v4filter_inst.chains = []
v6filter_inst.chains = []
iptables_inst.ipv4 = {'filter': v4filter_inst}
iptables_inst.ipv6 = {'filter': v6filter_inst}
router_info_inst = mock.Mock()
router_info_inst.iptables_manager = iptables_inst
router_info_inst.snat_iptables_manager = iptables_inst
if distributed_mode == 'dvr':
router_info_inst.dist_fip_count = 1
router_info_inst.router = router_inst
apply_list.append(router_info_inst)
router_count -= 1
return apply_list
def _setup_firewall_with_rules(self, func, router_count=1,
distributed=False, distributed_mode=None):
apply_list = self._fake_apply_list(router_count=router_count,
distributed=distributed, distributed_mode=distributed_mode)
rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list)
firewall = self._fake_firewall(rule_list)
if distributed:
if distributed_mode == 'dvr_snat':
if_prefix = 'sg-+'
if distributed_mode == 'dvr':
if_prefix = 'rfp-+'
else:
if_prefix = 'qr-+'
distributed_mode = 'legacy'
func(distributed_mode, apply_list, firewall)
invalid_rule = '-m state --state INVALID -j DROP'
est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
rule1 = '-p tcp --dport 80 -s 10.24.4.2 -j ACCEPT'
rule2 = '-p tcp --dport 22 -j DROP'
ingress_chain = 'iv4%s' % firewall['id']
egress_chain = 'ov4%s' % firewall['id']
bname = fwaas.iptables_manager.binary_name
ipt_mgr_ichain = '%s-%s' % (bname, ingress_chain[:11])
ipt_mgr_echain = '%s-%s' % (bname, egress_chain[:11])
for router_info_inst in apply_list:
v4filter_inst = router_info_inst.iptables_manager.ipv4['filter']
calls = [mock.call.remove_chain('iv4fake-fw-uuid'),
mock.call.remove_chain('ov4fake-fw-uuid'),
mock.call.remove_chain('fwaas-default-policy'),
mock.call.add_chain('fwaas-default-policy'),
mock.call.add_rule('fwaas-default-policy', '-j DROP'),
mock.call.add_chain(ingress_chain),
mock.call.add_rule(ingress_chain, invalid_rule),
mock.call.add_rule(ingress_chain, est_rule),
mock.call.add_chain(egress_chain),
mock.call.add_rule(egress_chain, invalid_rule),
mock.call.add_rule(egress_chain, est_rule),
mock.call.add_rule(ingress_chain, rule1),
mock.call.add_rule(egress_chain, rule1),
mock.call.add_rule(ingress_chain, rule2),
mock.call.add_rule(egress_chain, rule2),
mock.call.add_rule('FORWARD',
'-o %s -j %s' % (if_prefix,
ipt_mgr_ichain)),
mock.call.add_rule('FORWARD',
'-i %s -j %s' % (if_prefix,
ipt_mgr_echain)),
mock.call.add_rule('FORWARD',
'-o %s -j %s-fwaas-defau' % (if_prefix,
bname)),
mock.call.add_rule('FORWARD',
'-i %s -j %s-fwaas-defau' % (if_prefix,
bname))]
v4filter_inst.assert_has_calls(calls)
def test_create_firewall_no_rules(self):
apply_list = self._fake_apply_list()
firewall = self._fake_firewall_no_rule()
self.firewall.create_firewall('legacy', apply_list, firewall)
invalid_rule = '-m state --state INVALID -j DROP'
est_rule = '-m state --state ESTABLISHED,RELATED -j ACCEPT'
bname = fwaas.iptables_manager.binary_name
for ip_version in (4, 6):
ingress_chain = ('iv%s%s' % (ip_version, firewall['id']))
egress_chain = ('ov%s%s' % (ip_version, firewall['id']))
calls = [mock.call.remove_chain(
'iv%sfake-fw-uuid' % ip_version),
mock.call.remove_chain(
'ov%sfake-fw-uuid' % ip_version),
mock.call.remove_chain('fwaas-default-policy'),
mock.call.add_chain('fwaas-default-policy'),
mock.call.add_rule('fwaas-default-policy', '-j DROP'),
mock.call.add_chain(ingress_chain),
mock.call.add_rule(ingress_chain, invalid_rule),
mock.call.add_rule(ingress_chain, est_rule),
mock.call.add_chain(egress_chain),
mock.call.add_rule(egress_chain, invalid_rule),
mock.call.add_rule(egress_chain, est_rule),
mock.call.add_rule('FORWARD',
'-o qr-+ -j %s-fwaas-defau' % bname),
mock.call.add_rule('FORWARD',
'-i qr-+ -j %s-fwaas-defau' % bname)]
if ip_version == 4:
v4filter_inst = apply_list[0].iptables_manager.ipv4['filter']
v4filter_inst.assert_has_calls(calls)
else:
v6filter_inst = apply_list[0].iptables_manager.ipv6['filter']
v6filter_inst.assert_has_calls(calls)
def test_create_firewall_with_rules(self):
self._setup_firewall_with_rules(self.firewall.create_firewall)
def test_create_firewall_with_rules_two_routers(self):
self._setup_firewall_with_rules(self.firewall.create_firewall,
router_count=2)
def test_update_firewall_with_rules(self):
self._setup_firewall_with_rules(self.firewall.update_firewall)
def test_delete_firewall(self):
apply_list = self._fake_apply_list()
firewall = self._fake_firewall_no_rule()
self.firewall.delete_firewall('legacy', apply_list, firewall)
ingress_chain = 'iv4%s' % firewall['id']
egress_chain = 'ov4%s' % firewall['id']
calls = [mock.call.remove_chain(ingress_chain),
mock.call.remove_chain(egress_chain),
mock.call.remove_chain('fwaas-default-policy')]
apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls)
def test_create_firewall_with_admin_down(self):
apply_list = self._fake_apply_list()
rule_list = self._fake_rules_v4(FAKE_FW_ID, apply_list)
firewall = self._fake_firewall_with_admin_down(rule_list)
self.firewall.create_firewall('legacy', apply_list, firewall)
calls = [mock.call.remove_chain('iv4fake-fw-uuid'),
mock.call.remove_chain('ov4fake-fw-uuid'),
mock.call.remove_chain('fwaas-default-policy'),
mock.call.add_chain('fwaas-default-policy'),
mock.call.add_rule('fwaas-default-policy', '-j DROP')]
apply_list[0].iptables_manager.ipv4['filter'].assert_has_calls(calls)
def test_create_firewall_with_rules_dvr_snat(self):
self._setup_firewall_with_rules(self.firewall.create_firewall,
distributed=True, distributed_mode='dvr_snat')
def test_update_firewall_with_rules_dvr_snat(self):
self._setup_firewall_with_rules(self.firewall.update_firewall,
distributed=True, distributed_mode='dvr_snat')
def test_create_firewall_with_rules_dvr(self):
self._setup_firewall_with_rules(self.firewall.create_firewall,
distributed=True, distributed_mode='dvr')
def test_update_firewall_with_rules_dvr(self):
self._setup_firewall_with_rules(self.firewall.update_firewall,
distributed=True, distributed_mode='dvr')
|
{
"content_hash": "c97be87f4efb1b8906cdcd5e3767d169",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 79,
"avg_line_length": 46.61702127659574,
"alnum_prop": 0.5477863989046098,
"repo_name": "dougwig/x-neutron-fwaas",
"id": "5302f0cb3e42455e62c3970d59fa7116706ce1bf",
"size": "11580",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neutron_fwaas/tests/unit/services/firewall/drivers/linux/test_iptables_fwaas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "380634"
}
],
"symlink_target": ""
}
|
'''
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,urlparse,json
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import debrid
from resources.lib.modules import control
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['rlsbb.com', 'rlsbb.ru']
self.base_link = 'http://rlsbb.ru'
self.search_base_link = 'http://search.rlsbb.ru'
self.search_cookie = 'serach_mode=rlsbb'
self.search_link = '/lib/search526049.php?phrase=%s&pindex=1&content=true'
self.search_link2 = '/search/%s'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = {'imdb': imdb, 'title': title, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
if debrid.status() == False: raise Exception()
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
posts = []
if 'tvshowtitle' in data:
query = '%s %s S%02dE%02d' % (data['tvshowtitle'], int(data['year']), int(data['season']), int(data['episode']))
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
referer = self.search_link2 % urllib.quote_plus(query)
referer = urlparse.urljoin(self.search_base_link, referer)
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.search_base_link, url)
result = client.request(url, cookie=self.search_cookie, XHR=True, referer=referer)
try: posts += json.loads(re.findall('({.+?})$', result)[0])['results']
except: pass
else:
query = '%s %s' % (data['title'], data['year'])
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
referer = self.search_link2 % urllib.quote_plus(query)
referer = urlparse.urljoin(self.search_base_link, referer)
url = self.search_link % urllib.quote_plus(query)
url = urlparse.urljoin(self.search_base_link, url)
result = client.request(url, cookie=self.search_cookie, XHR=True, referer=referer)
try: posts += json.loads(re.findall('({.+?})$', result)[0])['results']
except: pass
links = [] ; dupes = []
for post in posts:
try:
name = post['post_title'] ; url = post['post_name']
if not url in dupes:
dupes.append(url)
t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
if not cleantitle.get(title) in cleantitle.get(t): raise Exception()
try: y = re.findall('[\.|\(|\[|\s](S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
except: y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
if 'S' in y and 'E' in y: cat = 'episode'
elif 'S' in y: cat = 'tvshow'
elif y.isdigit(): cat = 'movie'
if cat == 'movie': hdlr = data['year']
elif cat == 'episode': hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
elif cat == 'tvshow': hdlr = 'S%02d' % int(data['season'])
if not y == hdlr: raise Exception()
items = []
content = post['post_content']
try: items += zip([i for i in client.parseDOM(content, 'p') if 'Release Name:' in i], [i for i in client.parseDOM(content, 'p') if '<strong>Download' in i])
except: pass
try: items += client.parseDOM(content, 'p', attrs = {'style': '.+?'})
except: pass
for item in items:
try:
if type(item) == tuple: item = '######URL######'.join(item)
fmt = re.sub('(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)', '', name.upper())
fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
fmt = [i.lower() for i in fmt]
if any(i.endswith(('subs', 'sub', 'dubbed', 'dub')) for i in fmt): raise Exception()
if any(i in ['extras'] for i in fmt): raise Exception()
if '1080p' in fmt: quality = '1080p'
elif '720p' in fmt: quality = '720p'
else: quality = 'SD'
if any(i in ['dvdscr', 'r5', 'r6'] for i in fmt): quality = 'SCR'
elif any(i in ['camrip', 'tsrip', 'hdcam', 'hdts', 'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'] for i in fmt): quality = 'CAM'
                                quality, infoo = source_utils.get_release_quality(name, item)
info = []
if '3d' in fmt: info.append('3D')
try:
if cat == 'tvshow': raise Exception()
size = re.findall('(\d+(?:\.|/,|)\d+(?:\s+|)(?:GB|GiB|MB|MiB))', item)[0].strip()
div = 1 if size.endswith(('GB', 'GiB')) else 1024
size = float(re.sub('[^0-9|/.|/,]', '', size))/div
size = '%.2f GB' % size
info.append(size)
except:
pass
info = ' | '.join(info)
url = item.rsplit('######URL######')[-1]
url = zip(client.parseDOM(url, 'a'), client.parseDOM(url, 'a', ret='href'))
for i in url: links.append({'url': i[1], 'quality': quality, 'info': info, 'host': i[0], 'cat': cat})
except:
pass
except:
pass
check = [i for i in links if not i['quality'] == 'CAM']
if len(check) > 0: links = check
hostDict = hostprDict + hostDict
for i in links:
try:
url = i['url']
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
if i['cat'] == 'tvshow':
if not i['quality'] in ['1080p', 'HD']: raise Exception()
if not any(i['host'].lower() in x for x in hostDict): raise Exception()
url = client.request(url)
url = client.parseDOM(url, 'ol')[0]
url = client.parseDOM(url, 'div', attrs = {'style': '.+?'})[int(data['episode'])-1]
host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
if not host in hostDict: raise Exception()
host = client.replaceHTMLCodes(host)
host = host.encode('utf-8')
sources.append({'source': host, 'quality': i['quality'], 'language': 'en', 'url': url, 'info': i['info'], 'direct': False, 'debridonly': True})
except:
pass
return sources
except:
return sources
def resolve(self, url):
return url
|
{
"content_hash": "310172bb7b72c591e85e67a07d72c0ba",
"timestamp": "",
"source": "github",
"line_count": 230,
"max_line_length": 180,
"avg_line_length": 41.66086956521739,
"alnum_prop": 0.45481110415362136,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "4e5be74251e3f2cd061e1a3b85eae57f72d6e18b",
"size": "9607",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "script.module.uncoded/lib/resources/lib/sources/en/releasebb.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
}
|
import os
import tarfile
import zipfile
import redis
from datetime import datetime
from flask import Blueprint, request, jsonify, current_app
from flask_recaptcha import ReCaptcha
from werkzeug.utils import secure_filename
from routes.file_utils import fix_uri, handle_tar, handle_zip
from modules.gc import blob_gc_enqueue
from modules.pan_spfy import spfy
from routes.ra_posts import handle_groupresults  # handle_singleton is redefined locally below
from middleware.api import subtyping_dependencies
bp_ra_pan = Blueprint('reactapp_pan', __name__)
def handle_singleton(jobs_dict):
'''
Takes the jobs_dict dict and creates "blob" jobs which have the QC/ID
Reservation job ids attached to it. This allows the front-end to poll this
"blob" + hash job and the back-end (here) will handle checking if dependencies
failed.
    Groups by filename and by analysis: so for a Serotype/VF and an AMR job,
will return two "blob" ids which have the corresponding QC/ID tasks added
to both. Also will then group by file names: so, multiple files with multiple
    jobs return a multiplicative number of "blob" ids. For example, 3 files,
    each with a Serotype/VF and an AMR job (2 jobs each), will return 6 "blob" ids.
'''
    # create a dictionary keyed by file name, grouping the relevant job hashes
by_file = {}
# a key should be a job id
for key in jobs_dict:
# we're inverting the structure of jobs_dict here
# have we encountered this file before?
f = jobs_dict[key]['file']
if f not in by_file:
            # create an empty dict using the filename as the key
by_file[f] = {}
# it's important we maintain the structure of jobs_dict
# so that when it comes to polling 'blob' ids everything
# works as expected
by_file[f].update({key: jobs_dict[key]})
    # after this for loop, we should now have a dict of dicts
    # where the filename is the key to a group
# something like
# {"/datastore/2017-06-14-21-26-43-375215-GCA_001683595.1_NGF2_genomic.fna":
# { "16515ba5-040d-4315-9c88-a3bf5bfbe84e": {
# "analysis": "Quality Control",
# "file": "/datastore/2017-06-14-21-26-43-375215-GCA_001683595.1_NGF2_genomic.fna"
# }, "9b043d55-cb16-46bd-b086-d2a11c053b54": {
# "analysis": "Antimicrobial Resistance",
# "file": "/datastore/2017-06-14-21-26-43-375215-GCA_001683595.1_NGF2_genomic.fna"
# }, "aa10aedc-c7c2-4fd9-8756-a907ea45382a": {
# "analysis": "ID Reservation",
# "file": "/datastore/2017-06-14-21-26-43-375215-GCA_001683595.1_NGF2_genomic.fna"
# },
# "c96619b8-b089-4a3a-8dd2-b09b5d5e38e9": {
# "analysis": "Virulence Factors and Serotype",
# "file": "/datastore/2017-06-14-21-26-43-375215-GCA_001683595.1_NGF2_genomic.fna"
# }
# }
# }
# create a blob_ids dict to return
blob_ids = {}
# step through the by_file dict
for f in by_file:
# step through the job Ids and figure out which is QC and which is ID
qc = ''
idr = ''
for jobId in by_file[f]:
analysis = by_file[f][jobId]['analysis']
if analysis == "Quality Control":
qc = jobId
elif analysis == "ID Reservation":
idr = jobId
        # go through again, find the jobs that are not QC or ID Reservation,
        # and create blob ids
for jobId in by_file[f]:
analysis = by_file[f][jobId]['analysis']
            # look for an analysis name that isn't a dependency
if analysis not in subtyping_dependencies:
# create the blob dict to be stored in redis
blob_dict = {jobId: by_file[f][jobId]}
blob_dict.update({qc: by_file[f][qc]})
blob_dict.update({idr: by_file[f][idr]})
blob_ids.update(create_blob_id(f,analysis,blob_dict))
return blob_ids
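# NOTE: create_blob_id() is called above but never defined or imported in this
# module as shown. The following is a hedged reconstruction, not the project's
# actual implementation -- the blob-id format and the redis key layout are
# assumptions. It mints an id for one (file, analysis) group and stores the
# grouped jobs so the front-end can poll a single "blob" id.
def create_blob_id(f, analysis, blob_dict):
    import json
    import uuid
    blob_id = 'blob{0}'.format(uuid.uuid4())
    redis_conn = redis.StrictRedis()
    redis_conn.set(blob_id, json.dumps(blob_dict))
    # mirror the per-job structure so polling code can treat blobs uniformly
    return {blob_id: {'analysis': analysis, 'file': f}}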
@bp_ra_pan.route('/api/v0/panseq', methods=['POST'])
def pan_upload():
    print('james_debug : found the correct route')
recaptcha = ReCaptcha(app=current_app)
if recaptcha.verify():
form = request.form
options = {}
# defaults
options['pi']=90
options['pan'] = True
options['amr']=False
options['vf']=False
options['serotype']=False
options['bulk']=False
# processing form data
for key, value in form.items():
            # we need to convert lower-case true/false from JS into Python booleans
            # remember, we also have numbers
if not value.isdigit():
if value.lower() == 'false':
value = False
else:
value = True
if key == 'options.amr':
options['amr']=value
if key == 'options.vf':
options['vf']=value
if key == 'options.serotype':
options['serotype']=value
if key == 'options.groupresults':
groupresults = value
if key == 'options.bulk':
options['bulk'] = value
else:
if key =='options.pi':
options['pi']=int(value)
# get a list of files submitted
uploaded_files = request.files.getlist("file")
        # set up constants for identifying this session
now = datetime.now()
now = now.strftime("%Y-%m-%d-%H-%M-%S-%f")
jobs_dict = {}
print('james_debug : entering for loop')
print('james_debug : uploaded files : ' + str(uploaded_files))
file_list = []
for file in uploaded_files:
print('james_debug : file: ' + str(file))
if file:
# for saving file
filename = os.path.join(current_app.config[
'DATASTORE'], now + '-' + secure_filename(file.filename))
file.save(filename)
#print 'Uploaded File Saved at', str(filename)
if tarfile.is_tarfile(filename):
# set filename to dir for spfy call
filename = handle_tar(filename, now)
elif zipfile.is_zipfile(filename):
filename = handle_zip(filename, now)
print('james_debug : filename: ' + str(filename))
if not options['pan']:
# for enqueing task
jobs_enqueued = spfy(
{'i': filename, 'pi':options['pi'], 'options':options})
jobs_dict.update(jobs_enqueued)
else:
file_list.append(filename)
# new in 4.2.0
if options['pan']:
jobs_enqueued = spfy({'i': file_list, 'pi':options['pi'], 'options':options})
jobs_dict.update(jobs_enqueued)
        print('upload(): all files enqueued, returning...')
#if groupresults:
# return jsonify(handle_groupresults(jobs_dict))
#else:
print('james_debug: upload return: ' + str(jobs_dict))
return jsonify((jobs_dict))
else:
return "Captcha Failed Verification", 500
|
{
"content_hash": "42dfbd1ecaee51bde448cbe6c003ddc9",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 97,
"avg_line_length": 41.186440677966104,
"alnum_prop": 0.5683127572016461,
"repo_name": "superphy/backend",
"id": "40bd4e640ff6d09b254956c955777d9747e94e4e",
"size": "7290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/routes/ra_pan.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "392139"
}
],
"symlink_target": ""
}
|
__author__ = 'maxim'
class Model(object):
DATA_WITH_BIAS = False
EXPECTS_TIME_PARAM = False
def __init__(self, **params):
self._features = params['features']
def session(self):
class Dummy:
def __enter__(self): pass
def __exit__(self, exc_type, exc_val, exc_tb): pass
return Dummy()
def fit(self, train_set):
raise NotImplementedError
def predict(self, x):
raise NotImplementedError
def save(self, dest_dir):
pass
def restore(self, source_dir):
pass
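# A hedged example subclass, not part of this module: a trivial baseline that
# always predicts the mean of the training targets, just to illustrate the
# fit/predict contract above. It assumes train_set is an (x, y) pair of numpy
# arrays and that numpy is available; the repo's real models may differ.
import numpy as np
class MeanBaseline(Model):
    def fit(self, train_set):
        _, y = train_set
        self._mean = float(np.mean(y))
    def predict(self, x):
        # the same scalar prediction for every input row
        return np.full(len(x), self._mean)
# usage sketch: model = MeanBaseline(features=['price']); model.fit((x, y))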
|
{
"content_hash": "df8a67e5a1081e113a7e76234b9f8151",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 57,
"avg_line_length": 19.037037037037038,
"alnum_prop": 0.6264591439688716,
"repo_name": "maxim5/time-series-machine-learning",
"id": "0161aa0b90329882ec146d2eefd86e02c10da9fe",
"size": "560",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "46331"
}
],
"symlink_target": ""
}
|
import dis
import os.path
import re
import subprocess
import sys
import types
import unittest
from test.support import findfile, run_unittest
def abspath(filename):
return os.path.abspath(findfile(filename, subdir="dtracedata"))
def normalize_trace_output(output):
"""Normalize DTrace output for comparison.
DTrace keeps a per-CPU buffer, and when showing the fired probes, buffers
are concatenated. So if the operating system moves our thread around, the
straight result can be "non-causal". So we add timestamps to the probe
firing, sort by that field, then strip it from the output"""
# When compiling with '--with-pydebug', strip '[# refs]' debug output.
output = re.sub(r"\[[0-9]+ refs\]", "", output)
try:
result = [
row.split("\t")
for row in output.splitlines()
if row and not row.startswith('#')
]
result.sort(key=lambda row: int(row[0]))
result = [row[1] for row in result]
return "\n".join(result)
except (IndexError, ValueError):
raise AssertionError(
"tracer produced unparseable output:\n{}".format(output)
)
class TraceBackend:
EXTENSION = None
COMMAND = None
COMMAND_ARGS = []
def run_case(self, name, optimize_python=None):
actual_output = normalize_trace_output(self.trace_python(
script_file=abspath(name + self.EXTENSION),
python_file=abspath(name + ".py"),
optimize_python=optimize_python))
with open(abspath(name + self.EXTENSION + ".expected")) as f:
expected_output = f.read().rstrip()
return (expected_output, actual_output)
def generate_trace_command(self, script_file, subcommand=None):
command = self.COMMAND + [script_file]
if subcommand:
command += ["-c", subcommand]
return command
def trace(self, script_file, subcommand=None):
command = self.generate_trace_command(script_file, subcommand)
stdout, _ = subprocess.Popen(command,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True).communicate()
return stdout
def trace_python(self, script_file, python_file, optimize_python=None):
python_flags = []
if optimize_python:
python_flags.extend(["-O"] * optimize_python)
subcommand = " ".join([sys.executable] + python_flags + [python_file])
return self.trace(script_file, subcommand)
def assert_usable(self):
try:
output = self.trace(abspath("assert_usable" + self.EXTENSION))
output = output.strip()
except (FileNotFoundError, NotADirectoryError, PermissionError) as fnfe:
output = str(fnfe)
if output != "probe: success":
raise unittest.SkipTest(
"{}(1) failed: {}".format(self.COMMAND[0], output)
)
class DTraceBackend(TraceBackend):
EXTENSION = ".d"
COMMAND = ["dtrace", "-q", "-s"]
class SystemTapBackend(TraceBackend):
EXTENSION = ".stp"
COMMAND = ["stap", "-g"]
class TraceTests(unittest.TestCase):
# unittest.TestCase options
maxDiff = None
# TraceTests options
backend = None
optimize_python = 0
@classmethod
def setUpClass(self):
self.backend.assert_usable()
def run_case(self, name):
        expected_output, actual_output = self.backend.run_case(
name, optimize_python=self.optimize_python)
self.assertEqual(actual_output, expected_output)
def test_function_entry_return(self):
self.run_case("call_stack")
def test_verify_call_opcodes(self):
"""Ensure our call stack test hits all function call opcodes"""
opcodes = set(["CALL_FUNCTION", "CALL_FUNCTION_EX", "CALL_FUNCTION_KW"])
with open(abspath("call_stack.py")) as f:
code_string = f.read()
def get_function_instructions(funcname):
# Recompile with appropriate optimization setting
code = compile(source=code_string,
filename="<string>",
mode="exec",
optimize=self.optimize_python)
for c in code.co_consts:
if isinstance(c, types.CodeType) and c.co_name == funcname:
return dis.get_instructions(c)
return []
for instruction in get_function_instructions('start'):
opcodes.discard(instruction.opname)
self.assertEqual(set(), opcodes)
def test_gc(self):
self.run_case("gc")
def test_line(self):
self.run_case("line")
class DTraceNormalTests(TraceTests):
backend = DTraceBackend()
optimize_python = 0
class DTraceOptimizedTests(TraceTests):
backend = DTraceBackend()
optimize_python = 2
class SystemTapNormalTests(TraceTests):
backend = SystemTapBackend()
optimize_python = 0
class SystemTapOptimizedTests(TraceTests):
backend = SystemTapBackend()
optimize_python = 2
def test_main():
run_unittest(DTraceNormalTests, DTraceOptimizedTests, SystemTapNormalTests,
SystemTapOptimizedTests)
if __name__ == '__main__':
test_main()
|
{
"content_hash": "7edb196f707248a1c8d09b64e19d501d",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 80,
"avg_line_length": 30.089887640449437,
"alnum_prop": 0.6120238984316654,
"repo_name": "FFMG/myoddweb.piger",
"id": "8612e276d765884634bf3a033542a4fb14021f69",
"size": "5356",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "monitor/api/python/Python-3.7.2/Lib/test/test_dtrace.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Ada",
"bytes": "89079"
},
{
"name": "Assembly",
"bytes": "399228"
},
{
"name": "Batchfile",
"bytes": "93889"
},
{
"name": "C",
"bytes": "32256857"
},
{
"name": "C#",
"bytes": "197461"
},
{
"name": "C++",
"bytes": "200544641"
},
{
"name": "CMake",
"bytes": "192771"
},
{
"name": "CSS",
"bytes": "441704"
},
{
"name": "CWeb",
"bytes": "174166"
},
{
"name": "Common Lisp",
"bytes": "24481"
},
{
"name": "Cuda",
"bytes": "52444"
},
{
"name": "DIGITAL Command Language",
"bytes": "33549"
},
{
"name": "DTrace",
"bytes": "2157"
},
{
"name": "Fortran",
"bytes": "1856"
},
{
"name": "HTML",
"bytes": "181677643"
},
{
"name": "IDL",
"bytes": "14"
},
{
"name": "Inno Setup",
"bytes": "9647"
},
{
"name": "JavaScript",
"bytes": "705756"
},
{
"name": "Lex",
"bytes": "1231"
},
{
"name": "Lua",
"bytes": "3332"
},
{
"name": "M4",
"bytes": "259214"
},
{
"name": "Makefile",
"bytes": "1262318"
},
{
"name": "Max",
"bytes": "36857"
},
{
"name": "Module Management System",
"bytes": "1545"
},
{
"name": "Objective-C",
"bytes": "2167778"
},
{
"name": "Objective-C++",
"bytes": "630"
},
{
"name": "PHP",
"bytes": "59030"
},
{
"name": "PLSQL",
"bytes": "22886"
},
{
"name": "Pascal",
"bytes": "75208"
},
{
"name": "Perl",
"bytes": "42080"
},
{
"name": "PostScript",
"bytes": "13803"
},
{
"name": "PowerShell",
"bytes": "11781"
},
{
"name": "Python",
"bytes": "30377308"
},
{
"name": "QML",
"bytes": "593"
},
{
"name": "QMake",
"bytes": "16692"
},
{
"name": "Rebol",
"bytes": "354"
},
{
"name": "Rich Text Format",
"bytes": "6743"
},
{
"name": "Roff",
"bytes": "55661"
},
{
"name": "Ruby",
"bytes": "5532"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Shell",
"bytes": "783974"
},
{
"name": "TSQL",
"bytes": "1201"
},
{
"name": "Tcl",
"bytes": "1172"
},
{
"name": "TeX",
"bytes": "32117"
},
{
"name": "Visual Basic",
"bytes": "70"
},
{
"name": "XSLT",
"bytes": "552736"
},
{
"name": "Yacc",
"bytes": "19623"
}
],
"symlink_target": ""
}
|
import re
import unicodedata
from socketio import socketio_manage
from socketio.namespace import BaseNamespace
from socketio.mixins import RoomsMixin, BroadcastMixin
from werkzeug.exceptions import NotFound
from gevent import monkey
from flask import Flask, Response, request, render_template, url_for, redirect
from flask.ext.sqlalchemy import SQLAlchemy
monkey.patch_all()
app = Flask(__name__)
app.debug = True
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/chat.db'
db = SQLAlchemy(app)
# models
class ChatRoom(db.Model):
__tablename__ = 'chatrooms'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20), nullable=False)
slug = db.Column(db.String(50))
users = db.relationship('ChatUser', backref='chatroom', lazy='dynamic')
def __unicode__(self):
return self.name
def get_absolute_url(self):
return url_for('room', slug=self.slug)
def save(self, *args, **kwargs):
if not self.slug:
self.slug = slugify(self.name)
db.session.add(self)
db.session.commit()
class ChatUser(db.Model):
__tablename__ = 'chatusers'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(20), nullable=False)
session = db.Column(db.String(20), nullable=False)
chatroom_id = db.Column(db.Integer, db.ForeignKey('chatrooms.id'))
def __unicode__(self):
return self.name
# utils
def slugify(value):
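    # e.g. slugify(u'Caf\xe9 Chat!') -> u'cafe-chat': the NFKD/ascii pass
    # strips accents, punctuation is dropped, whitespace collapses to hyphens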
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
return re.sub('[-\s]+', '-', value)
def get_object_or_404(klass, **query):
instance = klass.query.filter_by(**query).first()
if not instance:
raise NotFound()
return instance
def get_or_create(klass, **kwargs):
try:
return get_object_or_404(klass, **kwargs), False
except NotFound:
instance = klass(**kwargs)
instance.save()
return instance, True
def init_db():
db.create_all(app=app)
# views
@app.route('/')
def rooms():
"""
Homepage - lists all rooms.
"""
context = {"rooms": ChatRoom.query.all()}
return render_template('rooms.html', **context)
@app.route('/<path:slug>')
def room(slug):
"""
Show a room.
"""
context = {"room": get_object_or_404(ChatRoom, slug=slug)}
return render_template('room.html', **context)
@app.route('/create', methods=['POST'])
def create():
"""
Handles post from the "Add room" form on the homepage, and
redirects to the new room.
"""
name = request.form.get("name")
if name:
room, created = get_or_create(ChatRoom, name=name)
return redirect(url_for('room', slug=room.slug))
return redirect(url_for('rooms'))
class ChatNamespace(BaseNamespace, RoomsMixin, BroadcastMixin):
nicknames = []
def initialize(self):
self.logger = app.logger
self.log("Socketio session started")
def log(self, message):
self.logger.info("[{0}] {1}".format(self.socket.sessid, message))
def on_join(self, room):
self.room = room
self.join(room)
return True
def on_nickname(self, nickname):
self.log('Nickname: {0}'.format(nickname))
self.nicknames.append(nickname)
self.session['nickname'] = nickname
self.broadcast_event('announcement', '%s has connected' % nickname)
self.broadcast_event('nicknames', self.nicknames)
return True, nickname
def recv_disconnect(self):
# Remove nickname from the list.
self.log('Disconnected')
nickname = self.session['nickname']
self.nicknames.remove(nickname)
self.broadcast_event('announcement', '%s has disconnected' % nickname)
self.broadcast_event('nicknames', self.nicknames)
self.disconnect(silent=True)
return True
def on_user_message(self, msg):
self.log('User message: {0}'.format(msg))
self.emit_to_room(self.room, 'msg_to_room',
self.session['nickname'], msg)
return True
@app.route('/socket.io/<path:remaining>')
def socketio(remaining):
try:
socketio_manage(request.environ, {'/chat': ChatNamespace}, request)
except:
app.logger.error("Exception while handling socketio connection",
exc_info=True)
return Response()
if __name__ == '__main__':
app.run()
|
{
"content_hash": "6020a77b79a11f15667701d3cd3d8b09",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 78,
"avg_line_length": 27.51851851851852,
"alnum_prop": 0.6336922386720503,
"repo_name": "abourget/gevent-socketio",
"id": "1ecd5a38fac69b191e6c86fa2ba1738a75289fcd",
"size": "4458",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "examples/flask_chat/chat.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "4515"
},
{
"name": "JavaScript",
"bytes": "153092"
},
{
"name": "Python",
"bytes": "126209"
}
],
"symlink_target": ""
}
|
"""
A vtkTkImageViewerWidget for python, which is based on the
vtkTkImageWindowWidget.
Specify double=1 to get a double-buffered window.
Created by David Gobbi, Nov 1999
"""
import Tkinter
import math, os, sys
import vtk
from vtkLoadPythonTkWidgets import vtkLoadPythonTkWidgets
class vtkTkImageViewerWidget(Tkinter.Widget):
"""
A vtkTkImageViewerWidget for Python.
Use GetImageViewer() to get the vtkImageViewer.
Create with the keyword double=1 in order to generate a
double-buffered viewer.
Create with the keyword focus_on_enter=1 to enable
focus-follows-mouse. The default is for a click-to-focus mode.
"""
def __init__(self, master, cnf={}, **kw):
"""
Constructor.
Keyword arguments:
iv -- Use passed image viewer instead of creating a new one.
double -- If True, generate a double-buffered viewer.
Defaults to False.
focus_on_enter -- If True, use a focus-follows-mouse mode.
Defaults to False where the widget will use a click-to-focus
mode.
"""
# load the necessary extensions into tk
vtkLoadPythonTkWidgets(master.tk)
try: # use specified vtkImageViewer
imageViewer = kw['iv']
except KeyError: # or create one if none specified
imageViewer = vtk.vtkImageViewer()
doubleBuffer = 0
try:
if kw['double']:
doubleBuffer = 1
del kw['double']
except:
pass
# check if focus should follow mouse
if kw.get('focus_on_enter'):
self._FocusOnEnter = 1
del kw['focus_on_enter']
else:
self._FocusOnEnter = 0
kw['iv'] = imageViewer.GetAddressAsString("vtkImageViewer")
Tkinter.Widget.__init__(self, master, 'vtkTkImageViewerWidget',
cnf, kw)
if doubleBuffer:
imageViewer.GetRenderWindow().DoubleBufferOn()
self.BindTkImageViewer()
def __getattr__(self,attr):
# because the tk part of vtkTkImageViewerWidget must have
# the only remaining reference to the ImageViewer when
# it is destroyed, we can't actually store the ImageViewer
# as an attribute but instead have to get it from the tk-side
if attr == '_ImageViewer':
addr = self.tk.call(self._w, 'GetImageViewer')[5:]
return vtk.vtkImageViewer('_%s_vtkImageViewer_p' % addr)
raise AttributeError, self.__class__.__name__ + \
" has no attribute named " + attr
def GetImageViewer(self):
return self._ImageViewer
def Render(self):
self._ImageViewer.Render()
def BindTkImageViewer(self):
imager = self._ImageViewer.GetRenderer()
# stuff for window level text.
mapper = vtk.vtkTextMapper()
mapper.SetInput("none")
t_prop = mapper.GetTextProperty()
t_prop.SetFontFamilyToTimes()
t_prop.SetFontSize(18)
t_prop.BoldOn()
t_prop.ShadowOn()
self._LevelMapper = mapper
actor = vtk.vtkActor2D()
actor.SetMapper(mapper)
actor.SetLayerNumber(1)
actor.GetPositionCoordinate().SetValue(4,22)
actor.GetProperty().SetColor(1,1,0.5)
actor.SetVisibility(0)
imager.AddActor2D(actor)
self._LevelActor = actor
mapper = vtk.vtkTextMapper()
mapper.SetInput("none")
t_prop = mapper.GetTextProperty()
t_prop.SetFontFamilyToTimes()
t_prop.SetFontSize(18)
t_prop.BoldOn()
t_prop.ShadowOn()
self._WindowMapper = mapper
actor = vtk.vtkActor2D()
actor.SetMapper(mapper)
actor.SetLayerNumber(1)
actor.GetPositionCoordinate().SetValue(4,4)
actor.GetProperty().SetColor(1,1,0.5)
actor.SetVisibility(0)
imager.AddActor2D(actor)
self._WindowActor = actor
self._LastX = 0
self._LastY = 0
self._OldFocus = 0
self._InExpose = 0
# bindings
# window level
self.bind("<ButtonPress-1>",
lambda e,s=self: s.StartWindowLevelInteraction(e.x,e.y))
self.bind("<B1-Motion>",
lambda e,s=self: s.UpdateWindowLevelInteraction(e.x,e.y))
self.bind("<ButtonRelease-1>",
lambda e,s=self: s.EndWindowLevelInteraction())
# Get the value
self.bind("<ButtonPress-3>",
lambda e,s=self: s.StartQueryInteraction(e.x,e.y))
self.bind("<B3-Motion>",
lambda e,s=self: s.UpdateQueryInteraction(e.x,e.y))
self.bind("<ButtonRelease-3>",
lambda e,s=self: s.EndQueryInteraction())
self.bind("<Expose>",
lambda e,s=self: s.ExposeTkImageViewer())
self.bind("<Enter>",
lambda e,s=self: s.EnterTkViewer())
self.bind("<Leave>",
lambda e,s=self: s.LeaveTkViewer())
self.bind("<KeyPress-e>",
lambda e,s=self: s.quit())
self.bind("<KeyPress-r>",
lambda e,s=self: s.ResetTkImageViewer())
def _GrabFocus(self):
self._OldFocus=self.focus_get()
self.focus()
def EnterTkViewer(self):
if self._FocusOnEnter:
self._GrabFocus()
def LeaveTkViewer(self):
if self._FocusOnEnter and (self._OldFocus != None):
self._OldFocus.focus()
def ExposeTkImageViewer(self):
if (self._InExpose == 0):
self._InExpose = 1
if (not self._ImageViewer.GetRenderWindow().
IsA('vtkCocoaRenderWindow')):
self.update()
self._ImageViewer.Render()
self._InExpose = 0
def StartWindowLevelInteraction(self,x,y):
if not self._FocusOnEnter:
self._GrabFocus()
viewer = self._ImageViewer
self._LastX = x
self._LastY = y
self._Window = float(viewer.GetColorWindow())
self._Level = float(viewer.GetColorLevel())
# make the window level text visible
self._LevelActor.SetVisibility(1)
self._WindowActor.SetVisibility(1)
self.UpdateWindowLevelInteraction(x,y)
def EndWindowLevelInteraction(self):
# make the window level text invisible
self._LevelActor.SetVisibility(0)
self._WindowActor.SetVisibility(0)
self.Render()
def UpdateWindowLevelInteraction(self,x,y):
# compute normalized delta
dx = 4.0*(x - self._LastX)/self.winfo_width()*self._Window
dy = 4.0*(self._LastY - y)/self.winfo_height()*self._Level
# abs so that direction does not flip
if (self._Window < 0.0):
dx = -dx
if (self._Level < 0.0):
dy = -dy
# compute new window level
window = self._Window + dx
if (window < 0.0):
level = self._Level + dy
else:
level = self._Level - dy
viewer = self._ImageViewer
viewer.SetColorWindow(window)
viewer.SetColorLevel(level)
self._WindowMapper.SetInput("Window: %g" % window)
self._LevelMapper.SetInput("Level: %g" % level)
self.Render()
def ResetTkImageViewer(self):
# Reset: Set window level to show all values
viewer = self._ImageViewer
input = viewer.GetInput()
if (input == None):
return
# Get the extent in viewer
z = viewer.GetZSlice()
input.SetUpdateExtent(-99999,99999,-99999,99999,z,z)
input.Update()
(low,high) = input.GetScalarRange()
viewer.SetColorWindow(high - low)
viewer.SetColorLevel((high + low) * 0.5)
self.Render()
def StartQueryInteraction(self,x,y):
if not self._FocusOnEnter:
self._GrabFocus()
        # Query pixel value stuff
self._WindowActor.SetVisibility(1)
self.UpdateQueryInteraction(x,y)
def EndQueryInteraction(self):
self._WindowActor.SetVisibility(0)
self.Render()
def UpdateQueryInteraction(self,x,y):
viewer = self._ImageViewer
input = viewer.GetInput()
z = viewer.GetZSlice()
# y is flipped upside down
y = self.winfo_height() - y
# make sure point is in the whole extent of the image.
(xMin,xMax,yMin,yMax,zMin,zMax) = input.GetWholeExtent()
if (x < xMin or x > xMax or y < yMin or \
y > yMax or z < zMin or z > zMax):
return
input.SetUpdateExtent(x,x,y,y,z,z)
input.Update()
numComps = input.GetNumberOfScalarComponents()
text = ""
for i in xrange(numComps):
val = input.GetScalarComponentAsDouble(x,y,z,i)
text = "%s %.1f" % (text,val)
self._WindowMapper.SetInput("(%d, %d): %s" % (x,y,text))
self.Render()
#-----------------------------------------------------------------------------
# an example of how to use this widget
if __name__ == "__main__":
canvas = vtk.vtkImageCanvasSource2D()
canvas.SetNumberOfScalarComponents(3)
canvas.SetScalarType(3)
canvas.SetExtent(0,511,0,511,0,0)
canvas.SetDrawColor(100,100,0)
canvas.FillBox(0,511,0,511)
canvas.SetDrawColor(200,0,200)
canvas.FillBox(32,511,100,500)
canvas.SetDrawColor(100,0,0)
canvas.FillTube(550,20,30,400,5)
canvas.SetDrawColor(255,255,255)
canvas.DrawSegment3D(10,20,0,90,510,0)
canvas.SetDrawColor(200,50,50)
canvas.DrawSegment3D(510,90,0,10,20,0)
# Check segment clipping
canvas.SetDrawColor(0,200,0)
canvas.DrawSegment(-10,30,30,-10)
canvas.DrawSegment(-10,481,30,521)
canvas.DrawSegment(481,-10,521,30)
canvas.DrawSegment(481,521,521,481)
# Check Filling a triangle
canvas.SetDrawColor(20,200,200)
canvas.FillTriangle(-100,100,190,150,40,300)
# Check drawing a circle
canvas.SetDrawColor(250,250,10)
canvas.DrawCircle(350,350,200.0)
# Check drawing a point
canvas.SetDrawColor(250,250,250)
canvas.DrawPoint(350,350)
canvas.DrawPoint(350,550)
# Test filling functionality
canvas.SetDrawColor(55,0,0)
canvas.DrawCircle(450,350,80.0)
canvas.SetDrawColor(100,255,100)
canvas.FillPixel(450,350)
# Create the GUI: two renderer widgets and a quit button
frame = Tkinter.Frame()
widget = vtkTkImageViewerWidget(frame,width=512,height=512,double=1)
viewer = widget.GetImageViewer()
viewer.SetInput(canvas.GetOutput())
viewer.SetColorWindow(256)
viewer.SetColorLevel(127.5)
button = Tkinter.Button(frame,text="Quit",command=frame.quit)
widget.pack(side='top',padx=3,pady=3,fill='both',expand='t')
frame.pack(fill='both',expand='t')
button.pack(fill='x')
frame.mainloop()
|
{
"content_hash": "8d19dbd738a7b26ed75ec89c257688c4",
"timestamp": "",
"source": "github",
"line_count": 362,
"max_line_length": 78,
"avg_line_length": 30.433701657458563,
"alnum_prop": 0.5962603249523464,
"repo_name": "daviddoria/PointGraphsPhase1",
"id": "0dca9a7e3578037e750f5fd5fae084ed3b86c94a",
"size": "11017",
"binary": false,
"copies": "9",
"ref": "refs/heads/PointGraphsPhase1",
"path": "Wrapping/Python/vtk/tk/vtkTkImageViewerWidget.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "40489"
},
{
"name": "C",
"bytes": "43183331"
},
{
"name": "C++",
"bytes": "52316409"
},
{
"name": "Java",
"bytes": "97436"
},
{
"name": "Objective-C",
"bytes": "105122"
},
{
"name": "Perl",
"bytes": "174808"
},
{
"name": "Python",
"bytes": "1005369"
},
{
"name": "Shell",
"bytes": "22210"
},
{
"name": "Tcl",
"bytes": "1922521"
}
],
"symlink_target": ""
}
|
import rdflib
import unittest
import threading
def makeNode():
i = 0
while i < 9999:
i += 1
rdflib.term.BNode()
class TestRandomSeedInThread(unittest.TestCase):
def test_bnode_id_gen_in_thread(self):
"""
"""
th = threading.Thread(target=makeNode)
th.daemon = True
th.start()
makeNode()
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "e249849c639fd329e70bdbafa54b703d",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 48,
"avg_line_length": 16.64,
"alnum_prop": 0.5600961538461539,
"repo_name": "armandobs14/rdflib",
"id": "1feb0615b360e05dfb7d51504922cca04ea7a73e",
"size": "416",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "test/test_issue209.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "145"
},
{
"name": "HTML",
"bytes": "120202"
},
{
"name": "Python",
"bytes": "1436864"
},
{
"name": "Ruby",
"bytes": "28544"
},
{
"name": "Shell",
"bytes": "1052"
}
],
"symlink_target": ""
}
|
from . import chime as app
from flask import current_app, render_template, session, request
from urlparse import urlparse
from functools import wraps
from .error_functions import common_error_template_args, make_email_params, summarize_conflict_details, extract_branch_name_from_path
from .view_functions import common_template_args, get_repo
from .repo_functions import MergeConflict
def raise_if_debug(route_function):
''' Wrap error functions with this to manually raise the error
without routing it if the application is in debug mode.
'''
@wraps(route_function)
def decorated_function(*args, **kwargs):
if current_app.debug:
raise
return route_function(*args, **kwargs)
return decorated_function
@app.app_errorhandler(404)
def page_not_found(error):
''' Render a 404 error page
'''
repo = get_repo(flask_app=current_app)
kwargs = common_template_args(current_app.config, session)
kwargs.update(common_error_template_args(current_app.config))
# if we can extract a branch name from the path, construct an edit link for it
path = urlparse(request.url).path
branch_name = repo.active_branch.name
    if branch_name == current_app.config['default_branch']:
        branch_name = extract_branch_name_from_path(path)
if branch_name:
kwargs.update({"edit_path": u'/tree/{}/edit/'.format(branch_name)})
error_uuid = getattr(error, 'uuid', None)
template_message = u'(404) {}'.format(path)
kwargs.update({"message": template_message})
kwargs.update({"email_params": make_email_params(message=template_message, uuid=error_uuid)})
kwargs.update({'error_uuid': error_uuid})
return render_template('error_404.html', **kwargs), 404
@app.app_errorhandler(500)
@raise_if_debug
def internal_server_error(error):
''' Render a 500 error page
'''
kwargs = common_template_args(current_app.config, session)
kwargs.update(common_error_template_args(current_app.config))
kwargs.update({"show_merge_conflict": False})
path = urlparse(request.url).path
error_uuid = getattr(error, 'uuid', None)
template_message = u'(500) {}'.format(path)
kwargs.update({"message": template_message})
kwargs.update({"email_params": make_email_params(message=template_message, uuid=error_uuid)})
kwargs.update({'error_uuid': error_uuid})
return render_template('error_500.html', **kwargs), 500
@app.app_errorhandler(MergeConflict)
@raise_if_debug
def merge_conflict(error):
''' Render a 500 error page with merge conflict details
'''
kwargs = common_template_args(current_app.config, session)
kwargs.update(common_error_template_args(current_app.config))
kwargs.update({"conflict_files": summarize_conflict_details(error)})
kwargs.update({"show_merge_conflict": True})
message = u'\n'.join([u'{} {}'.format(item['actions'], item['path']) for item in error.files()])
error_uuid = getattr(error, 'uuid', None)
template_message = u'(MergeConflict)\n{}'.format(message)
kwargs.update({"message": template_message})
kwargs.update({"email_params": make_email_params(message=template_message, path=urlparse(request.url).path, uuid=error_uuid)})
kwargs.update({'error_uuid': error_uuid})
return render_template('error_500.html', **kwargs), 500
@app.app_errorhandler(Exception)
@raise_if_debug
def exception(error):
''' Render a 500 error page for exceptions not caught elsewhere
'''
error_class = type(error).__name__
kwargs = common_template_args(current_app.config, session)
kwargs.update(common_error_template_args(current_app.config))
try:
error_message = error.args[0]
    except IndexError:
error_message = u''
kwargs.update({"show_merge_conflict": False})
kwargs.update({"error_class": error_class})
error_uuid = getattr(error, 'uuid', None)
template_message = u'({}) {}'.format(error_class, error_message)
kwargs.update({"message": template_message})
kwargs.update({"email_params": make_email_params(message=template_message, path=urlparse(request.url).path, uuid=error_uuid)})
kwargs.update({'error_uuid': error_uuid})
return render_template('error_500.html', **kwargs), 500
|
{
"content_hash": "269e735cf21700ddee0cf9a58d4dff37",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 133,
"avg_line_length": 41.26923076923077,
"alnum_prop": 0.6966449207828518,
"repo_name": "chimecms/chime",
"id": "d4f1e3d9235ab6e8178e50adec0f866e867acb40",
"size": "4292",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "chime/errors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "496943"
},
{
"name": "HTML",
"bytes": "1361982"
},
{
"name": "JavaScript",
"bytes": "59686"
},
{
"name": "Python",
"bytes": "706324"
},
{
"name": "Ruby",
"bytes": "16142"
},
{
"name": "Shell",
"bytes": "2988"
}
],
"symlink_target": ""
}
|
"""Classes representing statistical distributions and ops for working with them.
See the @{$python/contrib.distributions} guide.
## Distribution Object
@@ReparameterizationType
@@Distribution
## Individual Distributions
@@Binomial
@@Bernoulli
@@BernoulliWithSigmoidProbs
@@Beta
@@BetaWithSoftplusConcentration
@@Categorical
@@Chi2
@@Chi2WithAbsDf
@@Deterministic
@@VectorDeterministic
@@Exponential
@@ExponentialWithSoftplusRate
@@Gamma
@@GammaWithSoftplusConcentrationRate
@@Geometric
@@InverseGamma
@@InverseGammaWithSoftplusConcentrationRate
@@Laplace
@@LaplaceWithSoftplusScale
@@Logistic
@@NegativeBinomial
@@Normal
@@NormalWithSoftplusScale
@@Poisson
@@StudentT
@@StudentTWithAbsDfSoftplusScale
@@Uniform
@@MultivariateNormalDiag
@@MultivariateNormalTriL
@@MultivariateNormalDiagPlusLowRank
@@MultivariateNormalDiagWithSoftplusScale
@@Dirichlet
@@DirichletMultinomial
@@Multinomial
@@WishartCholesky
@@WishartFull
@@TransformedDistribution
@@QuantizedDistribution
@@Mixture
@@ExpRelaxedOneHotCategorical
@@OneHotCategorical
@@RelaxedBernoulli
@@RelaxedOneHotCategorical
## Kullback-Leibler Divergence
@@kl
@@RegisterKL
## Helper Functions
@@matrix_diag_transform
@@normal_conjugates_known_scale_posterior
@@normal_conjugates_known_scale_predictive
@@softplus_inverse
## Functions for statistics of samples
@@percentile
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import,line-too-long,g-importing-member
from tensorflow.contrib.distributions.python.ops import bijectors
from tensorflow.contrib.distributions.python.ops.bernoulli import *
from tensorflow.contrib.distributions.python.ops.beta import *
from tensorflow.contrib.distributions.python.ops.binomial import *
from tensorflow.contrib.distributions.python.ops.categorical import *
from tensorflow.contrib.distributions.python.ops.chi2 import *
from tensorflow.contrib.distributions.python.ops.conditional_distribution import *
from tensorflow.contrib.distributions.python.ops.conditional_transformed_distribution import *
from tensorflow.contrib.distributions.python.ops.deterministic import *
from tensorflow.contrib.distributions.python.ops.dirichlet import *
from tensorflow.contrib.distributions.python.ops.dirichlet_multinomial import *
from tensorflow.contrib.distributions.python.ops.distribution import *
from tensorflow.contrib.distributions.python.ops.distribution_util import matrix_diag_transform
from tensorflow.contrib.distributions.python.ops.distribution_util import softplus_inverse
from tensorflow.contrib.distributions.python.ops.exponential import *
from tensorflow.contrib.distributions.python.ops.gamma import *
from tensorflow.contrib.distributions.python.ops.geometric import *
from tensorflow.contrib.distributions.python.ops.inverse_gamma import *
from tensorflow.contrib.distributions.python.ops.kullback_leibler import *
from tensorflow.contrib.distributions.python.ops.laplace import *
from tensorflow.contrib.distributions.python.ops.logistic import *
from tensorflow.contrib.distributions.python.ops.mixture import *
from tensorflow.contrib.distributions.python.ops.multinomial import *
from tensorflow.contrib.distributions.python.ops.mvn_diag import *
from tensorflow.contrib.distributions.python.ops.mvn_diag_plus_low_rank import *
from tensorflow.contrib.distributions.python.ops.mvn_tril import *
from tensorflow.contrib.distributions.python.ops.negative_binomial import *
from tensorflow.contrib.distributions.python.ops.normal import *
from tensorflow.contrib.distributions.python.ops.normal_conjugate_posteriors import *
from tensorflow.contrib.distributions.python.ops.onehot_categorical import *
from tensorflow.contrib.distributions.python.ops.poisson import *
from tensorflow.contrib.distributions.python.ops.quantized_distribution import *
from tensorflow.contrib.distributions.python.ops.relaxed_bernoulli import *
from tensorflow.contrib.distributions.python.ops.relaxed_onehot_categorical import *
from tensorflow.contrib.distributions.python.ops.sample_stats import *
from tensorflow.contrib.distributions.python.ops.student_t import *
from tensorflow.contrib.distributions.python.ops.transformed_distribution import *
from tensorflow.contrib.distributions.python.ops.uniform import *
from tensorflow.contrib.distributions.python.ops.wishart import *
# pylint: enable=unused-import,wildcard-import,line-too-long,g-importing-member
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = [
'bijectors',
'ConditionalDistribution',
'ConditionalTransformedDistribution',
'FULLY_REPARAMETERIZED',
'NOT_REPARAMETERIZED',
]
remove_undocumented(__name__, _allowed_symbols)
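# A minimal usage sketch, not part of this module: assuming a TF 1.x
# session-based environment, the exported distributions are typically
# consumed like this:
#
#   import tensorflow as tf
#   ds = tf.contrib.distributions
#   dist = ds.Normal(loc=0., scale=1.)
#   samples = dist.sample(5)
#   log_probs = dist.log_prob(samples)
#   with tf.Session() as sess:
#       print(sess.run([samples, log_probs]))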
|
{
"content_hash": "1230513af53c4687aad2bdc9bb69f80c",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 95,
"avg_line_length": 36.25954198473283,
"alnum_prop": 0.8347368421052631,
"repo_name": "whn09/tensorflow",
"id": "257aefa8572351f00ddcb77d2a49c4d79d660826",
"size": "5439",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/distributions/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7481"
},
{
"name": "C",
"bytes": "182478"
},
{
"name": "C++",
"bytes": "23440224"
},
{
"name": "CMake",
"bytes": "158302"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "804382"
},
{
"name": "HTML",
"bytes": "654838"
},
{
"name": "Java",
"bytes": "286562"
},
{
"name": "JavaScript",
"bytes": "14005"
},
{
"name": "Jupyter Notebook",
"bytes": "1833654"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "37302"
},
{
"name": "Objective-C",
"bytes": "7037"
},
{
"name": "Objective-C++",
"bytes": "64166"
},
{
"name": "Protocol Buffer",
"bytes": "213841"
},
{
"name": "Python",
"bytes": "20372706"
},
{
"name": "Shell",
"bytes": "335987"
},
{
"name": "TypeScript",
"bytes": "1108203"
}
],
"symlink_target": ""
}
|
import atexit
import sys
def all_done():
print('all_done()')
print('Registering')
atexit.register(all_done)
print('Registered')
print('Exiting...')
sys.exit()
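# Expected behavior: 'Registering', 'Registered' and 'Exiting...' print
# first; sys.exit() then triggers interpreter shutdown, at which point the
# registered all_done() callback runs and prints 'all_done()'.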
|
{
"content_hash": "decb0cecd692d2c93f9fe581501206cc",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 25,
"avg_line_length": 10.625,
"alnum_prop": 0.6705882352941176,
"repo_name": "jasonwee/asus-rt-n14uhp-mrtg",
"id": "ccf8d406a19e9d737827d3cd4cf0de4bd966a7f7",
"size": "172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/lesson_application_building_blocks/atexit_sys_exit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "45876"
},
{
"name": "HTML",
"bytes": "107072"
},
{
"name": "JavaScript",
"bytes": "161335"
},
{
"name": "Python",
"bytes": "6923750"
},
{
"name": "Shell",
"bytes": "7616"
}
],
"symlink_target": ""
}
|
stringVar = S('"a String"', "application/json")
booleanVar = S(False, "application/json")
integerVar = S(42, "application/json")
listVar = S(["Waldo", "Hugo", "Kermit"], "application/json")
|
{
"content_hash": "5622df16b08b57db7b0ae31d33f1ae35",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 60,
"avg_line_length": 27.571428571428573,
"alnum_prop": 0.6683937823834197,
"repo_name": "camunda/camunda-spin",
"id": "ba873f16e602782522fe5f2a908e65483c6fb0ff",
"size": "193",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dataformat-json-jackson/src/test/resources/org/camunda/spin/python/json/tree/JsonTreeMapObjectToJsonPythonTest.shouldMapPrimitives.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "24425"
},
{
"name": "Java",
"bytes": "714834"
},
{
"name": "JavaScript",
"bytes": "41714"
},
{
"name": "Python",
"bytes": "20444"
},
{
"name": "Ruby",
"bytes": "20491"
},
{
"name": "XSLT",
"bytes": "299"
}
],
"symlink_target": ""
}
|
"""
Django settings for gomera_app project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')]
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '116=fnvh6%rpbmxsbzlacw36ad0c#7$njq=h+v!kcikr52+357'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'gomera',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'gomera_app.urls'
WSGI_APPLICATION = 'gomera_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = ('static',)
|
{
"content_hash": "324292cb150cbfd3c395417561c7f82e",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 71,
"avg_line_length": 24.785714285714285,
"alnum_prop": 0.7219020172910663,
"repo_name": "amartinez1/geo-clinic",
"id": "c55b2649f6726a4023a29840b04261489bbff6fd",
"size": "2082",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gomera_app/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "120236"
},
{
"name": "JavaScript",
"bytes": "9537"
},
{
"name": "Python",
"bytes": "4594"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django.dispatch import Signal
# signal used when an Event has been created or updated
event_updated = Signal(providing_args=['instance'])
# signal used when an EventGroup has been created or updated
eventgroup_updated = Signal(providing_args=['instance'])
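# A minimal receiver sketch, not part of this module ('reindex_event' is a
# hypothetical handler):
#
#   from talks.events.signals import event_updated
#
#   def reindex_event(sender, instance, **kwargs):
#       ...  # e.g. push the updated Event to a search index
#
#   event_updated.connect(reindex_event)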
|
{
"content_hash": "233a0a37c82005d045f1d2ce087e1736",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 60,
"avg_line_length": 37.75,
"alnum_prop": 0.7814569536423841,
"repo_name": "ox-it/talks.ox",
"id": "26535ff3b22d6d7319fb339f6e8ed963f92be375",
"size": "302",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "talks/events/signals.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "23891"
},
{
"name": "Dockerfile",
"bytes": "750"
},
{
"name": "HTML",
"bytes": "117234"
},
{
"name": "JavaScript",
"bytes": "98316"
},
{
"name": "Makefile",
"bytes": "417"
},
{
"name": "Python",
"bytes": "312877"
},
{
"name": "RobotFramework",
"bytes": "18436"
}
],
"symlink_target": ""
}
|
__all__ = [
"timeseries",
"exponential_distributions",
"harmonic_oscillators",
"gaussian_work",
"HarmonicOscillatorsTestCase",
"ExponentialTestCase",
]
from pymbar.testsystems.harmonic_oscillators import HarmonicOscillatorsTestCase
from pymbar.testsystems.exponential_distributions import ExponentialTestCase
from pymbar.testsystems.timeseries import correlated_timeseries_example
from pymbar.testsystems.gaussian_work import gaussian_work_example
|
{
"content_hash": "c04c23f9d46f4b2f08ad99c2b8ad443e",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 79,
"avg_line_length": 36.38461538461539,
"alnum_prop": 0.8012684989429175,
"repo_name": "Lnaden/pymbar",
"id": "8fcf1aba775de491fd963f5769fe7a84b0947dcb",
"size": "473",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pymbar/testsystems/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1893"
},
{
"name": "HTML",
"bytes": "81"
},
{
"name": "PowerShell",
"bytes": "3091"
},
{
"name": "Python",
"bytes": "490293"
},
{
"name": "Shell",
"bytes": "2228"
}
],
"symlink_target": ""
}
|
"""GPU implementation of //testing/skia_gold_common/skia_gold_properties.py."""
import subprocess
import sys
from typing import Optional
import gpu_path_util
from skia_gold_common import skia_gold_properties
class GpuSkiaGoldProperties(skia_gold_properties.SkiaGoldProperties):
@staticmethod
def _GetGitOriginMainHeadSha1() -> Optional[str]:
try:
return subprocess.check_output(
['git', 'rev-parse', 'origin/main'],
shell=_IsWin(),
cwd=gpu_path_util.CHROMIUM_SRC_DIR).decode('utf-8').strip()
except subprocess.CalledProcessError:
return None
def _IsWin() -> bool:
return sys.platform == 'win32'
|
{
"content_hash": "57a238488693aabf9bdfeca56cb9b37b",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 79,
"avg_line_length": 26.32,
"alnum_prop": 0.7021276595744681,
"repo_name": "chromium/chromium",
"id": "2947eabf3ea462cfec04eeddc161a3dedf7732ad",
"size": "798",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "content/test/gpu/gpu_tests/skia_gold/gpu_skia_gold_properties.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""Setup script for pexif"""
from distutils.core import setup

version = "0.15"
setup (
name = "pexif",
version = version,
description = "A module for editing JPEG EXIF data",
long_description = "This module allows you to parse and edit the EXIF data tags in a JPEG image.",
author = "Ben Leslie",
author_email = "benno@benno.id.au",
url = "http://www.benno.id.au/code/pexif/",
license = "http://www.opensource.org/licenses/mit-license.php",
py_modules = ["pexif"],
scripts = ["scripts/dump_exif.py", "scripts/setgps.py", "scripts/getgps.py", "scripts/noop.py",
"scripts/timezone.py", "scripts/remove_metadata.py"],
platforms = ["any"],
classifiers = ["Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Operating System :: OS Independent",
"Programming Language :: Python",
"License :: OSI Approved :: MIT License",
"Topic :: Multimedia :: Graphics"]
)
|
{
"content_hash": "73d8de866a20fe9780068ebc4b83ed34",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 102,
"avg_line_length": 39.84615384615385,
"alnum_prop": 0.5907335907335908,
"repo_name": "bennoleslie/pexif",
"id": "be98223a2a4c47e456790439ed1ad4ea84f648e5",
"size": "1059",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "64265"
}
],
"symlink_target": ""
}
|
import logging
import fmcapi
import time
def test__protocol_port(fmc):
logging.info("Test ProtocolPort. Post, get, put, delete Port Objects.")
starttime = str(int(time.time()))
namer = f"_fmcapi_test_{starttime}"
obj1 = fmcapi.ProtocolPortObjects(fmc=fmc)
obj1.name = namer
obj1.port = "1234"
obj1.protocol = "TCP"
obj1.post()
time.sleep(1)
del obj1
obj1 = fmcapi.ProtocolPortObjects(fmc=fmc, name=namer)
obj1.get()
obj1.port = "5678"
obj1.put()
time.sleep(1)
obj1.delete()
logging.info("Test ProtocolPort done.\n")
|
{
"content_hash": "dcd3972354b7694acebba2a4a12b98fe",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 76,
"avg_line_length": 23.56,
"alnum_prop": 0.6468590831918506,
"repo_name": "daxm/fmcapi",
"id": "13c34311cda0a338835373cf83a0bd2e4d0ee2aa",
"size": "589",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unit_tests/protocol_port.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "985"
},
{
"name": "Python",
"bytes": "572788"
},
{
"name": "Shell",
"bytes": "1591"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import sys
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if PY2:
if PYTHON_VERSION < (2, 7, 9):
raise Exception('Must use Python 2.7.9 or later')
elif PYTHON_VERSION < (3, 4):
raise Exception('Must use Python 3.4 or later')
import hpOneView as hpov
from pprint import pprint
import re
import json
import hpOneView.profile as profile
def acceptEULA(con):
    # See if we need to accept the EULA before we try to log in
    try:
        if con.get_eula_status() is True:
            print('EULA display needed')
            con.set_eula('no')
except Exception as e:
print('EXCEPTION:')
print(e)
def login(con, credential):
# Login with given credentials
try:
con.login(credential)
    except Exception:
print('Login failed')
def get_server(con, srv, server_id, server_hwt, forcePowerOff):
sht = None
if server_id.upper() == 'UNASSIGNED':
server_hw_types = srv.get_server_hardware_types()
for ht in server_hw_types:
if ht['name'] == server_hwt:
sht = con.get(ht['uri'])
if not sht:
print('Error, server hardware type not found')
sys.exit()
return None, sht
    # Get handle for named server and power off if necessary
servers = srv.get_servers()
located_server = None
for server in servers:
ips = server['mpHostInfo']['mpIpAddresses']
for ip in ips:
if server_id == server['name'] or server_id == ip['address']:
located_server = server
if server['state'] != 'NoProfileApplied':
print('\nError: server', server_id, 'already has a profile '
'defined or is being monitored\n')
sys.exit(1)
if server['powerState'] == 'On':
if forcePowerOff:
srv.set_server_powerstate(server, 'Off', force=True)
else:
print('Error: Server', server_id,
' needs to be powered off')
sys.exit(1)
break
if not located_server:
print('Server ', server_id, ' not found')
sys.exit(1)
sht = con.get(located_server['serverHardwareTypeUri'])
if not sht:
print('Error, server hardware type not found')
sys.exit()
return located_server, sht
def define_profile(con, srv, affinity, name, desc, server, sht, boot, bootmode,
fw, hide_flexnics, local_storage, conn_list, san_list, bios_list):
if conn_list:
# read connection list from file
conn = json.loads(open(conn_list).read())
else:
conn = []
if san_list:
# read connection list from file
san = json.loads(open(san_list).read())
else:
san = None
# Affinity is only supported on Blade Servers so set it to None if the
# server hardware type model does not match BL
    p = re.compile(r'.*BL\d.*', re.IGNORECASE)
match = p.match(sht['model'])
if not match:
affinity = None
if server:
serverHardwareUri = server['uri']
else:
serverHardwareUri = None
if conn:
macType = 'Virtual'
wwnType = 'Virtual'
else:
macType = 'Physical'
wwnType = 'Physical'
profile = srv.create_server_profile(affinity=affinity,
biosSettings=bios_list,
bootSettings=boot,
bootModeSetting=bootmode,
profileConnectionV4=conn,
description=desc,
firmwareSettingsV3=fw,
hideUnusedFlexNics=hide_flexnics,
localStorageSettingsV3=local_storage,
macType=macType,
name=name,
sanStorageV3=san,
serverHardwareUri=serverHardwareUri,
serverHardwareTypeUri=sht['uri'],
wwnType=wwnType)
if 'serialNumberType' in profile:
print('\n\nName: ', profile['name'])
print('Description: ', profile['description'])
print('Type: ', profile['type'])
print('wwnType: ', profile['wwnType'])
print('macType: ', profile['macType'])
print('serialNumberType: ', profile['serialNumberType'])
print('Firmware:')
print(' manageFirmware: ', profile['firmware']['manageFirmware'])
print(' forceInstallFirmware: ', profile['firmware']['forceInstallFirmware'])
print(' firmwareBaselineUri: ', profile['firmware']['firmwareBaselineUri'])
print('Bios:')
print(' manageBios: ', profile['bios']['manageBios'])
print(' overriddenSettings: ', profile['bios']['overriddenSettings'])
print('Boot:')
print(' manageBoot: ', profile['boot']['manageBoot'])
print(' order: ', profile['boot']['order'], '\n')
else:
        pprint(profile)
def main():
parser = argparse.ArgumentParser(add_help=True,
formatter_class=argparse.RawTextHelpFormatter,
description='''
Define a server profile''')
parser.add_argument('-a', dest='host', required=True,
help='''
HPE OneView Appliance hostname or IP address''')
parser.add_argument('-u', dest='user', required=False,
default='Administrator',
help='''
HPE OneView Username''')
parser.add_argument('-p', dest='passwd', required=True,
help='''
HPE OneView Password''')
parser.add_argument('-c', dest='cert', required=False,
help='''
Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
parser.add_argument('-y', dest='proxy', required=False,
help='''
Proxy (host:port format''')
parser.add_argument('-j', dest='domain', required=False,
default='Local',
help='''
HPE OneView Authorized Login Domain''')
parser.add_argument('-n', dest='name',
required=True,
help='''
Name of the profile''')
parser.add_argument('-d', dest='desc',
required=False,
help='''
Description for the server profile''')
parser.add_argument('-af', dest='affinity',
required=False, choices=['Bay', 'BayAndServer'],
default='Bay',
help='''
This identifies the behavior of the server profile when the server
hardware is removed or replaced.
. Bay: This profile remains with the device bay when the server
hardware is removed or replaced.
. BayAndServer This profile is pinned to both the device bay and
specific server hardware.''')
parser.add_argument('-f', dest='forcePowerOff',
required=False,
action='store_true',
help='''
When set, forces power off of target server.
Avoids error exit if server is up''')
parser.add_argument('-fw', dest='baseline', required=False,
help='''
SPP Baseline file name. e.g. SPP2013090_2013_0830_30.iso''')
parser.add_argument('-mb', dest='disable_manage_boot',
action='store_true',
help='''
Explicitly DISABLE Boot Order Management. This value is enabled by
default and required for Connection boot enablement. If this option is
disabled, then PXE and FC BfS settings are disabled within the entire
Server Profile.''')
parser.add_argument('-bo', dest='boot_order', required=False,
nargs='+',
help='''
Defines the order in which boot will be attempted on the available
devices. Please NOTE the supported boot order is server hardware type
specific. For Gen7 and Gen8 server hardware the possible values are 'CD',
'Floppy', 'USB', 'HardDisk', and 'PXE'. For Gen9 BL server hardware in
Legacy BIOS boot mode, the possible values are 'CD', 'USB', 'HardDisk',
and 'PXE'. For Gen9 BL server hardware in UEFI or UEFI Optimized boot
mode, only one value is allowed and must be either 'HardDisk' or 'PXE'.
For Gen9 DL server hardware in Legacy BIOS boot mode, the possible
values are 'CD', 'USB', 'HardDisk', and 'PXE'. For Gen9 DL server
hardware in UEFI or UEFI Optimized boot mode, boot order configuration
is not supported.
Server boot order defined as a list separated by spaces. For example:
Gen7/8 BIOS Default Boot Order:
-bo CD Floppy USB HardDisk PXE
Gen9 Legacy BIOS Boot Order:
-bo CD USB HardDisk PXE
Gen9 UEFI Default Boot Order:
-bo HardDisk
''')
parser.add_argument('-cl', dest='conn_list',
required=False,
help='''
File with list of connections for this profile in JSON format. This file
can be created with multiple calls to define-connection-list.py''')
parser.add_argument('-sl', dest='san_list',
required=False,
help='''
File with list of SAN Storage connections for this profile in JSON format.
This file can be created with multiple calls to
define-san-storage-list.py''')
parser.add_argument('-bm', dest='boot_mode', required=False,
choices=['UEFI', 'UEFIOptimized', 'BIOS'],
default='BIOS',
help='''
Specify the Gen9 Boot Environment.
Sets the boot mode as one of the following:
. UEFI
. UEFIOptimized
. BIOS
If you select UEFI or UEFI optimized for an HPE ProLiant DL Gen9 rack
mount server, the remaining boot setting available is the PXE boot policy.
For the UEFI or UEFI optimized boot mode options, the boot mode choice
should be based on the expected OS and required boot features for the
server hardware. UEFI optimized boot mode reduces the time the system
spends in POST(Video driver initialization). In order to select the
appropriate boot mode, consider the following:
. If a secure boot is required, the boot mode must be set to UEFI
or UEFI optimized .
. For operating systems that do not support UEFI (such as DOS, or
older versions of Windows and Linux), the boot mode must be set
to BIOS.
. When booting in UEFI mode, Windows 7, Server 2008, or 2008 R2
should not be set to UEFIOptimized.''')
parser.add_argument('-px', dest='pxe', required=False,
choices=['Auto', 'IPv4', 'IPv6',
'IPv4ThenIPv6', 'IPv6ThenIPv4'],
default='IPv4',
help='''
Controls the ordering of the network modes available to the Flexible
LOM (FLB); for example, IPv4 and IPv6.
Select from the following policies:
. Auto
. IPv4 only
. IPv6 only
. IPv4 then IPv6
. IPv6 then IPv4
Setting the policy to Auto means the order of the existing network boot
targets in the UEFI Boot Order list will not be modified, and any new
network boot targets will be added to the end of the list using the
System ROM's default policy.''')
parser.add_argument('-rl', dest='raidlevel', required=False,
choices=['NONE', 'RAID0', 'RAID1'],
help='''
Enable local storage to be managed via the server profile by defining the
RAID level for the logical drive.''')
parser.add_argument('-pn', dest='physnum', required=False,
help='''
The number of physical drives to be used to build the logical drive. The
provided values must be consistent with the selected RAID level and cannot
exceed the maximum supported number of drives for the selected server
hardware type.''')
parser.add_argument('-lb', dest='lboot', required=False,
action='store_true',
help='''
Mark the logical drive as NOT bootable''')
parser.add_argument('-is', dest='init_storage', required=False,
action='store_true',
help='''
Indicates whether the local storage controller should be reset to factory
defaults before applying the local storage settings from the server
profile.
***************** WARNING *****************
Setting this will overwrite an existing logical
disk if present, and without further warning.
***************** WARNING *****************''')
parser.add_argument('-hn', dest='hide_flexnics', required=False,
action='store_false',
help='''
This setting controls the enumeration of physical functions that do not
correspond to connections in a profile. Using this flag will SHOW unused
FlexNICs to the Operating System. Changing this setting may alter the order
of network interfaces in the Operating System. This option sets the 'Hide
Unused FlexNICs' to disabled, so eight FlexNICs will be enumerated in the
Operating System as network interfaces for each Flex-10 or FlexFabric
adapter. Configuring Fibre Channel connections on a FlexFabric adapter may
enumerate two storage interfaces, reducing the number of network interfaces
to six. The default (this option is not selected) enables 'Hide Unused
FlexNICs' and may suppress enumeration of FlexNICs that do not correspond
to profile connections. FlexNICs are hidden in pairs, starting with the 4th
pair. For instance, if the 4th FlexNIC on either physical port corresponds
to a profile connection, all eight physical functions are enumerated. If a
profile connection corresponds to the 2nd FlexNIC on either physical port,
but no connection corresponds to the 3rd or 4th FlexNIC on either physical
port, only the 1st and 2nd physical functions are enumerated in the
Operating System.''')
parser.add_argument('-s', dest='server_id', required=True,
help='''
Server identification. There are multiple ways to specify the server id:
. Hostname or IP address of the stand-alone server iLO
. Server Hardware name of a server than has already been imported
into HPE OneView and is listed under Server Hardware
. "UNASSIGNED" for creating an unassigned Server Profile''')
parser.add_argument('-sh', dest='server_hwt', required=False,
help='''
Server hardware type is required for defining an unassigned profile. Note
the Server Hardware Type must be present in the HPE OneView appliance
before it can be used. For example, a single server with the specific server
hardware type must have been added to OneView for that hardware type to
be used. The example script get-server-hardware-types.py with the -l
argument can be used to get a list of server hardware types that have
been imported into the OneView appliance''')
parser.add_argument('-bl', dest='bios_list',
required=False,
help='''
File in JSON format with list of BIOS settings to override for this profile. This file
can be created with a call to get-bios-options.py''')
args = parser.parse_args()
credential = {'authLoginDomain': args.domain.upper(), 'userName': args.user, 'password': args.passwd}
con = hpov.connection(args.host)
srv = hpov.servers(con)
sts = hpov.settings(con)
if args.proxy:
con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
if args.cert:
con.set_trusted_ssl_bundle(args.cert)
login(con, credential)
acceptEULA(con)
# Invert the boolean value
args.lboot = not args.lboot
if args.boot_order and args.disable_manage_boot:
print('Error: Managed Boot must be enabled to define a boot order')
sys.exit()
if args.server_id.upper() == 'UNASSIGNED' and not args.server_hwt:
        print('Error: Server Hardware Type must be specified when defining an '
              'unassigned server profile')
sys.exit()
server, sht = get_server(con, srv, args.server_id, args.server_hwt,
args.forcePowerOff)
boot, bootmode = profile.make_boot_settings_dict(srv, sht, args.disable_manage_boot,
args.boot_order, args.boot_mode, args.pxe)
fw_settings = profile.make_firmware_dict(sts, args.baseline)
local_storage = profile.make_local_storage_dict(sht, args.raidlevel, args.lboot,
args.init_storage, args.physnum)
bios = profile.make_bios_dict(args.bios_list)
define_profile(con, srv, args.affinity, args.name, args.desc, server, sht,
boot, bootmode, fw_settings, args.hide_flexnics,
local_storage, args.conn_list, args.san_list, bios)
if __name__ == '__main__':
import argparse
sys.exit(main())
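# Example invocation (hypothetical host, credentials and file names):
#   python define-profile.py -a oneview.example.com -u Administrator \
#       -p secret -n web-server-01 -s 192.0.2.10 -cl connections.json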
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
{
"content_hash": "aa410d4d6b9e9d612d38ec70bf58785c",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 105,
"avg_line_length": 43.298329355608594,
"alnum_prop": 0.5887443501267776,
"repo_name": "andreadean5/python-hpOneView",
"id": "a2cf6b2fea11c5a27b7d43f24c0f23bdc2bcbfef",
"size": "19299",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/scripts/define-profile.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "920844"
}
],
"symlink_target": ""
}
|
import mock
import pytest
import sqlalchemy as sa
import sqlalchemy.dialects.mysql
import sqlalchemy.dialects.oracle
import sqlalchemy.dialects.postgresql
import sqlalchemy.dialects.sqlite
from sqlalchemy import inspect
from sqlalchemy_utils import Password, PasswordType, types # noqa
@pytest.fixture
def extra_kwargs():
"""PasswordType extra keyword arguments."""
return {}
@pytest.fixture
def User(Base, extra_kwargs):
class User(Base):
__tablename__ = 'user'
id = sa.Column(sa.Integer, primary_key=True)
password = sa.Column(PasswordType(
schemes=[
'pbkdf2_sha512',
'pbkdf2_sha256',
'md5_crypt',
'hex_md5'
],
deprecated=['md5_crypt', 'hex_md5'],
**extra_kwargs
))
def __repr__(self):
return 'User(%r)' % self.id
return User
@pytest.fixture
def init_models(User):
pass
def onload_callback(schemes, deprecated):
"""
Get onload callback that takes the PasswordType arguments from the config.
"""
def onload(**kwargs):
kwargs['schemes'] = schemes
kwargs['deprecated'] = deprecated
return kwargs
return onload
@pytest.mark.skipif('types.password.passlib is None')
class TestPasswordType(object):
@pytest.mark.parametrize('dialect_module,impl', [
(sqlalchemy.dialects.sqlite, sa.dialects.sqlite.BLOB),
(sqlalchemy.dialects.postgresql, sa.dialects.postgresql.BYTEA),
(sqlalchemy.dialects.oracle, sa.dialects.oracle.RAW),
(sqlalchemy.dialects.mysql, sa.VARBINARY),
])
def test_load_dialect_impl(self, dialect_module, impl):
"""
Should produce the same impl type as Alembic would expect after
    inspecting a database.
"""
password_type = PasswordType()
assert isinstance(
password_type.load_dialect_impl(dialect_module.dialect()),
impl
)
def test_encrypt(self, User):
"""Should encrypt the password on setting the attribute."""
obj = User()
obj.password = b'b'
assert obj.password.hash != 'b'
assert obj.password.hash.startswith(b'$pbkdf2-sha512$')
def test_check(self, session, User):
"""
Should be able to compare the plaintext against the
encrypted form.
"""
obj = User()
obj.password = 'b'
assert obj.password == 'b'
assert obj.password != 'a'
session.add(obj)
session.commit()
obj = session.query(User).get(obj.id)
assert obj.password == b'b'
assert obj.password != 'a'
def test_check_and_update(self, User):
"""
Should be able to compare the plaintext against a deprecated
encrypted form and have it auto-update to the preferred version.
"""
from passlib.hash import md5_crypt
obj = User()
obj.password = Password(md5_crypt.encrypt('b'))
assert obj.password.hash.decode('utf8').startswith('$1$')
assert obj.password == 'b'
assert obj.password.hash.decode('utf8').startswith('$pbkdf2-sha512$')
def test_auto_column_length(self, User):
"""Should derive the correct column length from the specified schemes.
"""
from passlib.hash import pbkdf2_sha512
kind = inspect(User).c.password.type
# name + rounds + salt + hash + ($ * 4) of largest hash
expected_length = len(pbkdf2_sha512.name)
expected_length += len(str(pbkdf2_sha512.max_rounds))
expected_length += pbkdf2_sha512.max_salt_size
expected_length += pbkdf2_sha512.encoded_checksum_size
expected_length += 4
assert kind.length == expected_length
def test_without_schemes(self):
assert PasswordType(schemes=[]).length == 1024
def test_compare(self, User):
from passlib.hash import md5_crypt
obj = User()
obj.password = Password(md5_crypt.encrypt('b'))
other = User()
other.password = Password(md5_crypt.encrypt('b'))
        # md5_crypt salts each hash randomly, so two hashes of the same
        # plaintext differ and the Password objects compare unequal.
        assert obj.password != other.password
def test_set_none(self, session, User):
obj = User()
obj.password = None
assert obj.password is None
session.add(obj)
session.commit()
obj = session.query(User).get(obj.id)
assert obj.password is None
def test_update_none(self, session, User):
"""
Should be able to change a password from ``None`` to a valid
password.
"""
obj = User()
obj.password = None
session.add(obj)
session.commit()
obj = session.query(User).get(obj.id)
obj.password = 'b'
session.commit()
def test_compare_none(self, User):
"""
Should be able to compare a password of ``None``.
"""
obj = User()
obj.password = None
assert obj.password is None
assert obj.password == None # noqa
obj.password = 'b'
assert obj.password is not None
assert obj.password != None # noqa
def test_check_and_update_persist(self, session, User):
"""
When a password is compared, the hash should update if needed to
change the algorithm; and, commit to the database.
"""
from passlib.hash import md5_crypt
obj = User()
obj.password = Password(md5_crypt.encrypt('b'))
session.add(obj)
session.commit()
assert obj.password.hash.decode('utf8').startswith('$1$')
assert obj.password == 'b'
session.commit()
obj = session.query(User).get(obj.id)
assert obj.password.hash.decode('utf8').startswith('$pbkdf2-sha512$')
assert obj.password == 'b'
@pytest.mark.parametrize(
'extra_kwargs',
[
dict(
onload=onload_callback(
schemes=['pbkdf2_sha256'],
deprecated=[],
)
)
]
)
def test_lazy_configuration(self, User):
"""
Field should be able to read the passlib attributes lazily from the
config (e.g. Flask config).
"""
schemes = User.password.type.context.schemes()
assert tuple(schemes) == ('pbkdf2_sha256',)
obj = User()
obj.password = b'b'
assert obj.password.hash.decode('utf8').startswith('$pbkdf2-sha256$')
@pytest.mark.parametrize('max_length', [1, 103])
def test_constant_length(self, max_length):
"""
Test that constant max_length is applied.
"""
typ = PasswordType(max_length=max_length)
assert typ.length == max_length
def test_context_is_lazy(self):
"""
Make sure the init doesn't evaluate the lazy context.
"""
onload = mock.Mock(return_value={})
PasswordType(onload=onload)
assert not onload.called
|
{
"content_hash": "46988d0fe7531ac534fdab68839b72e4",
"timestamp": "",
"source": "github",
"line_count": 257,
"max_line_length": 78,
"avg_line_length": 27.6147859922179,
"alnum_prop": 0.5879949274341271,
"repo_name": "konstantinoskostis/sqlalchemy-utils",
"id": "eea3b32450f74e5621a514f27e64a4417710adcf",
"size": "7097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/types/test_password.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "547618"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from urllib import *
from ..version_info import PY2
if PY2:
from . import error, parse, request, robotparser
|
{
"content_hash": "a7e45188f0a3af720b14eea76fa7c7ec",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 52,
"avg_line_length": 19.375,
"alnum_prop": 0.7290322580645161,
"repo_name": "AbsoluteMSTR/pies",
"id": "d6a12ffb761600b53331d397f4f7e9fefab507dd",
"size": "155",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "pies/urllib/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31432"
},
{
"name": "Shell",
"bytes": "1169"
}
],
"symlink_target": ""
}
|
import os
from cmt.components import ChannelsDynamWave as Component
from . import example_dir
cfg_file = os.path.join(example_dir, 'June_20_67_channels_dynamic_wave.cfg')
def test_irf():
component = Component()
component.initialize(cfg_file)
component.update(1.0)
component.finalize()
|
{
"content_hash": "51b874a06fd5c5bada6aa4120108c09f",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 76,
"avg_line_length": 23.46153846153846,
"alnum_prop": 0.7344262295081967,
"repo_name": "Elchin/topoflow-cmi-testing",
"id": "b5d9a6aad5b97099fc82cf283beaf7c1f04af04d",
"size": "328",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_channels_dynamic_wave.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10117"
}
],
"symlink_target": ""
}
|
from paddle.trainer_config_helpers import *
settings(
batch_size=1000,
learning_rate=1e-5
)
din = data_layer(name='data', size=30)
seq_op = [
first_seq,
last_seq
]
agg_level = [
AggregateLevel.EACH_SEQUENCE,
AggregateLevel.EACH_TIMESTEP
]
opts = []
for op in seq_op:
for al in agg_level:
opts.append(op(input=din, agg_level=al))
outputs(opts)
|
{
"content_hash": "48b779eb4ed196d81586d3f1235bc0cd",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 48,
"avg_line_length": 14.807692307692308,
"alnum_prop": 0.6493506493506493,
"repo_name": "zuowang/Paddle",
"id": "d54a1c49fd3fdf9eb8a675dd94561e6da5b310bc",
"size": "385",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/paddle/trainer_config_helpers/tests/configs/last_first_seq.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "210539"
},
{
"name": "C++",
"bytes": "2694261"
},
{
"name": "CMake",
"bytes": "90483"
},
{
"name": "Cuda",
"bytes": "409839"
},
{
"name": "M4",
"bytes": "39963"
},
{
"name": "Perl",
"bytes": "11412"
},
{
"name": "Python",
"bytes": "826795"
},
{
"name": "Shell",
"bytes": "59063"
}
],
"symlink_target": ""
}
|
import logging
import pprint
import shlex
import sys
from telemetry.core import exceptions
from telemetry.core import util
from telemetry import decorators
from telemetry.internal.backends import browser_backend
from telemetry.internal.backends.chrome import extension_backend
from telemetry.internal.backends.chrome import tab_list_backend
from telemetry.internal.backends.chrome_inspector import devtools_client_backend
from telemetry.internal.browser import user_agent
from telemetry.internal.browser import web_contents
from telemetry.testing import options_for_unittests
import py_utils
class ChromeBrowserBackend(browser_backend.BrowserBackend):
"""An abstract class for chrome browser backends. Provides basic functionality
once a remote-debugger port has been established."""
# It is OK to have abstract methods. pylint: disable=abstract-method
def __init__(self, platform_backend, supports_tab_control,
supports_extensions, browser_options):
super(ChromeBrowserBackend, self).__init__(
platform_backend=platform_backend,
supports_extensions=supports_extensions,
browser_options=browser_options,
tab_list_backend=tab_list_backend.TabListBackend)
self._port = None
self._browser_target = None
self._supports_tab_control = supports_tab_control
self._devtools_client = None
self._output_profile_path = browser_options.output_profile_path
self._extensions_to_load = browser_options.extensions_to_load
if (self.browser_options.dont_override_profile and
not options_for_unittests.AreSet()):
sys.stderr.write('Warning: Not overriding profile. This can cause '
'unexpected effects due to profile-specific settings, '
'such as about:flags settings, cookies, and '
'extensions.\n')
@property
def devtools_client(self):
return self._devtools_client
@property
@decorators.Cache
def extension_backend(self):
if not self.supports_extensions:
return None
return extension_backend.ExtensionBackendDict(self)
def _ArgsNeedProxyServer(self, args):
"""Returns True if args for Chrome indicate the need for proxy server."""
if '--enable-spdy-proxy-auth' in args:
return True
    return any(arg.startswith('--proxy-server=') for arg in args)
def GetBrowserStartupArgs(self):
    assert '--no-proxy-server' not in self.browser_options.extra_browser_args, (
        '--no-proxy-server flag is disallowed as Chrome needs to be routed '
        'through ts_proxy_server')
args = []
args.extend(self.browser_options.extra_browser_args)
args.append('--enable-net-benchmarking')
args.append('--metrics-recording-only')
args.append('--no-default-browser-check')
args.append('--no-first-run')
# Turn on GPU benchmarking extension for all runs. The only side effect of
# the extension being on is that render stats are tracked. This is believed
# to be effectively free. And, by doing so here, it avoids us having to
# programmatically inspect a pageset's actions in order to determine if it
# might eventually scroll.
args.append('--enable-gpu-benchmarking')
if self.browser_options.disable_background_networking:
args.append('--disable-background-networking')
args.extend(self.GetReplayBrowserStartupArgs())
args.extend(user_agent.GetChromeUserAgentArgumentFromType(
self.browser_options.browser_user_agent_type))
extensions = [extension.local_path
for extension in self._extensions_to_load]
    if extensions:
      args.append('--load-extension=%s' % ','.join(extensions))
if self.browser_options.disable_component_extensions_with_background_pages:
args.append('--disable-component-extensions-with-background-pages')
# Disables the start page, as well as other external apps that can
# steal focus or make measurements inconsistent.
if self.browser_options.disable_default_apps:
args.append('--disable-default-apps')
# Disable the search geolocation disclosure infobar, as it is only shown a
# small number of times to users and should not be part of perf comparisons.
args.append('--disable-search-geolocation-disclosure')
if (self.browser_options.logging_verbosity ==
self.browser_options.NON_VERBOSE_LOGGING):
args.extend(['--enable-logging', '--v=0'])
elif (self.browser_options.logging_verbosity ==
self.browser_options.VERBOSE_LOGGING):
args.extend(['--enable-logging', '--v=1'])
elif (self.browser_options.logging_verbosity ==
self.browser_options.SUPER_VERBOSE_LOGGING):
args.extend(['--enable-logging', '--v=2'])
return args
def GetReplayBrowserStartupArgs(self):
replay_args = []
network_backend = self.platform_backend.network_controller_backend
if not network_backend.is_initialized:
return []
proxy_port = network_backend.forwarder.port_pair.remote_port
replay_args.append('--proxy-server=socks://localhost:%s' % proxy_port)
if not network_backend.is_test_ca_installed:
# Ignore certificate errors if the platform backend has not created
# and installed a root certificate.
replay_args.append('--ignore-certificate-errors')
return replay_args
def HasBrowserFinishedLaunching(self):
assert self._port, 'No DevTools port info available.'
return devtools_client_backend.IsDevToolsAgentAvailable(
self._port,
self._browser_target, self)
def _InitDevtoolsClientBackend(self, remote_devtools_port=None):
""" Initiate the devtool client backend which allow browser connection
through browser' devtool.
Args:
remote_devtools_port: The remote devtools port, if
any. Otherwise assumed to be the same as self._port.
"""
assert not self._devtools_client, (
        'DevTools client backend cannot be initialized twice')
self._devtools_client = devtools_client_backend.DevToolsClientBackend(
self._port, self._browser_target,
remote_devtools_port or self._port, self)
def _WaitForBrowserToComeUp(self):
""" Wait for browser to come up. """
try:
timeout = self.browser_options.browser_startup_timeout
py_utils.WaitFor(self.HasBrowserFinishedLaunching, timeout=timeout)
except (py_utils.TimeoutException, exceptions.ProcessGoneException) as e:
if not self.IsBrowserRunning():
raise exceptions.BrowserGoneException(self.browser, e)
raise exceptions.BrowserConnectionGoneException(self.browser, e)
def _WaitForExtensionsToLoad(self):
""" Wait for all extensions to load.
Be sure to check whether the browser_backend supports_extensions before
calling this method.
"""
assert self._supports_extensions
assert self._devtools_client, (
        'Waiting for extensions requires the devtools client to be '
        'initialized first')
try:
py_utils.WaitFor(self._AllExtensionsLoaded, timeout=60)
except py_utils.TimeoutException:
logging.error('ExtensionsToLoad: ' + repr(
[e.extension_id for e in self._extensions_to_load]))
logging.error('Extension list: ' + pprint.pformat(
self.extension_backend, indent=4))
raise
def _AllExtensionsLoaded(self):
# Extension pages are loaded from an about:blank page,
# so we need to check that the document URL is the extension
# page in addition to the ready state.
for e in self._extensions_to_load:
try:
extension_objects = self.extension_backend[e.extension_id]
except KeyError:
return False
for extension_object in extension_objects:
try:
res = extension_object.EvaluateJavaScript(
"""
document.URL.lastIndexOf({{ url }}, 0) == 0 &&
(document.readyState == 'complete' ||
document.readyState == 'interactive')
""",
url='chrome-extension://%s/' % e.extension_id)
except exceptions.EvaluateException:
# If the inspected page is not ready, we will get an error
# when we evaluate a JS expression, but we can just keep polling
# until the page is ready (crbug.com/251913).
res = None
# TODO(tengs): We don't have full support for getting the Chrome
# version before launch, so for now we use a generic workaround to
# check for an extension binding bug in old versions of Chrome.
# See crbug.com/263162 for details.
if res and extension_object.EvaluateJavaScript(
'chrome.runtime == null'):
extension_object.Reload()
if not res:
return False
return True
@property
def browser_directory(self):
raise NotImplementedError()
@property
def profile_directory(self):
raise NotImplementedError()
@property
def supports_tab_control(self):
return self._supports_tab_control
@property
def supports_tracing(self):
return True
def StartTracing(self, trace_options,
timeout=web_contents.DEFAULT_WEB_CONTENTS_TIMEOUT):
"""
Args:
      trace_options: A tracing_options.TracingOptions instance.
"""
return self.devtools_client.StartChromeTracing(trace_options, timeout)
def StopTracing(self):
self.devtools_client.StopChromeTracing()
def CollectTracingData(self, trace_data_builder):
self.devtools_client.CollectChromeTracingData(trace_data_builder)
def GetProcessName(self, cmd_line):
"""Returns a user-friendly name for the process of the given |cmd_line|."""
if not cmd_line:
# TODO(tonyg): Eventually we should make all of these known and add an
# assertion.
return 'unknown'
if 'nacl_helper_bootstrap' in cmd_line:
return 'nacl_helper_bootstrap'
if ':sandboxed_process' in cmd_line:
return 'renderer'
if ':privileged_process' in cmd_line:
return 'gpu-process'
args = shlex.split(cmd_line)
types = [arg.split('=')[1] for arg in args if arg.startswith('--type=')]
if not types:
return 'browser'
return types[0]
def Close(self):
if self._devtools_client:
self._devtools_client.Close()
self._devtools_client = None
@property
def supports_system_info(self):
    return self.GetSystemInfo() is not None
def GetSystemInfo(self):
    # TODO(crbug.com/706336): Remove this conditional branch once crbug.com/704024
# is fixed.
if util.IsRunningOnCrosDevice():
return self.devtools_client.GetSystemInfo(timeout=30)
return self.devtools_client.GetSystemInfo(timeout=10)
@property
def supports_memory_dumping(self):
return True
def DumpMemory(self, timeout=None):
return self.devtools_client.DumpMemory(timeout=timeout)
@property
def supports_overriding_memory_pressure_notifications(self):
return True
def SetMemoryPressureNotificationsSuppressed(
self, suppressed, timeout=web_contents.DEFAULT_WEB_CONTENTS_TIMEOUT):
self.devtools_client.SetMemoryPressureNotificationsSuppressed(
suppressed, timeout)
def SimulateMemoryPressureNotification(
self, pressure_level, timeout=web_contents.DEFAULT_WEB_CONTENTS_TIMEOUT):
self.devtools_client.SimulateMemoryPressureNotification(
pressure_level, timeout)
@property
def supports_cpu_metrics(self):
return True
@property
def supports_memory_metrics(self):
return True
@property
def supports_power_metrics(self):
return True
|
{
"content_hash": "b59e8577bcb707c32203509912c76c91",
"timestamp": "",
"source": "github",
"line_count": 309,
"max_line_length": 80,
"avg_line_length": 37.420711974110034,
"alnum_prop": 0.6982616967914901,
"repo_name": "catapult-project/catapult-csm",
"id": "2133ca53af3a7e4c13c08e1e0477c0b3e4285132",
"size": "11726",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "telemetry/telemetry/internal/backends/chrome/chrome_browser_backend.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "C++",
"bytes": "43728"
},
{
"name": "CSS",
"bytes": "24873"
},
{
"name": "Go",
"bytes": "80325"
},
{
"name": "HTML",
"bytes": "11817766"
},
{
"name": "JavaScript",
"bytes": "518002"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "Python",
"bytes": "6207634"
},
{
"name": "Shell",
"bytes": "2558"
}
],
"symlink_target": ""
}
|
from .server import WechatConfig, WechatServer
from .models.common import TestStorage
from .config import VERSION
from .controllers.envtest import env_test
from .log import set_logging
__version__ = VERSION
instanceList = []
def new_instance():
newInstance = WechatServer(None, None, None)
instanceList.append(newInstance)
return newInstance
originInstance = new_instance()
# I really want to use sys.modules[__name__] = originInstance
# but it makes auto-fill a real mess, so forgive me for my following **
# actually it took me less than 30 seconds, god bless Uganda
set_logging = set_logging
update_config = originInstance.update_config
run = originInstance.run
msg_register = originInstance.msg_register
send = originInstance.send
access_token = originInstance.access_token
clear_quota = originInstance.clear_quota
application = originInstance.application
chat = originInstance.chat
common = originInstance.common
customerservice = originInstance.customerservice
menu = originInstance.menu
messages = originInstance.messages
oauth2 = originInstance.oauth2
statistics = originInstance.statistics
templatemsgs = originInstance.templatemsgs
users = originInstance.users
utils = originInstance.utils
wrapped = originInstance.wrapped
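# A minimal usage sketch, not part of this module (the token value and the
# message-type argument to msg_register are assumptions; check the itchatmp
# docs for the exact registration constants):
#
#   import itchatmp
#   itchatmp.update_config(itchatmp.WechatConfig(token='your_token'))
#
#   @itchatmp.msg_register('text')
#   def reply(msg):
#       return msg['Content']
#
#   itchatmp.run()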
|
{
"content_hash": "8ac1aad8df1c3795813004d2e8264157",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 71,
"avg_line_length": 30.74418604651163,
"alnum_prop": 0.7586989409984871,
"repo_name": "littlecodersh/itchatmp",
"id": "00042bb6c3a9cfcbbbd649bb59a35bf15b3859c7",
"size": "1322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "itchatmp/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "157270"
}
],
"symlink_target": ""
}
|
def extractFullybookedtranslationsWordpressCom(item):
'''
Parser for 'fullybookedtranslations.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
{
"content_hash": "4025d92e94923151b22a731fa22aba2e",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 104,
"avg_line_length": 27.857142857142858,
"alnum_prop": 0.6512820512820513,
"repo_name": "fake-name/ReadableWebProxy",
"id": "e799eab720a0b7a09805d6faf6bdc3c139bb556d",
"size": "586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractFullybookedtranslationsWordpressCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
}
|
from .codec_service import CodecService
from .crud_service import CRUDService
from .netconf_service import NetconfService
from .executor_service import ExecutorService
from ydk.ext.services import Datastore
__all__ = [ "CodecService", "CRUDService",
"ExecutorService", "NetconfService", "Datastore" ]
|
{
"content_hash": "8306cbe32b93453480f96f57086b90c2",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 62,
"avg_line_length": 35,
"alnum_prop": 0.765079365079365,
"repo_name": "psykokwak4/ydk-gen",
"id": "e34750907c7315755cf350c49cc781d776854c23",
"size": "1029",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "sdk/python/core/ydk/services/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "14940"
},
{
"name": "C",
"bytes": "834"
},
{
"name": "C++",
"bytes": "2491676"
},
{
"name": "CMake",
"bytes": "46417"
},
{
"name": "CSS",
"bytes": "67"
},
{
"name": "Makefile",
"bytes": "32099"
},
{
"name": "Objective-C",
"bytes": "4625"
},
{
"name": "Python",
"bytes": "713839"
},
{
"name": "Ruby",
"bytes": "4023"
},
{
"name": "Shell",
"bytes": "20553"
}
],
"symlink_target": ""
}
|
import re
import os
def parse(document_name):
    '''Return all exhentai gallery URLs found in the saved HTML page.'''
    with open(document_name, 'r') as html_file:
pattern = re.compile('https://exhentai.org/g/[0-9]+/[0-9a-z]{10}/')
file_text = html_file.read()
matches = re.findall(pattern, file_text)
return matches
if __name__ == "__main__":
with open("urls.txt", 'w') as result_file:
document_name = "/Users/christophorus/Downloads/php/view-source_https___exhentai.org_favorites.php.html"
if os.path.isfile(document_name):
print("Now parsing document " + document_name)
result_url_list = parse(document_name)
for line in result_url_list[1::2]:
result_file.write(" " + line)
print("Document " + document_name + " has been parsed!")
for index in range(1, 100):
document_name = "/Users/christophorus/Downloads/php/view-source_https___exhentai.org_favorites.php_page={}.html".format(index)
if os.path.isfile(document_name):
print("Now parsing document " + document_name)
result_url_list = parse(document_name)
for line in result_url_list[1::2]:
result_file.write(" " + line)
print("Document " + document_name + " has been parsed!")
print("All documents are parsed!!!")
|
{
"content_hash": "ad0439fb48dd085296fbf42202ad4669",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 138,
"avg_line_length": 43.32258064516129,
"alnum_prop": 0.5822784810126582,
"repo_name": "ChristophorusX/ehentai-downloader",
"id": "31daedbbab1e6db2d1c249580809e0b41ec23e7a",
"size": "1363",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "extractor.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "188057"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('employees', '0002_schedule'),
]
operations = [
migrations.RenameField(
model_name='schedule',
old_name='employee_id',
new_name='employee',
),
migrations.AlterField(
model_name='schedule',
name='employee_sub',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='sub_schedule_set', to='employees.Employee'),
),
]
|
{
"content_hash": "040021dd296e8f473e17374787120614",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 162,
"avg_line_length": 27.916666666666668,
"alnum_prop": 0.6119402985074627,
"repo_name": "lmann4/cis526-final-project",
"id": "8c3a2e3664d2a475d2e9314097a99742e03ae55f",
"size": "741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "resource_management/apps/employees/migrations/0003_auto_20170505_1938.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3269"
},
{
"name": "HTML",
"bytes": "14840"
},
{
"name": "JavaScript",
"bytes": "38240"
},
{
"name": "Python",
"bytes": "27222"
}
],
"symlink_target": ""
}
|
import re
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
# pylint: disable=too-many-branches,too-many-statements,too-many-arguments
def run(self, terms, variables=None, zones_enabled=True, short_version=None,
deployment_type=None, **kwargs):
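        """Return the list of default scheduler priorities for the given
        (or detected) OpenShift deployment type and x.y version."""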
priorities = []
if short_version is None or deployment_type is None:
if 'openshift' not in variables:
raise AnsibleError("This lookup module requires openshift_facts to be run prior to use")
if deployment_type is None:
if 'common' not in variables['openshift'] or 'deployment_type' not in variables['openshift']['common']:
raise AnsibleError("This lookup module requires that the deployment_type be set")
deployment_type = variables['openshift']['common']['deployment_type']
if short_version is None:
if 'short_version' in variables['openshift']['common']:
short_version = variables['openshift']['common']['short_version']
elif 'openshift_release' in variables:
release = variables['openshift_release']
if release.startswith('v'):
short_version = release[1:]
else:
short_version = release
short_version = '.'.join(short_version.split('.')[0:2])
elif 'openshift_version' in variables:
version = variables['openshift_version']
short_version = '.'.join(version.split('.')[0:2])
else:
# pylint: disable=line-too-long
raise AnsibleError("Either OpenShift needs to be installed or openshift_release needs to be specified")
if deployment_type == 'origin':
if short_version not in ['1.1', '1.2', '1.3', '1.4', '1.5', '3.6', '3.7', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
elif deployment_type == 'openshift-enterprise':
if short_version not in ['3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', 'latest']:
raise AnsibleError("Unknown short_version %s" % short_version)
else:
raise AnsibleError("Unknown deployment_type %s" % deployment_type)
if deployment_type == 'origin':
# convert short_version to origin short_version
short_version = re.sub('^1.', '3.', short_version)
if short_version == 'latest':
short_version = '3.7'
if short_version == '3.1':
priorities.extend([
{'name': 'LeastRequestedPriority', 'weight': 1},
{'name': 'BalancedResourceAllocation', 'weight': 1},
{'name': 'SelectorSpreadPriority', 'weight': 1}
])
if short_version == '3.2':
priorities.extend([
{'name': 'LeastRequestedPriority', 'weight': 1},
{'name': 'BalancedResourceAllocation', 'weight': 1},
{'name': 'SelectorSpreadPriority', 'weight': 1},
{'name': 'NodeAffinityPriority', 'weight': 1}
])
if short_version == '3.3':
priorities.extend([
{'name': 'LeastRequestedPriority', 'weight': 1},
{'name': 'BalancedResourceAllocation', 'weight': 1},
{'name': 'SelectorSpreadPriority', 'weight': 1},
{'name': 'NodeAffinityPriority', 'weight': 1},
{'name': 'TaintTolerationPriority', 'weight': 1}
])
if short_version == '3.4':
priorities.extend([
{'name': 'LeastRequestedPriority', 'weight': 1},
{'name': 'BalancedResourceAllocation', 'weight': 1},
{'name': 'SelectorSpreadPriority', 'weight': 1},
{'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
{'name': 'NodeAffinityPriority', 'weight': 1},
{'name': 'TaintTolerationPriority', 'weight': 1},
{'name': 'InterPodAffinityPriority', 'weight': 1}
])
if short_version in ['3.5', '3.6', '3.7']:
priorities.extend([
{'name': 'SelectorSpreadPriority', 'weight': 1},
{'name': 'InterPodAffinityPriority', 'weight': 1},
{'name': 'LeastRequestedPriority', 'weight': 1},
{'name': 'BalancedResourceAllocation', 'weight': 1},
{'name': 'NodePreferAvoidPodsPriority', 'weight': 10000},
{'name': 'NodeAffinityPriority', 'weight': 1},
{'name': 'TaintTolerationPriority', 'weight': 1}
])
if zones_enabled:
zone_priority = {
'name': 'Zone',
'argument': {
'serviceAntiAffinity': {
'label': 'zone'
}
},
'weight': 2
}
priorities.append(zone_priority)
return priorities
|
{
"content_hash": "222db506bc1b0c1673904546ac2ebf13",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 119,
"avg_line_length": 44.18260869565217,
"alnum_prop": 0.5308010234205865,
"repo_name": "ivanhorvath/openshift-tools",
"id": "fe488f49c73921649d56d5c9125699302fdf3cfd",
"size": "5118",
"binary": false,
"copies": "11",
"ref": "refs/heads/prod",
"path": "openshift/installer/vendored/openshift-ansible-3.7.42-1/roles/openshift_master_facts/lookup_plugins/openshift_master_facts_default_priorities.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "588"
},
{
"name": "Dockerfile",
"bytes": "70267"
},
{
"name": "Go",
"bytes": "382164"
},
{
"name": "Groovy",
"bytes": "6322"
},
{
"name": "HTML",
"bytes": "146500"
},
{
"name": "JavaScript",
"bytes": "2380"
},
{
"name": "Makefile",
"bytes": "3324"
},
{
"name": "PHP",
"bytes": "35793"
},
{
"name": "Python",
"bytes": "37739486"
},
{
"name": "Shell",
"bytes": "1643890"
},
{
"name": "Vim script",
"bytes": "1836"
}
],
"symlink_target": ""
}
|
'''
A Multilayer Perceptron implementation example using TensorFlow library.
This example is using the MNIST database of handwritten digits
(http://yann.lecun.com/exdb/mnist/)
Author: Aymeric Damien
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100
display_step = 1
# Network Parameters
n_hidden_1 = 256 # 1st layer number of features
n_hidden_2 = 256 # 2nd layer number of features
n_input = 784 # MNIST data input (img shape: 28*28)
n_classes = 10 # MNIST total classes (0-9 digits)
def load_data():
# Import MNIST data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=True)
return mnist
def inputs_placeholder():
# tf Graph input
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])
return [x, y]
def multilayer_perceptron(x, weights, biases):
# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
layer_1 = tf.nn.relu(layer_1)
# Hidden layer with RELU activation
layer_2 = tf.add(tf.matmul(layer_1, weights['h2']), biases['b2'])
layer_2 = tf.nn.relu(layer_2)
# Output layer with linear activation
out_layer = tf.matmul(layer_2, weights['out']) + biases['out']
return out_layer
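# Tensor shapes with the defaults above (illustrative): x is [batch, 784],
# layer_1 and layer_2 are [batch, 256], out_layer is [batch, 10].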
def model(x, y):
# Create model
# Store layers weight & bias
weights = {
'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_hidden_2, n_classes]))
}
biases = {
'b1': tf.Variable(tf.random_normal([n_hidden_1])),
'b2': tf.Variable(tf.random_normal([n_hidden_2])),
'out': tf.Variable(tf.random_normal([n_classes]))
}
# Construct model
pred = multilayer_perceptron(x, weights, biases)
# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
# Test model
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Calculate accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
return [cost, optimizer, accuracy]
def train_test(sess, mnist, x, y, cost, optimizer, accuracy):
    '''Run the training loop and report test accuracy.
    data: mnist
    graph: x, y, cost, optimizer, accuracy
    '''
# Training cycle
for epoch in range(training_epochs):
avg_cost = 0.
total_batch = int(mnist.train.num_examples/batch_size)
# Loop over all batches
for i in range(total_batch):
batch_x, batch_y = mnist.train.next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={x: batch_x,
y: batch_y})
# Compute average loss
avg_cost += c / total_batch
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1), "cost=", \
"{:.9f}".format(avg_cost))
print("Optimization Finished!")
print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
if __name__ == '__main__':
# load data
mnist = load_data()
# define inputs
[x, y] = inputs_placeholder()
# model
    [cost, optimizer, accuracy] = model(x, y)
# Launch the graph
with tf.Session() as sess:
# Initializing the variables
init = tf.global_variables_initializer()
sess.run(init)
# train
train_test(sess, mnist, x, y, cost, optimizer, accuracy)
|
{
"content_hash": "1de35b1cad578d01d20913172b682dba",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 89,
"avg_line_length": 28.492753623188406,
"alnum_prop": 0.6169888097660223,
"repo_name": "trhongbinwang/data_science_journey",
"id": "ce6c8198785c9e1005e16ea8fd6a8fa48235183e",
"size": "3932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deep_learning/tensorflow/tutorials/tutorial3/examples/3_NeuralNetworks/multilayer_perceptron.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "59381"
},
{
"name": "Python",
"bytes": "1101944"
},
{
"name": "Shell",
"bytes": "401"
}
],
"symlink_target": ""
}
|
from base64 import b64encode
from itertools import count
import logging
from elftools.dwarf import constants
from .datatype import TypeReference
from .dwarftools import get_attr_val, iter_ranges, iter_expressions
class Variable(object):
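    '''A variable, constant or formal parameter extracted from a DWARF DIE.'''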
vid_gen = count()
def __init__(self, debug_info, die):
assert die.tag in ('DW_TAG_variable', 'DW_TAG_constant', 'DW_TAG_formal_parameter')
self.vid = self.vid_gen.next()
self.name = get_attr_val(die, 'DW_AT_name')
self.type_id = TypeReference(die, 'DW_AT_type', debug_info.types).resolve().compressed_id
self.loc_id = debug_info.locations.insert_DIE_flc(die)
self.is_global = get_attr_val(die, 'DW_AT_external', False) or False
def iter_var_ranges():
for low, high in iter_ranges(die):
try:
low = debug_info.addr2fo(low)
high = debug_info.addr2fo(high)
yield low, high
except (ValueError, TypeError):
pass
self.ranges = list(iter_var_ranges())
if self.type_id is None:
raise ValueError('Variable has no type.')
        if self.name is None and die.get_parent().tag not in ('DW_TAG_subprogram',):
raise ValueError('Variable has no name.')
def iter_var_expressions():
optimized_all = True
optimized_any = False
for low, high, expr in iter_expressions(die):
# expr is a list of byte values -- convert to string and encode with base64
expr = b64encode(''.join(chr(c) for c in expr))
if low is None:
# the expression is independent of the location
optimized_all = False
yield low, high, expr
continue
try:
low = debug_info.addr2fo(low)
high = debug_info.addr2fo(high)
optimized_all = False
yield low, high, expr
except ValueError:
optimized_any = True
optimized_any = optimized_any or optimized_all
if optimized_any:
howstr = 'completely' if optimized_all else 'partially'
            logging.warning('Variable %s declared at %s was %s optimized out.',
                            self.name, debug_info.locations.getstr(self.loc_id), howstr)
self.expressions = list(iter_var_expressions())
if not self.expressions:
# variable was optimized out, found ranges are not valid
self.ranges = list()
class Variables(object):
@staticmethod
def iter_var_DIEs(debug_info):
'''Yield all DIEs in the given debug info, which represent variables, constants and parameters.'''
for cu in debug_info.dwarf.iter_CUs():
for die in cu.iter_DIEs():
if die.tag not in ('DW_TAG_variable', 'DW_TAG_constant', 'DW_TAG_formal_parameter'):
continue
if die.tag == 'DW_TAG_formal_parameter':
parent = die.get_parent()
if parent:
if parent.tag == 'DW_TAG_subroutine_type':
# This is not actually a variable, but simply a parameter of a function type. Skip it.
# example: typedef void (foo_fn)(int, int);
# ^ ^
continue
if get_attr_val(parent, 'DW_AT_declaration'):
# This is the parameter of a function, which is only declared (but not defined).
continue
if get_attr_val(die, 'DW_AT_artificial'):
# This is an artificial variable and it has no explicit representation in the source file.
continue
yield die
@staticmethod
def iter_regular_var_DIEs(debug_info):
def is_inlined(die):
if die.tag == 'DW_TAG_compile_unit':
# we've reached the top DIE
return False
if die.tag == 'DW_TAG_subprogram':
# function
if 'DW_AT_inline' in die.attributes:
# inlined functions -- this is an abstract entry
return die.attributes['DW_AT_inline'].value in (constants.DW_INL_inlined,
constants.DW_INL_declared_inlined)
else:
# regular function
return False
elif die.tag == 'DW_TAG_inlined_subroutine':
# inlined function instance
return True
return is_inlined(die.get_parent())
return (die for die in Variables.iter_var_DIEs(debug_info) if not is_inlined(die))
def __init__(self, debug_info):
self.offset2var = {}
for die in self.iter_regular_var_DIEs(debug_info):
try:
self.offset2var[die.offset] = Variable(debug_info, die)
            except Exception:
logging.warning("Variable %s with tag %s is unclear, ignoring.", get_attr_val(die, 'DW_AT_name'), getattr(die, 'tag', "<Unknown>"))
logging.debug(die)
def get_by_DIE(self, die):
'''Return the variable object, which was created for the given DIE.'''
return self.offset2var[die.offset]
@property
def variables(self):
'''List of all known variables.'''
return self.offset2var.values()
def store(self, conn):
# Store variables
logging.debug('Storing %i variables.', len(self.variables))
query = 'insert into variables (id, type, name, global, loc) values (?, ?, ?, ?, ?)'
items = ((var.vid, var.type_id, var.name, var.is_global, var.loc_id) for var in self.variables)
conn.executemany(query, items)
# Store variable ranges
logging.debug('Storing %i variable ranges.', sum(len(var.ranges) for var in self.variables))
query = 'insert into variables2ranges (var, lo, hi) values (?, ?, ?)'
items = ((var.vid, low, high) for var in self.variables for low, high in var.ranges)
conn.executemany(query, items)
# Store expressions
logging.debug('Storing %i variable expressions.', sum(len(var.expressions) for var in self.variables))
query = 'insert into variables2expressions (var, lo, hi, expr) values (?, ?, ?, ?)'
items = ((var.vid, low, high, expr) for var in self.variables for low, high, expr in var.expressions)
conn.executemany(query, items)
# Commit changes
conn.commit()
|
{
"content_hash": "55f943ed1333576a9dc8cac76287daef",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 147,
"avg_line_length": 41.920731707317074,
"alnum_prop": 0.5403636363636364,
"repo_name": "Samsung/ADBI",
"id": "14aeaff5fc9f27daa80dc7c4cef8b1c8d7110b0a",
"size": "6875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "idk/cachebuilder/variables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "18203"
},
{
"name": "C",
"bytes": "577712"
},
{
"name": "C++",
"bytes": "150691"
},
{
"name": "Logos",
"bytes": "354"
},
{
"name": "Makefile",
"bytes": "12730"
},
{
"name": "PLSQL",
"bytes": "4139"
},
{
"name": "Python",
"bytes": "335683"
},
{
"name": "Shell",
"bytes": "7998"
}
],
"symlink_target": ""
}
|
import os
import subprocess
from typing import Iterable
import pytest
from dev_tools import shell_tools
from dev_tools.test_utils import only_on_posix
def run(
*,
script_file: str,
tmpdir_factory: pytest.TempdirFactory,
arg: str = '',
setup: str = '',
additional_intercepts: Iterable[str] = (),
) -> subprocess.CompletedProcess:
"""Invokes the given script within a temporary test environment."""
with open(script_file) as f:
script_lines = f.readlines()
# Create a unique temporary directory
dir_path = tmpdir_factory.mktemp('tmp', numbered=True)
file_path = os.path.join(dir_path, 'test-script.sh')
intercepted = [
'python',
'python3',
'pylint',
'env',
'pytest',
'mypy',
'black',
*additional_intercepts,
]
assert script_lines[0] == '#!/usr/bin/env bash\n'
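    # Shadow each intercepted command with a bash function injected right
    # after the shebang, so the script under test echoes its invocations
    # instead of actually running the tools.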
for e in intercepted:
script_lines.insert(1, e + '() {\n echo INTERCEPTED ' + e + ' $@\n}\n')
with open(file_path, 'w') as f:
f.writelines(script_lines)
cmd = r"""
export GIT_CONFIG_GLOBAL=/dev/null
export GIT_CONFIG_SYSTEM=/dev/null
dir=$(git rev-parse --show-toplevel)
cd {}
git init --quiet --initial-branch master
git config --local user.name 'Me'
git config --local user.email '<>'
git commit -m init --allow-empty --quiet --no-gpg-sign
{}
mkdir -p dev_tools
touch dev_tools/pypath
chmod +x ./test-script.sh
./test-script.sh {}
""".format(
dir_path, setup, arg
)
return shell_tools.run(
cmd, log_run_to_stderr=False, shell=True, check=False, capture_output=True
)
@only_on_posix
def test_pytest_changed_files_file_selection(tmpdir_factory):
result = run(
script_file='check/pytest-changed-files',
tmpdir_factory=tmpdir_factory,
arg='HEAD~1',
setup='touch file.py\ngit add -A\ngit commit -m test --quiet --no-gpg-sign\n',
)
assert result.returncode == 0
assert result.stdout == ''
assert (
result.stderr.split()
== (
"Comparing against revision 'HEAD~1'.\nFound 0 test files associated with changes.\n"
).split()
)
result = run(
script_file='check/pytest-changed-files',
tmpdir_factory=tmpdir_factory,
arg='HEAD~1',
setup='touch file_test.py\ngit add -A\ngit commit -m test --quiet --no-gpg-sign\n',
)
assert result.returncode == 0
assert result.stdout == 'INTERCEPTED pytest file_test.py\n'
assert (
result.stderr.split()
== (
"Comparing against revision 'HEAD~1'.\nFound 1 test files associated with changes.\n"
).split()
)
result = run(
script_file='check/pytest-changed-files',
tmpdir_factory=tmpdir_factory,
arg='HEAD~1',
setup='touch file.py file_test.py\n'
'git add -A\n'
'git commit -m test --quiet --no-gpg-sign\n',
)
assert result.returncode == 0
assert result.stdout == 'INTERCEPTED pytest file_test.py\n'
assert (
result.stderr.split()
== (
"Comparing against revision 'HEAD~1'.\nFound 1 test files associated with changes.\n"
).split()
)
result = run(
script_file='check/pytest-changed-files',
tmpdir_factory=tmpdir_factory,
arg='HEAD',
setup='touch file.py file_test.py\n'
'git add -A\n'
'git commit -m test --quiet --no-gpg-sign\n'
'echo x > file_test.py\n',
)
assert result.returncode == 0
assert result.stdout == 'INTERCEPTED pytest file_test.py\n'
assert (
result.stderr.split()
== (
"Comparing against revision 'HEAD'.\nFound 1 test files associated with changes.\n"
).split()
)
result = run(
script_file='check/pytest-changed-files',
tmpdir_factory=tmpdir_factory,
arg='HEAD',
setup='touch file.py file_test.py\n'
'git add -A\n'
'git commit -m test --quiet --no-gpg-sign\n'
'echo x > file.py\n',
)
assert result.returncode == 0
assert result.stdout == 'INTERCEPTED pytest file_test.py\n'
assert (
result.stderr.split()
== (
"Comparing against revision 'HEAD'.\nFound 1 test files associated with changes.\n"
).split()
)
result = run(
script_file='check/pytest-changed-files',
tmpdir_factory=tmpdir_factory,
arg='HEAD',
setup='touch __init__.py\n'
'git add -A\n'
'git commit -m test --quiet --no-gpg-sign\n'
'echo x > __init__.py\n',
)
assert result.returncode == 0
assert result.stdout == (
'INTERCEPTED pytest cirq-core/cirq/protocols/json_serialization_test.py\n'
)
assert (
result.stderr.split()
== (
"Comparing against revision 'HEAD'.\nFound 1 test files associated with changes.\n"
).split()
)
@only_on_posix
def test_pytest_changed_files_branch_selection(tmpdir_factory):
result = run(
script_file='check/pytest-changed-files', tmpdir_factory=tmpdir_factory, arg='HEAD'
)
assert result.returncode == 0
assert result.stdout == ''
assert (
result.stderr.split()
== (
"Comparing against revision 'HEAD'.\nFound 0 test files associated with changes.\n"
).split()
)
result = run(
script_file='check/pytest-changed-files', tmpdir_factory=tmpdir_factory, arg='HEAD~999999'
)
assert result.returncode == 1
assert result.stdout == ''
assert "No revision 'HEAD~999999'." in result.stderr
result = run(script_file='check/pytest-changed-files', tmpdir_factory=tmpdir_factory)
assert result.returncode == 0
assert result.stdout == ''
assert (
result.stderr.split()
== (
"Comparing against revision 'master'.\nFound 0 test files associated with changes.\n"
).split()
)
result = run(
script_file='check/pytest-changed-files',
tmpdir_factory=tmpdir_factory,
setup='git branch origin/master',
)
assert result.returncode == 0
assert result.stdout == ''
assert (
result.stderr.split()
== (
"Comparing against revision 'origin/master'.\n"
"Found 0 test files associated with changes.\n"
).split()
)
result = run(
script_file='check/pytest-changed-files',
tmpdir_factory=tmpdir_factory,
setup='git branch upstream/master',
)
assert result.returncode == 0
assert result.stdout == ''
assert (
result.stderr.split()
== (
"Comparing against revision 'upstream/master'.\n"
"Found 0 test files associated with changes.\n"
).split()
)
result = run(
script_file='check/pytest-changed-files',
tmpdir_factory=tmpdir_factory,
setup='git branch upstream/master; git branch origin/master',
)
assert result.returncode == 0
assert result.stdout == ''
assert (
result.stderr.split()
== (
"Comparing against revision 'upstream/master'.\n"
"Found 0 test files associated with changes.\n"
).split()
)
result = run(
script_file='check/pytest-changed-files',
tmpdir_factory=tmpdir_factory,
arg='file',
setup='git checkout -b other --quiet\ngit branch -D master --quiet\n',
)
assert result.returncode == 1
assert result.stdout == ''
assert "No revision 'file'." in result.stderr
# Fails on file.
result = run(
script_file='check/pytest-changed-files',
tmpdir_factory=tmpdir_factory,
arg='file',
setup='touch file\ngit add -A\ngit commit -m test --quiet --no-gpg-sign\n',
)
assert result.returncode == 1
assert result.stdout == ''
assert "No revision 'file'." in result.stderr
# Works when ambiguous between revision and file.
result = run(
script_file='check/pytest-changed-files',
tmpdir_factory=tmpdir_factory,
arg='HEAD',
setup='touch HEAD\ngit add -A\ngit commit -m test --quiet --no-gpg-sign\n',
)
assert result.returncode == 0
assert result.stdout == ''
assert (
result.stderr.split()
== (
"Comparing against revision 'HEAD'.\nFound 0 test files associated with changes.\n"
).split()
)
result = run(
script_file='check/pytest-changed-files',
tmpdir_factory=tmpdir_factory,
setup='touch master\ngit add -A\ngit commit -m test --quiet --no-gpg-sign\n',
)
assert result.returncode == 0
assert result.stdout == ''
assert (
result.stderr.split()
== (
"Comparing against revision 'master'.\nFound 0 test files associated with changes.\n"
).split()
)
# Works on remotes.
result = run(
script_file='check/pytest-changed-files',
tmpdir_factory=tmpdir_factory,
setup='mkdir alt\n'
'cd alt\n'
'git init --quiet --initial-branch master\n'
'git config --local user.name \'Me\'\n'
'git config --local user.email \'<>\'\n'
'git commit -m tes --quiet --allow-empty --no-gpg-sign\n'
'cd ..\n'
'git remote add origin alt\n'
'git fetch origin master --quiet 2> /dev/null\n',
)
assert result.returncode == 0
assert result.stdout == ''
assert (
result.stderr.split()
== (
"Comparing against revision 'origin/master'.\n"
"Found 0 test files associated with changes.\n"
).split()
)
@only_on_posix
def test_pytest_and_incremental_coverage_branch_selection(tmpdir_factory):
result = run(
script_file='check/pytest-and-incremental-coverage',
tmpdir_factory=tmpdir_factory,
arg='HEAD',
additional_intercepts=['check/pytest'],
)
assert result.returncode == 0
assert result.stdout == (
'INTERCEPTED check/pytest '
'--cov --cov-config=dev_tools/conf/.coveragerc\n'
'The annotate command will be removed in a future version.\n'
'Get in touch if you still use it: ned@nedbatchelder.com\n'
'No data to report.\n'
'INTERCEPTED '
'python dev_tools/check_incremental_coverage_annotations.py HEAD\n'
)
assert result.stderr == "Comparing against revision 'HEAD'.\n"
result = run(
script_file='check/pytest-and-incremental-coverage',
tmpdir_factory=tmpdir_factory,
arg='HEAD~999999',
)
assert result.returncode == 1
assert result.stdout == ''
assert "No revision 'HEAD~999999'." in result.stderr
result = run(
script_file='check/pytest-and-incremental-coverage',
tmpdir_factory=tmpdir_factory,
additional_intercepts=['check/pytest'],
)
assert result.returncode == 0
assert result.stdout == (
'INTERCEPTED check/pytest '
'--cov --cov-config=dev_tools/conf/.coveragerc\n'
'The annotate command will be removed in a future version.\n'
'Get in touch if you still use it: ned@nedbatchelder.com\n'
'No data to report.\n'
'INTERCEPTED '
'python dev_tools/check_incremental_coverage_annotations.py master\n'
)
assert result.stderr == "Comparing against revision 'master'.\n"
result = run(
script_file='check/pytest-and-incremental-coverage',
tmpdir_factory=tmpdir_factory,
setup='git branch origin/master',
additional_intercepts=['check/pytest'],
)
assert result.returncode == 0
assert result.stdout == (
'INTERCEPTED check/pytest '
'--cov --cov-config=dev_tools/conf/.coveragerc\n'
'The annotate command will be removed in a future version.\n'
'Get in touch if you still use it: ned@nedbatchelder.com\n'
'No data to report.\n'
'INTERCEPTED '
'python dev_tools/check_incremental_coverage_annotations.py origin/master\n'
)
assert result.stderr == "Comparing against revision 'origin/master'.\n"
result = run(
script_file='check/pytest-and-incremental-coverage',
tmpdir_factory=tmpdir_factory,
setup='git branch upstream/master',
additional_intercepts=['check/pytest'],
)
assert result.returncode == 0
assert result.stdout == (
'INTERCEPTED check/pytest '
'--cov --cov-config=dev_tools/conf/.coveragerc\n'
'The annotate command will be removed in a future version.\n'
'Get in touch if you still use it: ned@nedbatchelder.com\n'
'No data to report.\n'
'INTERCEPTED '
'python dev_tools/check_incremental_coverage_annotations.py upstream/master\n'
)
assert result.stderr == "Comparing against revision 'upstream/master'.\n"
result = run(
script_file='check/pytest-and-incremental-coverage',
tmpdir_factory=tmpdir_factory,
setup='git branch upstream/master; git branch origin/master',
additional_intercepts=['check/pytest'],
)
assert result.returncode == 0
assert result.stdout == (
'INTERCEPTED check/pytest '
'--cov --cov-config=dev_tools/conf/.coveragerc\n'
'The annotate command will be removed in a future version.\n'
'Get in touch if you still use it: ned@nedbatchelder.com\n'
'No data to report.\n'
'INTERCEPTED '
'python dev_tools/check_incremental_coverage_annotations.py upstream/master\n'
)
assert result.stderr == "Comparing against revision 'upstream/master'.\n"
result = run(
script_file='check/pytest-and-incremental-coverage',
tmpdir_factory=tmpdir_factory,
setup='git checkout -b other --quiet\ngit branch -D master --quiet\n',
additional_intercepts=['check/pytest'],
)
assert result.returncode == 1
assert result.stdout == ''
assert 'No default revision found to compare against' in result.stderr
# Works when ambiguous between revision and file.
result = run(
script_file='check/pytest-and-incremental-coverage',
tmpdir_factory=tmpdir_factory,
arg='HEAD',
setup='touch HEAD\ngit add -A\ngit commit -m test --quiet --no-gpg-sign\n',
additional_intercepts=['check/pytest'],
)
assert result.returncode == 0
assert result.stdout == (
'INTERCEPTED check/pytest '
'--cov --cov-config=dev_tools/conf/.coveragerc\n'
'The annotate command will be removed in a future version.\n'
'Get in touch if you still use it: ned@nedbatchelder.com\n'
'No data to report.\n'
'INTERCEPTED '
'python dev_tools/check_incremental_coverage_annotations.py HEAD\n'
)
assert result.stderr == "Comparing against revision 'HEAD'.\n"
result = run(
script_file='check/pytest-and-incremental-coverage',
tmpdir_factory=tmpdir_factory,
setup='touch master\ngit add -A\ngit commit -m test --quiet --no-gpg-sign\n',
additional_intercepts=['check/pytest'],
)
assert result.returncode == 0
assert result.stdout == (
'INTERCEPTED check/pytest '
'--cov --cov-config=dev_tools/conf/.coveragerc\n'
'The annotate command will be removed in a future version.\n'
'Get in touch if you still use it: ned@nedbatchelder.com\n'
'No data to report.\n'
'INTERCEPTED '
'python dev_tools/check_incremental_coverage_annotations.py master\n'
)
assert result.stderr == "Comparing against revision 'master'.\n"
result = run(
script_file='check/pytest-and-incremental-coverage',
tmpdir_factory=tmpdir_factory,
setup='touch master\n'
'git add -A\n'
'git commit -q -m test --no-gpg-sign\n'
'git branch alt\n'
'touch master2\n'
'git add -A\n'
'git commit -q -m test2 --no-gpg-sign\n'
'git checkout -q alt\n',
additional_intercepts=['check/pytest'],
)
assert result.returncode == 0
assert result.stdout.startswith(
'INTERCEPTED check/pytest '
'--cov --cov-config=dev_tools/conf/.coveragerc\n'
'The annotate command will be removed in a future version.\n'
'Get in touch if you still use it: ned@nedbatchelder.com\n'
'No data to report.\n'
'INTERCEPTED '
'python dev_tools/check_incremental_coverage_annotations.py '
)
assert result.stderr.startswith("Comparing against revision 'master' (merge base ")
@only_on_posix
def test_incremental_format_branch_selection(tmpdir_factory):
result = run(script_file='check/format-incremental', tmpdir_factory=tmpdir_factory, arg='HEAD')
assert result.returncode == 0
assert "No files to format" in result.stdout
assert "Comparing against revision 'HEAD'." in result.stderr
result = run(
script_file='check/format-incremental', tmpdir_factory=tmpdir_factory, arg='HEAD~9999'
)
assert result.returncode == 1
assert result.stdout == ''
assert "No revision 'HEAD~9999'." in result.stderr
result = run(script_file='check/format-incremental', tmpdir_factory=tmpdir_factory)
assert result.returncode == 0
assert "No files to format" in result.stdout
assert "Comparing against revision 'master'." in result.stderr
result = run(
script_file='check/format-incremental',
tmpdir_factory=tmpdir_factory,
setup='git branch origin/master',
)
assert result.returncode == 0
assert "No files to format" in result.stdout
assert "Comparing against revision 'origin/master'." in result.stderr
result = run(
script_file='check/format-incremental',
tmpdir_factory=tmpdir_factory,
setup='git branch upstream/master',
)
assert result.returncode == 0
assert "No files to format" in result.stdout
assert "Comparing against revision 'upstream/master'." in result.stderr
result = run(
script_file='check/format-incremental',
tmpdir_factory=tmpdir_factory,
setup='git branch upstream/master; git branch origin/master',
)
assert result.returncode == 0
assert "No files to format" in result.stdout
assert "Comparing against revision 'upstream/master'." in result.stderr
result = run(
script_file='check/format-incremental',
tmpdir_factory=tmpdir_factory,
setup='git checkout -b other --quiet\ngit branch -D master --quiet\n',
)
assert result.returncode == 1
assert result.stdout == ''
assert 'No default revision found to compare against' in result.stderr
# Works when ambiguous between revision and file.
result = run(
script_file='check/format-incremental',
tmpdir_factory=tmpdir_factory,
arg='HEAD',
setup='touch HEAD.py\ngit add -A\ngit commit -m test --quiet --no-gpg-sign\n',
)
assert result.returncode == 0
assert "No files to format" in result.stdout
assert "Comparing against revision 'HEAD'." in result.stderr
result = run(
script_file='check/format-incremental',
tmpdir_factory=tmpdir_factory,
setup='touch master.py\ngit add -A\ngit commit -m test --quiet --no-gpg-sign\n',
)
assert result.returncode == 0
assert "No files to format" in result.stdout
assert "Comparing against revision 'master'." in result.stderr
result = run(
script_file='check/format-incremental',
tmpdir_factory=tmpdir_factory,
setup='touch master.py\n'
'git add -A\n'
'git commit -q -m test --no-gpg-sign\n'
'git branch alt\n'
'touch master2.py\n'
'git add -A\n'
'git commit -q -m test2 --no-gpg-sign\n'
'git checkout -q alt\n'
'echo " print(1)" > alt.py\n'
'git add -A\n'
'git commit -q -m test3 --no-gpg-sign\n',
)
assert result.returncode == 0
assert 'INTERCEPTED black --color --check --diff alt.py' in result.stdout
assert result.stderr.startswith("Comparing against revision 'master' (merge base ")
@only_on_posix
def test_pylint_changed_files_file_selection(tmpdir_factory):
result = run(
script_file='check/pylint-changed-files',
tmpdir_factory=tmpdir_factory,
arg='HEAD~1',
setup='touch file.py\ngit add -A\ngit commit -m test --quiet --no-gpg-sign\n',
)
assert result.returncode == 0
assert result.stdout == ''
assert (
result.stderr.split()
== (
"Comparing against revision 'HEAD~1'.\n"
"Found 0 lintable files associated with changes.\n"
).split()
)
intercepted_prefix = (
'INTERCEPTED env PYTHONPATH=dev_tools pylint --jobs=0 --rcfile=dev_tools/conf/.pylintrc '
)
result = run(
script_file='check/pylint-changed-files',
tmpdir_factory=tmpdir_factory,
arg='HEAD~1',
setup='mkdir cirq\n'
'touch cirq/file.py\n'
'git add -A\n'
'git commit -m test --quiet --no-gpg-sign\n',
)
assert result.returncode == 0
assert result.stdout == intercepted_prefix + 'cirq/file.py\n'
assert (
result.stderr.split()
== (
"Comparing against revision 'HEAD~1'.\n"
"Found 1 lintable files associated with changes.\n"
).split()
)
result = run(
script_file='check/pylint-changed-files',
tmpdir_factory=tmpdir_factory,
arg='HEAD~1',
setup='mkdir cirq\n'
'touch ignore.py cirq/file.py\n'
'git add -A\n'
'git commit -m test --quiet --no-gpg-sign\n',
)
assert result.returncode == 0
assert result.stdout == intercepted_prefix + 'cirq/file.py\n'
assert (
result.stderr.split()
== (
"Comparing against revision 'HEAD~1'.\n"
"Found 1 lintable files associated with changes.\n"
).split()
)
result = run(
script_file='check/pylint-changed-files',
tmpdir_factory=tmpdir_factory,
arg='HEAD',
setup='mkdir cirq\n'
'touch ignore.py cirq/file.py\n'
'git add -A\n'
'git commit -m test --quiet --no-gpg-sign\n'
'echo x > cirq/file.py',
)
assert result.returncode == 0
assert result.stdout == intercepted_prefix + 'cirq/file.py\n'
assert (
result.stderr.split()
== (
"Comparing against revision 'HEAD'.\n"
"Found 1 lintable files associated with changes.\n"
).split()
)
result = run(
script_file='check/pylint-changed-files',
tmpdir_factory=tmpdir_factory,
arg='HEAD~1',
setup='mkdir -p cirq dev_tools examples ignore\n'
'touch cirq/file.py dev_tools/file.py examples/file.py\n'
'touch ignore/ignore.py\n'
'git add -A\n'
'git commit -m test --quiet --no-gpg-sign\n',
)
assert result.returncode == 0
assert result.stdout == intercepted_prefix + (
'cirq/file.py dev_tools/file.py examples/file.py\n'
)
assert (
result.stderr.split()
== (
"Comparing against revision 'HEAD~1'.\n"
"Found 3 lintable files associated with changes.\n"
).split()
)
|
{
"content_hash": "0fbd41ed85c85364e75bfbef2e8610a2",
"timestamp": "",
"source": "github",
"line_count": 698,
"max_line_length": 99,
"avg_line_length": 33.401146131805156,
"alnum_prop": 0.6107060135540877,
"repo_name": "quantumlib/Cirq",
"id": "792bd5829072c784ea66c89823509ee644e923a8",
"size": "23899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev_tools/bash_scripts_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "4616"
},
{
"name": "HTML",
"bytes": "262"
},
{
"name": "JavaScript",
"bytes": "660"
},
{
"name": "Jupyter Notebook",
"bytes": "672675"
},
{
"name": "Makefile",
"bytes": "634"
},
{
"name": "Python",
"bytes": "8643017"
},
{
"name": "Scilab",
"bytes": "735"
},
{
"name": "Shell",
"bytes": "64230"
},
{
"name": "TypeScript",
"bytes": "91766"
}
],
"symlink_target": ""
}
|
import logging
import couchdb
import urlparse
import json
import urllib2
import threading
import re
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from lr.model import LRNode as sourceLRNode, \
NodeServiceModel, ResourceDataModel, LRNodeModel, defaultCouchServer, appConfig
from lr.lib.base import BaseController, render
from lr.lib import helpers as h
import base64
import pprint
import Queue
log = logging.getLogger(__name__)
class DistributeController(BaseController):
__TARGET_NODE_INFO = 'taget_node_info'
__OK = 'ok'
__ERROR = 'error'
def __before__(self):
self.resource_data = appConfig['couchdb.db.resourcedata']
"""REST Controller styled on the Atom Publishing Protocol"""
# To properly map this controller, ensure your config/routing.py
# file has a resource setup:
# map.resource('distribute', 'distribute')
def destination(self):
"""GET /destination: return node information"""
# url('distribute')
response = {self.__OK: True}
try:
response[self.__TARGET_NODE_INFO] = sourceLRNode.distributeInfo
except Exception as ex:
log.exception(ex)
response["error":"Internal error"]
log.info("received distribute request...returning: \n"+pprint.pformat(response, 4))
return json.dumps(response)
    def _getDestinationInfo(self, connection):
        # Make sure we only have one slash in the url path. More than one
        # confuses the pylons routing library.
destinationURL = urlparse.urljoin(connection.destination_node_url.strip(),
"destination")
request = urllib2.Request(destinationURL)
credential = sourceLRNode.getDistributeCredentialFor(destinationURL)
if credential is not None:
base64string = base64.encodestring('%s:%s' % (credential['username'],credential['password'])).replace("\n", "")
request.add_header("Authorization", "Basic %s" % base64string)
log.info("\n\nAccess destination node at: "+pprint.pformat(request.__dict__))
return json.load(urllib2.urlopen(request))
def _canDistributeTo(self, connection, sourceNodeInfo):
if not connection.active:
return {self.__OK: False,
'connection_id': connection.connection_id,
self.__ERROR: 'Inactive connection'}
result={self.__OK:True, 'connection_id': connection.connection_id }
sourceNodeInfo = h.dictToObject(sourceNodeInfo)
try:
            destinationNodeInfo = h.dictToObject(self._getDestinationInfo(connection)[self.__TARGET_NODE_INFO])
result['destinationNodeInfo'] = destinationNodeInfo
            # Don't bother going through all the filter-out rules if the source
            # and destination nodes are on the same community and network.
if((sourceNodeInfo.community_id == destinationNodeInfo.community_id) and
(sourceNodeInfo.network_id == destinationNodeInfo.network_id) and
not (sourceNodeInfo.gateway_node and destinationNodeInfo.gateway_node)):
pass
elif sourceNodeInfo.node_id == destinationNodeInfo.node_id:
result[self.__ERROR] = "Source and destination node must be different node."
elif ((sourceNodeInfo.gateway_node or destinationNodeInfo.gateway_node) != connection.gateway_connection):
result[self.__ERROR] = " 'gateway_connection' mismatch between nodes and connection data"
elif ((sourceNodeInfo.community_id != destinationNodeInfo.community_id) and
((not sourceNodeInfo.social_community) or (not destinationNodeInfo.social_community))):
result[self.__ERROR] = 'cannot distribute across non social communities'
elif ((sourceNodeInfo.network_id != destinationNodeInfo.network_id) and
((not sourceNodeInfo.gateway_node)or(not destinationNodeInfo.gateway_node))):
result[self.__ERROR] = 'cannot distribute across networks (or communities) unless gateway'
elif ((sourceNodeInfo.gateway_node and destinationNodeInfo.gateway_node)
and (sourceNodeInfo.network_id == destinationNodeInfo.network_id)):
result[self.__ERROR] = 'gateways must only distribute across different networks'
elif (sourceNodeInfo.gateway_node and not destinationNodeInfo.gateway_node):
result[self.__ERROR] = 'gateways can only distribute to gateways'
except urllib2.URLError as ex:
log.exception(ex)
result[self.__ERROR] = "Cannot reach destination node. "+str(ex.reason)
except Exception as ex:
log.exception(ex)
result[self.__ERROR] = "Internal error. Cannot process destination node info"
if result.has_key(self.__ERROR):
result[self.__OK] = False
return result
def _getDistributeDestinations(self):
""""Method to test the connections and returns a list of destionation node
if the connections are valid"""
gatewayConnectionList = []
connectionsStatusInfo = {self.__OK:True, 'connections':[]}
for connection in sourceLRNode.connections:
# Make sure that the connection is active
connectionsStatusInfo['connections'].append(self._canDistributeTo(connection, sourceLRNode.distributeInfo))
if (connectionsStatusInfo['connections'][-1][self.__OK] and
sourceLRNode.distributeInfo['gateway_node'] and
connectionsStatusInfo['connections'][-1]['destinationNodeInfo'].gateway_node and
connection.gateway_connection):
gatewayConnectionList.append(connection)
# Only one gateway connection is allowed, faulty network description
if len(gatewayConnectionList) > 1:
log.info("***Abort distribution. More than one gateway node connection")
connectionsStatusInfo[self.__ERROR] ="only one active gateway connection is allowed, faulty network description"
break
if len (sourceLRNode.connections) == 0:
connectionsStatusInfo[self.__ERROR] ="No connection present for distribution"
if connectionsStatusInfo.has_key(self.__ERROR) :
connectionsStatusInfo[self.__OK] = False
return connectionsStatusInfo
def create(self):
"""POST / distribute start distribution"""
distributeResults = Queue.Queue()
def doDistribution(connectionInfo, server, sourceUrl):
# We want to always use the replication filter function to replicate
# only distributable doc and filter out any other type of documents.
# However we don't have any query arguments until we test if there is any filter.
replicationOptions={'filter':ResourceDataModel.REPLICATION_FILTER,
'source':sourceUrl,
'connection_id': connectionInfo['connection_id'],
'query_params': None}
            # If the destination node is using a filter that is not custom, use
            # it as the query params for the filter function.
if ((connectionInfo['destinationNodeInfo'].filter_description is not None ) and
(connectionInfo['destinationNodeInfo'].filter_description.get('custom_filter') == False)):
replicationOptions['query_params'] =connectionInfo['destinationNodeInfo'].filter_description
#if distinationNode['distribute service'] .service_auth["service_authz"] is not None:
#log.info("Destination node '{}' require authentication".format(destinationUrl))
#Try to get the user name and password the url
#destinationUrl = connectionInfo['destinationNodeInfo'].resource_data_url
destinationUrl = connectionInfo['destinationNodeInfo'].incoming_url
credential = sourceLRNode.getDistributeCredentialFor(destinationUrl)
if credential is not None:
parsedUrl = urlparse.urlparse(destinationUrl)
destinationUrl = destinationUrl.replace(parsedUrl.netloc, "{0}:{1}@{2}".format(
credential['username'], credential['password'], parsedUrl.netloc))
if replicationOptions['query_params'] is None:
del replicationOptions['query_params']
replicationOptions['target'] = destinationUrl
            authz_header = h.getBasicAuthHeaderFromURL(appConfig['couchdb.url.dbadmin'])
authz_header.update( { 'Content-Type': 'application/json'})
request = urllib2.Request(urlparse.urljoin(appConfig['couchdb.url'], '_replicator'),
headers=authz_header,
data = json.dumps(replicationOptions))
log.info("\n\nReplication started\nSource:{0}\nDestionation:{1}\nArgs:{2}".format(
sourceUrl, destinationUrl, pprint.pformat(replicationOptions)))
results = json.load(urllib2.urlopen(request))
connectionInfo['replication_results'] = results
distributeResults.put(connectionInfo)
log.debug("Replication results: " + pprint.pformat(results))
log.info("Distribute.......\n")
        ##Check if the distribute service is available on the node.
#if(sourceLRNode.isServiceAvailable(NodeServiceModel.DISTRIBUTE) == False):
#log.info("Distribute not available on node ")
#return
if((sourceLRNode.connections is None) or
(len(sourceLRNode.connections) ==0)):
log.info("No connection present for distribution")
return json.dumps({self.__ERROR:''})
log.info("Connections: \n{0}\n"+pprint.pformat([c.specData for c in sourceLRNode.connections]))
connectionsStatusInfo = self._getDistributeDestinations()
log.debug("\nSource Node Info:\n{0}".format(pprint.pformat(sourceLRNode.distributeInfo)))
log.debug("\n\n Distribute connections:\n{0}\n\n".format(pprint.pformat(connectionsStatusInfo)))
for connectionStatus in connectionsStatusInfo['connections']:
if connectionsStatusInfo.has_key(self.__ERROR) or connectionStatus.has_key(self.__ERROR) == True:
distributeResults.put(connectionStatus)
else:
replicationArgs = (connectionStatus, defaultCouchServer, self.resource_data )
# Use a thread to do the actual replication.
replicationThread = threading.Thread(target=doDistribution, args=replicationArgs)
replicationThread.start()
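                # NOTE: join() immediately after start() waits for this
                # replication to finish before the next connection is
                # processed, so the replications run one at a time.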
replicationThread.join()
log.debug("\n\n\n---------------------distribute threads end--------------------\n\n\n")
log.debug("\n\n\n----------Queue results Completed size: {0}--------------\n\n\n".format(distributeResults.qsize()))
connectionsStatusInfo['connections'] = []
while distributeResults.empty() == False:
connectionsStatusInfo['connections'].append(distributeResults.get())
log.debug("\n\n======== DISTRIBUTE RESULTS ============\n\n")
log.debug(pprint.pformat(connectionsStatusInfo))
return json.dumps(connectionsStatusInfo, indent=4)
|
{
"content_hash": "62fd0fe3bbb90a87cd880d88da6bb9d3",
"timestamp": "",
"source": "github",
"line_count": 232,
"max_line_length": 128,
"avg_line_length": 51.952586206896555,
"alnum_prop": 0.6162781050360906,
"repo_name": "LearningRegistry/LearningRegistry",
"id": "ecc14cb4c92a1e51e6ce3d21273899de544ee8f4",
"size": "12655",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "LR/lr/controllers/distribute.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "22994"
},
{
"name": "CSS",
"bytes": "7145"
},
{
"name": "HTML",
"bytes": "22606"
},
{
"name": "Java",
"bytes": "192982"
},
{
"name": "JavaScript",
"bytes": "1202618"
},
{
"name": "Mako",
"bytes": "9795"
},
{
"name": "PHP",
"bytes": "7151"
},
{
"name": "Python",
"bytes": "885233"
},
{
"name": "Shell",
"bytes": "21693"
}
],
"symlink_target": ""
}
|
import sys  # needed for sys.exit in the __main__ block below
from omics_pipe.parameters.default_parameters import default_parameters
from omics_pipe.utils import *
p = Bunch(default_parameters)
def fastqc(sample, extension, fastqc_flag):
'''QC check of raw .fastq files using FASTQC.
input:
.fastq file
output:
folder and zipped folder containing html, txt and image files
citation:
Babraham Bioinformatics
link:
http://www.bioinformatics.babraham.ac.uk/projects/fastqc/
parameters from parameters file:
RAW_DATA_DIR:
QC_PATH:
FASTQC_VERSION:
COMPRESSION:
'''
print "sample name is: ", sample
sample = sample + extension
spawn_job(jobname = 'fastqc', SAMPLE = sample, LOG_PATH = p.OMICSPIPE["LOG_PATH"], RESULTS_EMAIL = p.OMICSPIPE["EMAIL"], SCHEDULER = p.OMICSPIPE["SCHEDULER"], walltime = p.FASTQC["WALLTIME"], queue = p.OMICSPIPE["QUEUE"], nodes = p.FASTQC["NODES"], ppn = p.FASTQC["CPU"], memory = p.FASTQC["MEMORY"], script = "/fastqc_drmaa.sh", args_list = [sample, p.FASTQC["PATH"],p.FASTQC['RESULTS'], p.FASTQC["VERSION"], p.FASTQC["COMPRESSION"]])
job_status(jobname = 'fastqc', resultspath = p.FASTQC["RESULTS"] + "/" + sample, SAMPLE = sample, outputfilename = sample + "_2" + "_fastqc.html", FLAG_PATH = p.OMICSPIPE["FLAG_PATH"])
return
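# A minimal invocation sketch (sample name, extension and flag value are
# illustrative):
#
#     fastqc("Sample_01", ".fastq", fastqc_flag=True)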
if __name__ == '__main__':
fastqc(sample, extension, fastqc_flag)
sys.exit(0)
|
{
"content_hash": "e0a33ad5c29755918fafdedf2e632584",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 439,
"avg_line_length": 44.205882352941174,
"alnum_prop": 0.6067864271457086,
"repo_name": "adammaikai/OmicsPipe2.0",
"id": "bf7dd2a9ac2907bb0c80ced124df5113089abede",
"size": "1526",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "omics_pipe/modules/fastqc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9660"
},
{
"name": "Groff",
"bytes": "126"
},
{
"name": "Perl",
"bytes": "3396"
},
{
"name": "Python",
"bytes": "543104"
},
{
"name": "R",
"bytes": "342554"
},
{
"name": "Shell",
"bytes": "260672"
}
],
"symlink_target": ""
}
|
import os
import sys
import re
import logging
from optparse import OptionParser
from ksscommand import KssCommand, KssCommandException, KssCommandOptException
import __cmd__
try:
import karesansui
from karesansui import __version__
from karesansui.lib.utils import load_locale, execute_command
from karesansui.lib.const import ISCSI_CMD, ISCSI_CMD_OPTION_MODE, \
ISCSI_CMD_OPTION_MODE_NODE, ISCSI_CMD_OPTION_OPERATOR, ISCSI_CMD_OPTION_OPERATOR_DELETE, \
ISCSI_CMD_OPTION_TARGETNAME, ISCSI_CMD_OPTION_PORTAL
except ImportError, e:
print >>sys.stderr, "[Error] some packages not found. - %s" % e
sys.exit(1)
_ = load_locale()
usage = '%prog [options]'
def getopts():
optp = OptionParser(usage=usage, version=__version__)
optp.add_option('-t', '--target', dest='host', help=_('Target host name'), default=None)
optp.add_option('-i', '--iqn', dest='iqn', help=_('Target IQN'), default=None)
return optp.parse_args()
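# Typical invocation (target host and IQN values are illustrative):
#   python delete_iscsi.py -t 192.0.2.10 -i iqn.2012-01.com.example:storage.disk1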
def chkopts(opts):
reg = re.compile("[^a-zA-Z0-9\._:-]")
if opts.iqn:
if reg.search(opts.iqn):
            raise KssCommandOptException('ERROR: Illegal option value. option=%s value=%s' % ('-i or --iqn', opts.iqn))
else:
raise KssCommandOptException('ERROR: %s option is required.' % '-i or --iqn')
if opts.host:
if reg.search(opts.host):
            raise KssCommandOptException('ERROR: Illegal option value. option=%s value=%s' % ('-t or --target', opts.host))
class DeleteIscsi(KssCommand):
def process(self):
(opts, args) = getopts()
chkopts(opts)
self.up_progress(10)
delete_command_args = [
ISCSI_CMD,
ISCSI_CMD_OPTION_MODE,
ISCSI_CMD_OPTION_MODE_NODE,
ISCSI_CMD_OPTION_OPERATOR,
ISCSI_CMD_OPTION_OPERATOR_DELETE,
ISCSI_CMD_OPTION_TARGETNAME,
opts.iqn,
]
if opts.host:
delete_command_args.append(ISCSI_CMD_OPTION_PORTAL)
delete_command_args.append(opts.host)
(delete_rc,delete_res) = execute_command(delete_command_args)
self.up_progress(50)
if delete_rc != 0:
raise KssCommandException('Failed to delete iSCSI. - host=%s iqn=%s message=%s' % (opts.host, opts.iqn, delete_res))
self.logger.info("Delete iSCSI node successful. - host=%s iqn=%s" % (opts.host, opts.iqn))
print >>sys.stdout, _("Delete iSCSI node successful. - host=%s iqn=%s") % (opts.host, opts.iqn)
return True
if __name__ == "__main__":
target = DeleteIscsi()
sys.exit(target.run())
|
{
"content_hash": "6a5d3d72032675442e18b3b4e495ac2d",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 128,
"avg_line_length": 33.5,
"alnum_prop": 0.6261002678913127,
"repo_name": "karesansui/karesansui",
"id": "52cfca89f4f19787eeb420997f350838da0a34a9",
"size": "3789",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "bin/delete_iscsi.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "79865"
},
{
"name": "HTML",
"bytes": "32774"
},
{
"name": "JavaScript",
"bytes": "286445"
},
{
"name": "Makefile",
"bytes": "265"
},
{
"name": "Python",
"bytes": "2226164"
},
{
"name": "Shell",
"bytes": "18293"
}
],
"symlink_target": ""
}
|
''' Provide the ``Application`` class.
Application instances are factories for creating new Bokeh Documents.
When a Bokeh server session is initiated, the Bokeh server asks the Application
for a new Document to service the session. To do this, the Application first
creates a new empty Document, then it passes this new Document to the
``modify_document`` method of each of its handlers. When all handlers have
updated the Document, it is used to service the user session.
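For example, a minimal sketch of this flow (``make_doc`` is an illustrative
user callback; ``FunctionHandler`` wraps a plain callable as a handler):
.. code-block:: python
    from bokeh.application.application import Application
    from bokeh.application.handlers.function import FunctionHandler
    from bokeh.plotting import figure
    def make_doc(doc):
        doc.add_root(figure(title="empty"))
    app = Application(FunctionHandler(make_doc))
    doc = app.create_document()   # runs make_doc on a fresh Document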
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from abc import ABCMeta, abstractmethod, abstractproperty
# External imports
from tornado import gen
# Bokeh imports
from ..document import Document
from ..settings import settings
from ..util.future import with_metaclass
from ..util.tornado import yield_for_all_futures
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Application',
'ServerContext',
'SessionContext',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class Application(object):
''' An Application is a factory for Document instances.
'''
# This is so that bokeh.io.show can check if a passed in object is an
# Application without having to import Application directly. This module
# depends on tornado and we have made a commitment that "basic" modules
# will function without bringing in tornado.
_is_a_bokeh_application_class = True
def __init__(self, *handlers, **kwargs):
''' Application factory.
Args:
handlers (seq[Handler]): List of handlers to call.
The URL is taken from the first one only.
Keyword Args:
metadata (dict): arbitrary user-supplied JSON data to make available
with the application.
The server will provide a URL ``http://applicationurl/metadata``
which returns a JSON blob of the form:
.. code-block:: json
{
"data": {
"hi": "hi",
"there": "there"
},
"url": "/myapp"
}
The user-supplied metadata is returned as-is under the
``"data"`` key in the blob.
'''
metadata = kwargs.pop('metadata', None)
if kwargs:
raise TypeError("Invalid keyword argument: %s" %
list(kwargs.keys())[0])
self._static_path = None
self._handlers = []
self._metadata = metadata
for h in handlers:
self.add(h)
# Properties --------------------------------------------------------------
@property
def handlers(self):
''' The ordered list of handlers this Application is configured with.
'''
return tuple(self._handlers)
@property
def metadata(self):
''' Arbitrary user-supplied metadata to associate with this application.
'''
return self._metadata
@property
def safe_to_fork(self):
'''
'''
return all(handler.safe_to_fork for handler in self._handlers)
@property
def static_path(self):
''' Path to any (optional) static resources specified by handlers.
'''
return self._static_path
# Public methods ----------------------------------------------------------
def add(self, handler):
''' Add a handler to the pipeline used to initialize new documents.
Args:
handler (Handler) : a handler for this Application to use to
process Documents
'''
self._handlers.append(handler)
# make sure there is at most one static path
static_paths = set(h.static_path() for h in self.handlers)
static_paths.discard(None)
if len(static_paths) > 1:
raise RuntimeError("More than one static path requested for app: %r" % list(static_paths))
elif len(static_paths) == 1:
self._static_path = static_paths.pop()
else:
self._static_path = None
def create_document(self):
''' Creates and initializes a document using the Application's handlers.
'''
doc = Document()
self.initialize_document(doc)
return doc
def initialize_document(self, doc):
''' Fills in a new document using the Application's handlers.
'''
for h in self._handlers:
# TODO (havocp) we need to check the 'failed' flag on each handler
# and build a composite error display. In develop mode, we want to
# somehow get these errors to the client.
h.modify_document(doc)
if h.failed:
log.error("Error running application handler %r: %s %s ", h, h.error, h.error_detail)
if settings.perform_document_validation():
doc.validate()
def on_server_loaded(self, server_context):
        ''' Invoked to execute code when the server is first started.
This method calls ``on_server_loaded`` on each handler, in order,
with the server context passed as the only argument.
'''
for h in self._handlers:
h.on_server_loaded(server_context)
def on_server_unloaded(self, server_context):
''' Invoked to execute code when the server cleanly exits. (Before
stopping the server's ``IOLoop``.)
This method calls ``on_server_unloaded`` on each handler, in order,
with the server context passed as the only argument.
.. warning::
In practice this code may not run, since servers are often killed
by a signal.
'''
for h in self._handlers:
h.on_server_unloaded(server_context)
@gen.coroutine
def on_session_created(self, session_context):
''' Invoked to execute code when a new session is created.
This method calls ``on_session_created`` on each handler, in order,
with the session context passed as the only argument.
May return a ``Future`` which will delay session creation until the
``Future`` completes.
'''
for h in self._handlers:
result = h.on_session_created(session_context)
yield yield_for_all_futures(result)
raise gen.Return(None)
@gen.coroutine
def on_session_destroyed(self, session_context):
''' Invoked to execute code when a session is destroyed.
This method calls ``on_session_destroyed`` on each handler, in order,
with the session context passed as the only argument.
Afterwards, ``session_context.destroyed`` will be ``True``.
'''
for h in self._handlers:
result = h.on_session_destroyed(session_context)
yield yield_for_all_futures(result)
raise gen.Return(None)
class ServerContext(with_metaclass(ABCMeta)):
''' A harness for server-specific information and tasks related to
collections of Bokeh sessions.
*This base class is probably not of interest to general users.*
'''
# Properties --------------------------------------------------------------
@abstractproperty
def sessions(self):
''' ``SessionContext`` instances belonging to this application.
*Subclasses must implement this method.*
'''
pass
# Public methods ----------------------------------------------------------
@abstractmethod
def add_next_tick_callback(self, callback):
''' Add a callback to be run on the next tick of the event loop.
*Subclasses must implement this method.*
Args:
callback (callable) : a callback to add
The callback will execute on the next tick of the event loop,
and should have the form ``def callback()`` (i.e. it should
not accept any arguments)
Returns:
an ID that can be used with ``remove_next_tick_callback``.
'''
pass
@abstractmethod
def add_periodic_callback(self, callback, period_milliseconds):
''' Add a callback to be run periodically until it is removed.
*Subclasses must implement this method.*
Args:
callback (callable) : a callback to add
The callback will execute periodically on the event loop
as specified, and should have the form ``def callback()``
(i.e. it should not accept any arguments)
period_milliseconds (int) : number of milliseconds to wait
between executing the callback.
Returns:
an ID that can be used with ``remove_periodic_callback``.
'''
pass
@abstractmethod
def add_timeout_callback(self, callback, timeout_milliseconds):
''' Add a callback to be run once after timeout_milliseconds.
*Subclasses must implement this method.*
Args:
callback (callable) : a callback to add
The callback will execute once on the event loop after the
timeout has passed, and should have the form ``def callback()``
(i.e. it should not accept any arguments)
timeout_milliseconds (int) : number of milliseconds to wait before
executing the callback.
Returns:
an ID that can be used with ``remove_timeout_callback``.
'''
pass
@abstractmethod
def remove_next_tick_callback(self, callback_id):
''' Remove a callback added with ``add_next_tick_callback``, before
it runs.
*Subclasses must implement this method.*
Args:
callback_id : the ID returned from ``add_next_tick_callback``
'''
pass
@abstractmethod
def remove_periodic_callback(self, callback_id):
''' Removes a callback added with ``add_periodic_callback``.
*Subclasses must implement this method.*
Args:
callback_id : the ID returned from ``add_periodic_callback``
'''
pass
@abstractmethod
def remove_timeout_callback(self, callback_id):
''' Remove a callback added with ``add_timeout_callback``, before it
runs.
*Subclasses must implement this method.*
Args:
callback_id : the ID returned from ``add_timeout_callback``
'''
pass
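    # A usage sketch for the callback API above (hypothetical ``tick``
    # function; a concrete ServerContext instance comes from the running
    # server):
    #
    #     def tick():
    #         pass
    #
    #     cb_id = server_context.add_periodic_callback(tick, 1000)
    #     server_context.remove_periodic_callback(cb_id)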
class SessionContext(with_metaclass(ABCMeta)):
''' A harness for server-specific information and tasks related to
Bokeh sessions.
*This base class is probably not of interest to general users.*
'''
def __init__(self, server_context, session_id):
'''
'''
self._server_context = server_context
self._id = session_id
# Properties --------------------------------------------------------------
@abstractproperty
def destroyed(self):
''' If ``True``, the session has been discarded and cannot be used.
A new session with the same ID could be created later but this instance
will not come back to life.
'''
pass
@property
def id(self):
''' The unique ID for the session associated with this context.
'''
return self._id
@property
def server_context(self):
        ''' The server context for this session context.
'''
return self._server_context
# Public methods ----------------------------------------------------------
@abstractmethod
def with_locked_document(self, func):
''' Runs a function with the document lock held, passing the
document to the function.
*Subclasses must implement this method.*
Args:
func (callable): function that takes a single parameter (the Document)
and returns ``None`` or a ``Future``
Returns:
a ``Future`` containing the result of the function
'''
pass
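    # A usage sketch (hypothetical ``update`` function; concrete
    # SessionContext subclasses are supplied by the server):
    #
    #     def update(doc):
    #         doc.title = "updated"
    #
    #     session_context.with_locked_document(update)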
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
|
{
"content_hash": "320880fe1d4fad1b5e8964d1bab4aba6",
"timestamp": "",
"source": "github",
"line_count": 423,
"max_line_length": 102,
"avg_line_length": 31.319148936170212,
"alnum_prop": 0.5375905797101449,
"repo_name": "stonebig/bokeh",
"id": "02fabd4acb0ec710d1b919479390bad212ea25f1",
"size": "13579",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bokeh/application/application.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5455"
},
{
"name": "CSS",
"bytes": "423978"
},
{
"name": "CoffeeScript",
"bytes": "1961885"
},
{
"name": "HTML",
"bytes": "1556638"
},
{
"name": "JavaScript",
"bytes": "4741"
},
{
"name": "Makefile",
"bytes": "5785"
},
{
"name": "Python",
"bytes": "1696641"
},
{
"name": "Shell",
"bytes": "14856"
}
],
"symlink_target": ""
}
|
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import state
class unknown_tlv(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/router-information/tlvs/tlv/unknown-tlv. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: An unknown TLV within the context. Unknown TLVs are
defined to be the set of TLVs that are not modelled
within the OpenConfig model, or are unknown to the
local system such that it cannot decode their value.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "unknown-tlv"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"router-information",
"tlvs",
"tlv",
"unknown-tlv",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/unknown_tlv/state (container)
YANG Description: Contents of an unknown TLV within the LSA
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/unknown_tlv/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Contents of an unknown TLV within the LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
from . import state
class unknown_tlv(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/router-information/tlvs/tlv/unknown-tlv. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: An unknown TLV within the context. Unknown TLVs are
defined to be the set of TLVs that are not modelled
within the OpenConfig model, or are unknown to the
local system such that it cannot decode their value.
"""
__slots__ = ("_path_helper", "_extmethods", "__state")
_yang_name = "unknown-tlv"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"router-information",
"tlvs",
"tlv",
"unknown-tlv",
]
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/unknown_tlv/state (container)
YANG Description: Contents of an unknown TLV within the LSA
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/unknown_tlv/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: Contents of an unknown TLV within the LSA
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=False)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=False,
)
state = __builtin__.property(_get_state)
_pyangbind_elements = OrderedDict([("state", state)])
|
{
"content_hash": "13cb6445f0324826085d7d87d26e02b5",
"timestamp": "",
"source": "github",
"line_count": 332,
"max_line_length": 375,
"avg_line_length": 37.825301204819276,
"alnum_prop": 0.5746137920050963,
"repo_name": "napalm-automation/napalm-yang",
"id": "ad139a7fbde7e11180961b2511106855b94c5d3b",
"size": "12582",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "napalm_yang/models/openconfig/network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/router_information/tlvs/tlv/unknown_tlv/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "370237"
},
{
"name": "Jupyter Notebook",
"bytes": "152135"
},
{
"name": "Makefile",
"bytes": "1965"
},
{
"name": "Python",
"bytes": "105688785"
},
{
"name": "Roff",
"bytes": "1632"
}
],
"symlink_target": ""
}
|
"""Keras metrics in TF-Ranking.
NOTE: For metrics that compute a ranking, ties are broken randomly. This means
that metrics may be stochastic if items with equal scores are provided.
WARNING: Some metrics (e.g. Recall or MRR) are not well-defined when there are
no relevant items (e.g. if `y_true` has a row of only zeroes). For these cases,
the TF-Ranking metrics will evaluate to `0`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from typing import Any, Dict, List, Optional
import tensorflow.compat.v2 as tf
from tensorflow_ranking.python import metrics_impl
from tensorflow_ranking.python.keras import utils
class RankingMetricKey(object):
"""Ranking metric key strings."""
# Mean Reciprocal Rank. For binary relevance.
MRR = "mrr"
# Average Relevance Position.
ARP = "arp"
# Normalized Discounted Cumulative Gain.
NDCG = "ndcg"
# Discounted Cumulative Gain.
DCG = "dcg"
# Precision. For binary relevance.
PRECISION = "precision"
# Mean Average Precision. For binary relevance.
MAP = "map"
# Intent-aware Precision. For binary relevance of subtopics.
PRECISION_IA = "precision_ia"
# Ordered Pair Accuracy.
ORDERED_PAIR_ACCURACY = "ordered_pair_accuracy"
# Alpha Discounted Cumulative Gain.
ALPHA_DCG = "alpha_dcg"
# Hits. For binary relevance.
HITS = "hits"
def get(key: str,
name: Optional[str] = None,
dtype: Optional[tf.dtypes.DType] = None,
topn: Optional[int] = None,
**kwargs: Dict[str, Any]) -> tf.keras.metrics.Metric:
"""Factory method to get a list of ranking metrics.
Example Usage:
```python
  metric = tfr.keras.metrics.get(tfr.keras.metrics.RankingMetricKey.MRR)
```
to get Mean Reciprocal Rank.
```python
  metric = tfr.keras.metrics.get(tfr.keras.metrics.RankingMetricKey.MRR,
                                 topn=2)
```
to get MRR@2.
Args:
key: An attribute of `RankingMetricKey`, defining which metric objects to
return.
name: Name of metrics.
dtype: Dtype of the metrics.
topn: Cutoff of how many items are considered in the metric.
**kwargs: Keyword arguments for the metric object.
Returns:
A tf.keras.metrics.Metric. See `_RankingMetric` signature for more details.
Raises:
ValueError: If key is unsupported.
"""
if not isinstance(key, str):
raise ValueError("Input `key` needs to be string.")
key_to_cls = {
RankingMetricKey.MRR: MRRMetric,
RankingMetricKey.ARP: ARPMetric,
RankingMetricKey.PRECISION: PrecisionMetric,
RankingMetricKey.MAP: MeanAveragePrecisionMetric,
RankingMetricKey.NDCG: NDCGMetric,
RankingMetricKey.DCG: DCGMetric,
RankingMetricKey.ORDERED_PAIR_ACCURACY: OPAMetric,
RankingMetricKey.HITS: HitsMetric,
}
metric_kwargs = {"name": name, "dtype": dtype}
if topn:
metric_kwargs.update({"topn": topn})
if kwargs:
metric_kwargs.update(kwargs)
if key in key_to_cls:
metric_cls = key_to_cls[key]
metric_obj = metric_cls(**metric_kwargs)
else:
raise ValueError("Unsupported metric: {}".format(key))
return metric_obj
def default_keras_metrics(**kwargs) -> List[tf.keras.metrics.Metric]:
"""Returns a list of ranking metrics.
Args:
**kwargs: Additional kwargs to pass to each keras metric.
Returns:
A list of metrics of type `tf.keras.metrics.Metric`.
"""
list_kwargs = [
dict(key="ndcg", topn=topn, name="metric/ndcg_{}".format(topn), **kwargs)
for topn in [1, 3, 5, 10]
] + [
dict(key="arp", name="metric/arp", **kwargs),
dict(key="ordered_pair_accuracy", name="metric/ordered_pair_accuracy",
**kwargs),
dict(key="mrr", name="metric/mrr", **kwargs),
dict(key="precision", name="metric/precision", **kwargs),
dict(key="map", name="metric/map", **kwargs),
dict(key="dcg", name="metric/dcg", **kwargs),
dict(key="ndcg", name="metric/ndcg", **kwargs)
]
return [get(**kwargs) for kwargs in list_kwargs]
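# For example (a sketch): model.compile(optimizer='adagrad',
# metrics=default_keras_metrics()) attaches NDCG@{1,3,5,10}, ARP, ordered
# pair accuracy, MRR, precision, MAP, DCG and NDCG to a Keras model.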
class _RankingMetric(tf.keras.metrics.Mean):
"""Implements base ranking metric class.
Please see tf.keras.metrics.Mean for more information about such a class and
https://www.tensorflow.org/tutorials/distribute/custom_training on how to do
customized training.
"""
def __init__(self, name=None, dtype=None, ragged=False, **kwargs):
super(_RankingMetric, self).__init__(name=name, dtype=dtype, **kwargs)
# An instance of `metrics_impl._RankingMetric`.
# Overwrite this in subclasses.
self._metric = None
self._ragged = ragged
def update_state(self, y_true, y_pred, sample_weight=None):
"""Accumulates metric statistics.
`y_true` and `y_pred` should have the same shape.
Args:
y_true: The ground truth values.
y_pred: The predicted values.
sample_weight: Optional weighting of each example. Defaults to 1. Can be a
`Tensor` whose rank is either 0, or the same rank as `y_true`, and must
be broadcastable to `y_true`.
Returns:
Update op.
"""
y_true = tf.cast(y_true, self._dtype)
y_pred = tf.cast(y_pred, self._dtype)
# TODO: Add mask argument for metric.compute() call
per_list_metric_val, per_list_metric_weights = self._metric.compute(
y_true, y_pred, sample_weight)
return super(_RankingMetric, self).update_state(
per_list_metric_val, sample_weight=per_list_metric_weights)
def get_config(self):
config = super(_RankingMetric, self).get_config()
config.update({
"ragged": self._ragged,
})
return config
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class MRRMetric(_RankingMetric):
r"""Mean reciprocal rank (MRR).
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
MRR(y, s) = max_i y_i / rank(s_i)
```
NOTE: This metric converts graded relevance to binary relevance by setting
`y_i = 1` if `y_i >= 1`.
Standalone usage:
>>> y_true = [[0., 1., 1.]]
>>> y_pred = [[3., 1., 2.]]
>>> mrr = tfr.keras.metrics.MRRMetric()
>>> mrr(y_true, y_pred).numpy()
0.5
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant([[0., 1.], [1., 2., 0.]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> mrr = tfr.keras.metrics.MRRMetric(ragged=True)
>>> mrr(y_true, y_pred).numpy()
0.75
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', metrics=[tfr.keras.metrics.MRRMetric()])
```
Definition:
$$
\text{MRR}(\{y\}, \{s\}) = \max_i \frac{\bar{y}_i}{\text{rank}(s_i)}
$$
where $\text{rank}(s_i)$ is the rank of item $i$ after sorting by scores
  $s$ with ties broken randomly and $\bar{y}_i$ are truncated labels:
$$
\bar{y}_i = \begin{cases}
1 & \text{if }y_i \geq 1 \\
0 & \text{else}
\end{cases}
$$
"""
def __init__(self, name=None, topn=None, dtype=None, ragged=False, **kwargs):
super(MRRMetric, self).__init__(name=name, dtype=dtype, ragged=ragged,
**kwargs)
self._topn = topn
self._metric = metrics_impl.MRRMetric(name=name, topn=topn, ragged=ragged)
def get_config(self):
config = super(MRRMetric, self).get_config()
config.update({
"topn": self._topn,
})
return config
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class HitsMetric(_RankingMetric):
r"""Hits@k metric.
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
Hits@k(y, s) = 1.0, if \exists i s.t. y_i >= 1 and rank(s_i) <= k
Hits@k(y, s) = 0.0, otherwise.
```
NOTE: This metric converts graded relevance to binary relevance by setting
`y_i = 1` if `y_i >= 1` and `y_i = 0` if `y_i < 1`.
NOTE: While `topn` could be left as `None` without raising an error, the Hits
metric without `topn` specified would be trivial as it simply measures the
percentage of lists with at least 1 relevant item.
Standalone usage:
>>> y_true = [[0., 1., 1.]]
>>> y_pred = [[3., 1., 2.]]
>>> hits_at_1 = tfr.keras.metrics.HitsMetric(topn=1)
>>> hits_at_1(y_true, y_pred).numpy()
0.0
>>> hits_at_2 = tfr.keras.metrics.HitsMetric(topn=2)
>>> hits_at_2(y_true, y_pred).numpy()
1.0
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant([[0., 1.], [1., 1., 0.]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> hits_at_1 = tfr.keras.metrics.HitsMetric(topn=1, ragged=True)
>>> hits_at_1(y_true, y_pred).numpy()
0.5
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', metrics=[tfr.keras.metrics.HitsMetric(topn=1)])
```
Definition:
$$
\text{Hits}@k(\{y\}, \{s\}) = \max_{i | y_i \geq 1}
\mathbf{I} [\text{rank}(s_i) \leq k]
$$
where $\text{rank}(s_i)$ is the rank of item $i$ after sorting by scores
$s$ with ties broken randomly and $y_i$ are labels.
"""
def __init__(self, name=None, topn=None, dtype=None, ragged=False, **kwargs):
super(HitsMetric, self).__init__(name=name, dtype=dtype, ragged=ragged,
**kwargs)
self._topn = topn
self._metric = metrics_impl.HitsMetric(name=name, topn=topn, ragged=ragged)
def get_config(self):
config = super(HitsMetric, self).get_config()
config.update({
"topn": self._topn,
})
return config
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class ARPMetric(_RankingMetric):
r"""Average relevance position (ARP).
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
ARP(y, s) = sum_i (y_i * rank(s_i)) / sum_j y_j
```
Standalone usage:
>>> y_true = [[0., 1., 1.]]
>>> y_pred = [[3., 1., 2.]]
>>> arp = tfr.keras.metrics.ARPMetric()
>>> arp(y_true, y_pred).numpy()
2.5
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant([[0., 1.], [1., 2., 0.]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> arp = tfr.keras.metrics.ARPMetric(ragged=True)
>>> arp(y_true, y_pred).numpy()
1.75
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', metrics=[tfr.keras.metrics.ARPMetric()])
```
Definition:
$$
\text{ARP}(\{y\}, \{s\}) =
\frac{1}{\sum_i y_i} \sum_i y_i \cdot \text{rank}(s_i)
$$
where $\text{rank}(s_i)$ is the rank of item $i$ after sorting by scores
$s$ with ties broken randomly.
"""
def __init__(self, name=None, dtype=None, ragged=False, **kwargs):
super(ARPMetric, self).__init__(name=name, dtype=dtype, ragged=ragged,
**kwargs)
self._metric = metrics_impl.ARPMetric(name=name, ragged=ragged)
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class PrecisionMetric(_RankingMetric):
r"""Precision@k (P@k).
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
  P@k(y, s) = 1/k sum_i I[rank(s_i) <= k] y_i
```
NOTE: This metric converts graded relevance to binary relevance by setting
`y_i = 1` if `y_i >= 1`.
Standalone usage:
>>> y_true = [[0., 1., 1.]]
>>> y_pred = [[3., 1., 2.]]
>>> precision_at_2 = tfr.keras.metrics.PrecisionMetric(topn=2)
>>> precision_at_2(y_true, y_pred).numpy()
0.5
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant([[0., 1.], [1., 2., 0.]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> precision_at_2 = tfr.keras.metrics.PrecisionMetric(topn=2, ragged=True)
>>> precision_at_2(y_true, y_pred).numpy()
0.5
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', metrics=[tfr.keras.metrics.PrecisionMetric()])
```
Definition:
$$
\text{P@k}(\{y\}, \{s\}) =
\frac{1}{k} \sum_i I[\text{rank}(s_i) \leq k] \bar{y}_i
$$
where:
* $\text{rank}(s_i)$ is the rank of item $i$ after sorting by scores $s$
with ties broken randomly
* $I[]$ is the indicator function:\
$I[\text{cond}] = \begin{cases}
1 & \text{if cond is true}\\
0 & \text{else}\end{cases}
$
* $\bar{y}_i$ are the truncated labels:\
$
\bar{y}_i = \begin{cases}
1 & \text{if }y_i \geq 1 \\
0 & \text{else}
\end{cases}
$
* $k = |y|$ if $k$ is not provided
"""
def __init__(self, name=None, topn=None, dtype=None, ragged=False, **kwargs):
super(PrecisionMetric, self).__init__(name=name, dtype=dtype, ragged=ragged,
**kwargs)
self._topn = topn
self._metric = metrics_impl.PrecisionMetric(name=name, topn=topn,
ragged=ragged)
def get_config(self):
config = super(PrecisionMetric, self).get_config()
config.update({
"topn": self._topn,
})
return config
# TODO Add recall metrics to TF1 in another cl.
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class RecallMetric(_RankingMetric):
r"""Recall@k (R@k).
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
  R@k(y, s) = sum_i I[rank(s_i) <= k] y_i / sum_j y_j
```
NOTE: This metric converts graded relevance to binary relevance by setting
`y_i = 1` if `y_i >= 1`.
Standalone usage:
>>> y_true = [[0., 1., 1.]]
>>> y_pred = [[3., 1., 2.]]
>>> recall_at_2 = tfr.keras.metrics.RecallMetric(topn=2)
>>> recall_at_2(y_true, y_pred).numpy()
0.5
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant([[0., 1.], [1., 2., 0.]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> recall_at_2 = tfr.keras.metrics.RecallMetric(topn=2, ragged=True)
>>> recall_at_2(y_true, y_pred).numpy()
0.75
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', metrics=[tfr.keras.metrics.RecallMetric()])
```
Definition:
$$
\text{R@k}(\{y\}, \{s\}) =
\frac{\sum_i I[\text{rank}(s_i) \leq k] \bar{y}_i}{\sum_j \bar{y}_j}
$$
where:
* $\text{rank}(s_i)$ is the rank of item $i$ after sorting by scores $s$
with ties broken randomly
* $I[]$ is the indicator function:\
$I[\text{cond}] = \begin{cases}
1 & \text{if cond is true}\\
0 & \text{else}\end{cases}
$
* $\bar{y}_i$ are the truncated labels:\
$
\bar{y}_i = \begin{cases}
1 & \text{if }y_i \geq 1 \\
0 & \text{else}
\end{cases}
$
* $k = |y|$ if $k$ is not provided
"""
def __init__(self, name=None, topn=None, dtype=None, ragged=False, **kwargs):
super(RecallMetric, self).__init__(name=name, dtype=dtype, ragged=ragged,
**kwargs)
self._topn = topn
self._metric = metrics_impl.RecallMetric(name=name, topn=topn,
ragged=ragged)
def get_config(self):
config = super(RecallMetric, self).get_config()
config.update({
"topn": self._topn,
})
return config
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class PrecisionIAMetric(_RankingMetric):
r"""Precision-IA@k (Pre-IA@k).
Intent-aware Precision@k ([Agrawal et al, 2009][agrawal2009];
[Clarke et al, 2009][clarke2009]) is a precision metric that operates on
  subtopics and is typically used for diversification tasks.
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
Pre-IA@k(y, s) = sum_t sum_i I[rank(s_i) <= k] y_{i,t} / (# of subtopics * k)
```
NOTE: The labels `y_true` should be of shape
`[batch_size, list_size, subtopic_size]`, indicating relevance for each
subtopic in the last dimension.
NOTE: This metric converts graded relevance to binary relevance by setting
`y_{i,t} = 1` if `y_{i,t} >= 1`.
Standalone usage:
>>> y_true = [[[0., 1.], [1., 0.], [1., 1.]]]
>>> y_pred = [[3., 1., 2.]]
>>> pre_ia = tfr.keras.metrics.PrecisionIAMetric()
>>> pre_ia(y_true, y_pred).numpy()
0.6666667
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant(
... [[[0., 0.], [1., 0.]], [[1., 1.], [0., 2.], [1., 0.]]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> pre_ia = tfr.keras.metrics.PrecisionIAMetric(ragged=True)
>>> pre_ia(y_true, y_pred).numpy()
0.5833334
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd',
metrics=[tfr.keras.metrics.PrecisionIAMetric()])
```
Definition:
$$
\text{Pre-IA@k}(y, s) = \frac{1}{\text{# of subtopics} \cdot k}
\sum_t \sum_i I[\text{rank}(s_i) \leq k] y_{i,t}
$$
where $\text{rank}(s_i)$ is the rank of item $i$ after sorting by scores
$s$ with ties broken randomly.
References:
- [Diversifying Search Results, Agrawal et al, 2009][agrawal2009]
- [Overview of the TREC 2009 Web Track, Clarke et al, 2009][clarke2009]
[agrawal2009]:
https://www.microsoft.com/en-us/research/publication/diversifying-search-results/
[clarke2009]: https://trec.nist.gov/pubs/trec18/papers/ENT09.OVERVIEW.pdf
"""
def __init__(self,
name=None,
topn=None,
dtype=None,
ragged=False,
**kwargs):
"""Constructor.
Args:
name: A string used as the name for this metric.
topn: A cutoff for how many examples to consider for this metric.
dtype: Data type of the metric output. See `tf.keras.metrics.Metric`.
      ragged: A bool indicating whether the supplied tensors are ragged. If
        True, `y_true`, `y_pred` and `sample_weight` (if providing per-example
        weights) need to be ragged tensors with compatible shapes.
      **kwargs: Other keyword arguments used in `tf.keras.metrics.Metric`.
"""
super(PrecisionIAMetric, self).__init__(name=name, dtype=dtype,
ragged=ragged, **kwargs)
self._topn = topn
self._metric = metrics_impl.PrecisionIAMetric(name=name, topn=topn,
ragged=ragged)
def get_config(self):
config = super(PrecisionIAMetric, self).get_config()
config.update({
"topn": self._topn,
})
return config
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class MeanAveragePrecisionMetric(_RankingMetric):
r"""Mean average precision (MAP).
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
MAP(y, s) = sum_k (P@k(y, s) * rel(k)) / sum_i y_i
rel(k) = y_i if rank(s_i) = k
```
NOTE: This metric converts graded relevance to binary relevance by setting
`y_i = 1` if `y_i >= 1`.
Standalone usage:
>>> y_true = [[0., 1., 1.]]
>>> y_pred = [[3., 1., 2.]]
>>> map_metric = tfr.keras.metrics.MeanAveragePrecisionMetric(topn=2)
>>> map_metric(y_true, y_pred).numpy()
0.25
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant([[0., 1.], [1., 2., 0.]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> map_metric = tfr.keras.metrics.MeanAveragePrecisionMetric(
... topn=2, ragged=True)
>>> map_metric(y_true, y_pred).numpy()
0.5
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd',
metrics=[tfr.keras.metrics.MeanAveragePrecisionMetric()])
```
Definition:
$$
\text{MAP}(\{y\}, \{s\}) =
\frac{\sum_k P@k(y, s) \cdot \text{rel}(k)}{\sum_j \bar{y}_j} \\
\text{rel}(k) = \max_i I[\text{rank}(s_i) = k] \bar{y}_i
$$
where:
* $P@k(y, s)$ is the Precision at rank $k$. See
`tfr.keras.metrics.PrecisionMetric`.
* $\text{rank}(s_i)$ is the rank of item $i$ after sorting by scores $s$
with ties broken randomly
* $I[]$ is the indicator function:\
$I[\text{cond}] = \begin{cases}
1 & \text{if cond is true}\\
0 & \text{else}\end{cases}
$
* $\bar{y}_i$ are the truncated labels:\
$
\bar{y}_i = \begin{cases}
1 & \text{if }y_i \geq 1 \\
0 & \text{else}
\end{cases}
$
"""
def __init__(self, name=None, topn=None, dtype=None, ragged=False, **kwargs):
super(MeanAveragePrecisionMetric, self).__init__(
name=name, dtype=dtype, ragged=ragged, **kwargs)
self._topn = topn
self._metric = metrics_impl.MeanAveragePrecisionMetric(name=name, topn=topn,
ragged=ragged)
def get_config(self):
base_config = super(MeanAveragePrecisionMetric, self).get_config()
config = {
"topn": self._topn,
}
config.update(base_config)
return config
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class NDCGMetric(_RankingMetric):
r"""Normalized discounted cumulative gain (NDCG).
Normalized discounted cumulative gain ([Järvelin et al, 2002][jarvelin2002])
is the normalized version of `tfr.keras.metrics.DCGMetric`.
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
NDCG(y, s) = DCG(y, s) / DCG(y, y)
DCG(y, s) = sum_i gain(y_i) * rank_discount(rank(s_i))
```
NOTE: The `gain_fn` and `rank_discount_fn` should be keras serializable.
Please see `tfr.keras.utils.pow_minus_1` and `tfr.keras.utils.log2_inverse` as
  examples when defining user-customized functions.
Standalone usage:
>>> y_true = [[0., 1., 1.]]
>>> y_pred = [[3., 1., 2.]]
>>> ndcg = tfr.keras.metrics.NDCGMetric()
>>> ndcg(y_true, y_pred).numpy()
0.6934264
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant([[0., 1.], [1., 2., 0.]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> ndcg = tfr.keras.metrics.NDCGMetric(ragged=True)
>>> ndcg(y_true, y_pred).numpy()
0.7974351
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', metrics=[tfr.keras.metrics.NDCGMetric()])
```
Definition:
$$
\text{NDCG}(\{y\}, \{s\}) =
\frac{\text{DCG}(\{y\}, \{s\})}{\text{DCG}(\{y\}, \{y\})} \\
\text{DCG}(\{y\}, \{s\}) =
\sum_i \text{gain}(y_i) \cdot \text{rank_discount}(\text{rank}(s_i))
$$
where $\text{rank}(s_i)$ is the rank of item $i$ after sorting by scores
$s$ with ties broken randomly.
References:
- [Cumulated gain-based evaluation of IR techniques, Järvelin et al,
2002][jarvelin2002]
[jarvelin2002]: https://dl.acm.org/doi/10.1145/582415.582418
"""
def __init__(self,
name=None,
topn=None,
gain_fn=None,
rank_discount_fn=None,
dtype=None,
ragged=False,
**kwargs):
super(NDCGMetric, self).__init__(name=name, dtype=dtype, ragged=ragged,
**kwargs)
self._topn = topn
self._gain_fn = gain_fn or utils.pow_minus_1
self._rank_discount_fn = rank_discount_fn or utils.log2_inverse
self._metric = metrics_impl.NDCGMetric(
name=name,
topn=topn,
gain_fn=self._gain_fn,
rank_discount_fn=self._rank_discount_fn,
ragged=ragged)
def get_config(self):
base_config = super(NDCGMetric, self).get_config()
config = {
"topn": self._topn,
"gain_fn": self._gain_fn,
"rank_discount_fn": self._rank_discount_fn,
}
config.update(base_config)
return config
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class DCGMetric(_RankingMetric):
r"""Discounted cumulative gain (DCG).
Discounted cumulative gain ([Järvelin et al, 2002][jarvelin2002]).
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
DCG(y, s) = sum_i gain(y_i) * rank_discount(rank(s_i))
```
NOTE: The `gain_fn` and `rank_discount_fn` should be keras serializable.
Please see `tfr.keras.utils.pow_minus_1` and `tfr.keras.utils.log2_inverse` as
  examples when defining user-customized functions.
Standalone usage:
>>> y_true = [[0., 1., 1.]]
>>> y_pred = [[3., 1., 2.]]
>>> dcg = tfr.keras.metrics.DCGMetric()
>>> dcg(y_true, y_pred).numpy()
1.1309297
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant([[0., 1.], [1., 2., 0.]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> dcg = tfr.keras.metrics.DCGMetric(ragged=True)
>>> dcg(y_true, y_pred).numpy()
2.065465
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', metrics=[tfr.keras.metrics.DCGMetric()])
```
Definition:
$$
\text{DCG}(\{y\}, \{s\}) =
\sum_i \text{gain}(y_i) \cdot \text{rank_discount}(\text{rank}(s_i))
$$
where $\text{rank}(s_i)$ is the rank of item $i$ after sorting by scores
$s$ with ties broken randomly.
References:
- [Cumulated gain-based evaluation of IR techniques, Järvelin et al,
2002][jarvelin2002]
[jarvelin2002]: https://dl.acm.org/doi/10.1145/582415.582418
"""
def __init__(self,
name=None,
topn=None,
gain_fn=None,
rank_discount_fn=None,
dtype=None,
ragged=False,
**kwargs):
super(DCGMetric, self).__init__(name=name, dtype=dtype, ragged=ragged,
**kwargs)
self._topn = topn
self._gain_fn = gain_fn or utils.pow_minus_1
self._rank_discount_fn = rank_discount_fn or utils.log2_inverse
self._metric = metrics_impl.DCGMetric(
name=name,
topn=topn,
gain_fn=self._gain_fn,
rank_discount_fn=self._rank_discount_fn,
ragged=ragged)
def get_config(self):
base_config = super(DCGMetric, self).get_config()
config = {
"topn": self._topn,
"gain_fn": self._gain_fn,
"rank_discount_fn": self._rank_discount_fn,
}
config.update(base_config)
return config
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class AlphaDCGMetric(_RankingMetric):
r"""Alpha discounted cumulative gain (alphaDCG).
Alpha discounted cumulative gain ([Clarke et al, 2008][clarke2008];
[Clarke et al, 2009][clarke2009]) is a cumulative gain metric that operates
on subtopics and is typically used for diversification tasks.
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
alphaDCG(y, s) = sum_t sum_i gain(y_{i,t}) * rank_discount(rank(s_i))
  gain(y_{i,t}) = y_{i,t} * (1 - alpha)^(sum_j I[rank(s_j) < rank(s_i)] * y_{j,t})
```
NOTE: The labels `y_true` should be of shape
`[batch_size, list_size, subtopic_size]`, indicating relevance for each
subtopic in the last dimension.
NOTE: The `rank_discount_fn` should be keras serializable. Please see
  `tfr.keras.utils.log2_inverse` as an example when defining user-customized
  functions.
Standalone usage:
>>> y_true = [[[0., 1.], [1., 0.], [1., 1.]]]
>>> y_pred = [[3., 1., 2.]]
>>> alpha_dcg = tfr.keras.metrics.AlphaDCGMetric()
>>> alpha_dcg(y_true, y_pred).numpy()
2.1963947
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant(
... [[[0., 0.], [1., 0.]], [[1., 1.], [0., 2.], [1., 0.]]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> alpha_dcg = tfr.keras.metrics.AlphaDCGMetric(ragged=True)
>>> alpha_dcg(y_true, y_pred).numpy()
1.8184297
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', metrics=[tfr.keras.metrics.AlphaDCGMetric()])
```
Definition:
$$
\alpha\text{DCG}(y, s) =
\sum_t \sum_i \text{gain}(y_{i, t}, \alpha)
\text{ rank_discount}(\text{rank}(s_i))\\
\text{gain}(y_{i, t}, \alpha) =
y_{i, t} (1 - \alpha)^{\sum_j I[\text{rank}(s_j) < \text{rank}(s_i)] y_{j, t}}
$$
where $\text{rank}(s_i)$ is the rank of item $i$ after sorting by scores
$s$ with ties broken randomly and $I[]$ is the indicator function:
$$
I[\text{cond}] = \begin{cases}
1 & \text{if cond is true}\\
0 & \text{else}\end{cases}
$$
References:
- [Novelty and diversity in information retrieval evaluation, Clarke et al,
2008][clarke2008]
- [Overview of the TREC 2009 Web Track, Clarke et al, 2009][clarke2009]
[clarke2008]: https://dl.acm.org/doi/10.1145/1390334.1390446
[clarke2009]: https://trec.nist.gov/pubs/trec18/papers/ENT09.OVERVIEW.pdf
"""
def __init__(self,
name="alpha_dcg_metric",
topn=None,
alpha=0.5,
rank_discount_fn=None,
seed=None,
dtype=None,
ragged=False,
**kwargs):
"""Construct the ranking metric class for alpha-DCG.
Args:
name: A string used as the name for this metric.
topn: A cutoff for how many examples to consider for this metric.
      alpha: A float between 0 and 1, the parameter used in the definition of
        alpha-DCG. It models the probability of assessor error in judging
        whether a document covers a subtopic of the query.
rank_discount_fn: A function of rank discounts. Default is set to
`1 / log2(rank+1)`. The `rank_discount_fn` should be keras serializable.
        Please see `tfr.keras.utils.log2_inverse` as an example when defining
        user-customized functions.
seed: The ops-level random seed used in shuffle ties in `sort_by_scores`.
dtype: Data type of the metric output. See `tf.keras.metrics.Metric`.
      ragged: A bool indicating whether the supplied tensors are ragged. If
        True, `y_true`, `y_pred` and `sample_weight` (if providing per-example
        weights) need to be ragged tensors with compatible shapes.
      **kwargs: Other keyword arguments used in `tf.keras.metrics.Metric`.
"""
super(AlphaDCGMetric, self).__init__(name=name, dtype=dtype, ragged=ragged,
**kwargs)
self._topn = topn
self._alpha = alpha
self._rank_discount_fn = rank_discount_fn or utils.log2_inverse
self._seed = seed
self._metric = metrics_impl.AlphaDCGMetric(
name=name,
topn=topn,
alpha=alpha,
rank_discount_fn=self._rank_discount_fn,
seed=seed,
ragged=ragged)
def get_config(self):
config = super(AlphaDCGMetric, self).get_config()
config.update({
"topn": self._topn,
"alpha": self._alpha,
"rank_discount_fn": self._rank_discount_fn,
"seed": self._seed,
})
return config
@tf.keras.utils.register_keras_serializable(package="tensorflow_ranking")
class OPAMetric(_RankingMetric):
r"""Ordered pair accuracy (OPA).
For each list of scores `s` in `y_pred` and list of labels `y` in `y_true`:
```
OPA(y, s) = sum_i sum_j I[s_i > s_j] I[y_i > y_j] / sum_i sum_j I[y_i > y_j]
```
NOTE: Pairs with equal labels (`y_i = y_j`) are always ignored. Pairs with
equal scores (`s_i = s_j`) are considered incorrectly ordered.
Standalone usage:
>>> y_true = [[0., 1., 2.]]
>>> y_pred = [[3., 1., 2.]]
>>> opa = tfr.keras.metrics.OPAMetric()
>>> opa(y_true, y_pred).numpy()
0.33333334
>>> # Using ragged tensors
>>> y_true = tf.ragged.constant([[0., 1.], [1., 2., 0.]])
>>> y_pred = tf.ragged.constant([[2., 1.], [2., 5., 4.]])
>>> opa = tfr.keras.metrics.OPAMetric(ragged=True)
>>> opa(y_true, y_pred).numpy()
0.5
Usage with the `compile()` API:
```python
model.compile(optimizer='sgd', metrics=[tfr.keras.metrics.OPAMetric()])
```
Definition:
$$
\text{OPA}(\{y\}, \{s\}) =
\frac{\sum_i \sum_j I[s_i > s_j] I[y_i > y_j]}{\sum_i \sum_j I[y_i > y_j]}
$$
where $I[]$ is the indicator function:
$$
I[\text{cond}] = \begin{cases}
1 & \text{if cond is true}\\
0 & \text{else}\end{cases}
$$
"""
def __init__(self, name=None, dtype=None, ragged=False, **kwargs):
super(OPAMetric, self).__init__(name=name, dtype=dtype, ragged=ragged,
**kwargs)
self._metric = metrics_impl.OPAMetric(name=name, ragged=ragged)
|
{
"content_hash": "fd54574eb91e21a9db7a84823b7e6e75",
"timestamp": "",
"source": "github",
"line_count": 1051,
"max_line_length": 83,
"avg_line_length": 30.174119885823025,
"alnum_prop": 0.6039163749881752,
"repo_name": "tensorflow/ranking",
"id": "6dfb31f64f92c13eef08e5dd75528c4dd2cb2662",
"size": "32313",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_ranking/python/keras/metrics.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "42681"
},
{
"name": "Python",
"bytes": "1396830"
},
{
"name": "Shell",
"bytes": "2133"
},
{
"name": "Starlark",
"bytes": "30704"
}
],
"symlink_target": ""
}
|
"""A simple number and datetime addition JSON API.
Run the app:
$ python examples/flaskrestful_example.py
Try the following with httpie (a cURL-like utility, http://httpie.org):
$ pip install httpie
$ http GET :5001/
$ http GET :5001/ name==Ada
$ http POST :5001/add x=40 y=2
$ http POST :5001/dateadd value=1973-04-10 addend=63
$ http POST :5001/dateadd value=2014-10-23 addend=525600 unit=minutes
"""
import datetime as dt
from flask import Flask
import flask_restful as restful
from webargs import fields, ValidationError
from webargs.flaskparser import use_args, use_kwargs, parser
app = Flask(__name__)
api = restful.Api(app)
class IndexResource(restful.Resource):
"""A welcome page."""
hello_args = {
'name': fields.Str(missing='Friend')
}
@use_args(hello_args)
def get(self, args):
return {'message': 'Welcome, {}!'.format(args['name'])}
class AddResource(restful.Resource):
"""An addition endpoint."""
add_args = {
'x': fields.Float(required=True),
'y': fields.Float(required=True),
}
@use_kwargs(add_args)
def post(self, x, y):
"""An addition endpoint."""
return {'result': x + y}
def validate_unit(val):
if val not in ['minutes', 'days']:
raise ValidationError("Unit must be either 'minutes' or 'days'.")
class DateAddResource(restful.Resource):
dateadd_args = {
'value': fields.DateTime(required=False),
'addend': fields.Int(required=True, validate=lambda val: val >= 0),
'unit': fields.Str(validate=validate_unit)
}
@use_kwargs(dateadd_args)
def post(self, value, addend, unit):
"""A datetime adder endpoint."""
value = value or dt.datetime.utcnow()
if unit == 'minutes':
delta = dt.timedelta(minutes=addend)
else:
delta = dt.timedelta(days=addend)
result = value + delta
return {'result': result.isoformat()}
# This error handler is necessary for usage with Flask-RESTful
@parser.error_handler
def handle_request_parsing_error(err):
"""webargs error handler that uses Flask-RESTful's abort function to return
a JSON error response to the client.
"""
restful.abort(422, errors=err.messages)
if __name__ == '__main__':
api.add_resource(IndexResource, '/')
api.add_resource(AddResource, '/add')
api.add_resource(DateAddResource, '/dateadd')
app.run(port=5001, debug=True)
|
{
"content_hash": "4a0abf25133c9708e3f71d7accc78327",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 79,
"avg_line_length": 28.413793103448278,
"alnum_prop": 0.642799352750809,
"repo_name": "hyunchel/webargs",
"id": "fde08fd6d26efb0220f911371e05dc7061b439da",
"size": "2496",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "examples/flaskrestful_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "124189"
}
],
"symlink_target": ""
}
|
from __future__ import with_statement
import os
import sys
import re
import string
import inspect, traceback
# get type names
from types import *
from m5.util.grammar import Grammar
debug=False
###################
# Utility functions
#
# Indent every line in string 's' by two spaces
# (except preprocessor directives).
# Used to make nested code blocks look pretty.
#
def indent(s):
return re.sub(r'(?m)^(?!#)', ' ', s)
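# For example (a sketch): given "foo;\n#ifdef X\nbar;\n", indent() prefixes
# the 'foo;' and 'bar;' lines with two spaces while the '#ifdef' line stays
# at column zero.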
#
# Munge a somewhat arbitrarily formatted piece of Python code
# (e.g. from a format 'let' block) into something whose indentation
# will get by the Python parser.
#
# The two keys here are that Python will give a syntax error if
# there's any whitespace at the beginning of the first line, and that
# all lines at the same lexical nesting level must have identical
# indentation. Unfortunately the way code literals work, an entire
# let block tends to have some initial indentation. Rather than
# trying to figure out what that is and strip it off, we prepend 'if
# 1:' to make the let code the nested block inside the if (and have
# the parser automatically deal with the indentation for us).
#
# We don't want to do this if (1) the code block is empty or (2) the
# first line of the block doesn't have any whitespace at the front.
def fixPythonIndentation(s):
# get rid of blank lines first
    s = re.sub(r'(?m)^\s*\n', '', s)
if (s != '' and re.match(r'[ \t]', s[0])):
s = 'if 1:\n' + s
return s
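# For example (a sketch): a let block whose body is
#     "    x = 1\n    y = 2\n"
# comes back as
#     "if 1:\n    x = 1\n    y = 2\n"
# which the Python parser accepts despite the original leading indentation.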
class ISAParserError(Exception):
"""Exception class for parser errors"""
def __init__(self, first, second=None):
if second is None:
self.lineno = 0
self.string = first
else:
self.lineno = first
self.string = second
def __str__(self):
return self.string
def error(*args):
raise ISAParserError(*args)
####################
# Template objects.
#
# Template objects are format strings that allow substitution from
# the attribute spaces of other objects (e.g. InstObjParams instances).
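# For example (a sketch): substituting Template(parser, '%(mnemonic)s_%(class_name)s')
# with the dict {'mnemonic': 'add', 'class_name': 'Add'} yields 'add_Add';
# labelRE below picks out the '%(...)s' substitution labels.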
labelRE = re.compile(r'(?<!%)%\(([^\)]+)\)[sd]')
class Template(object):
def __init__(self, parser, t):
self.parser = parser
self.template = t
def subst(self, d):
myDict = None
# Protect non-Python-dict substitutions (e.g. if there's a printf
# in the templated C++ code)
template = self.parser.protectNonSubstPercents(self.template)
# CPU-model-specific substitutions are handled later (in GenCode).
template = self.parser.protectCpuSymbols(template)
# Build a dict ('myDict') to use for the template substitution.
# Start with the template namespace. Make a copy since we're
# going to modify it.
myDict = self.parser.templateMap.copy()
if isinstance(d, InstObjParams):
# If we're dealing with an InstObjParams object, we need
# to be a little more sophisticated. The instruction-wide
# parameters are already formed, but the parameters which
# are only function wide still need to be generated.
compositeCode = ''
myDict.update(d.__dict__)
# The "operands" and "snippets" attributes of the InstObjParams
# objects are for internal use and not substitution.
del myDict['operands']
del myDict['snippets']
snippetLabels = [l for l in labelRE.findall(template)
if d.snippets.has_key(l)]
snippets = dict([(s, self.parser.mungeSnippet(d.snippets[s]))
for s in snippetLabels])
myDict.update(snippets)
compositeCode = ' '.join(map(str, snippets.values()))
# Add in template itself in case it references any
# operands explicitly (like Mem)
compositeCode += ' ' + template
operands = SubOperandList(self.parser, compositeCode, d.operands)
myDict['op_decl'] = operands.concatAttrStrings('op_decl')
if operands.readPC or operands.setPC:
myDict['op_decl'] += 'TheISA::PCState __parserAutoPCState;\n'
# In case there are predicated register reads and write, declare
# the variables for register indicies. It is being assumed that
# all the operands in the OperandList are also in the
# SubOperandList and in the same order. Otherwise, it is
# expected that predication would not be used for the operands.
if operands.predRead:
myDict['op_decl'] += 'uint8_t _sourceIndex = 0;\n'
if operands.predWrite:
myDict['op_decl'] += 'uint8_t M5_VAR_USED _destIndex = 0;\n'
is_src = lambda op: op.is_src
is_dest = lambda op: op.is_dest
myDict['op_src_decl'] = \
operands.concatSomeAttrStrings(is_src, 'op_src_decl')
myDict['op_dest_decl'] = \
operands.concatSomeAttrStrings(is_dest, 'op_dest_decl')
if operands.readPC:
myDict['op_src_decl'] += \
'TheISA::PCState __parserAutoPCState;\n'
if operands.setPC:
myDict['op_dest_decl'] += \
'TheISA::PCState __parserAutoPCState;\n'
myDict['op_rd'] = operands.concatAttrStrings('op_rd')
if operands.readPC:
myDict['op_rd'] = '__parserAutoPCState = xc->pcState();\n' + \
myDict['op_rd']
# Compose the op_wb string. If we're going to write back the
# PC state because we changed some of its elements, we'll need to
# do that as early as possible. That allows later uncoordinated
# modifications to the PC to layer appropriately.
reordered = list(operands.items)
reordered.reverse()
op_wb_str = ''
pcWbStr = 'xc->pcState(__parserAutoPCState);\n'
for op_desc in reordered:
if op_desc.isPCPart() and op_desc.is_dest:
op_wb_str = op_desc.op_wb + pcWbStr + op_wb_str
pcWbStr = ''
else:
op_wb_str = op_desc.op_wb + op_wb_str
myDict['op_wb'] = op_wb_str
elif isinstance(d, dict):
# if the argument is a dictionary, we just use it.
myDict.update(d)
elif hasattr(d, '__dict__'):
# if the argument is an object, we use its attribute map.
myDict.update(d.__dict__)
else:
            raise TypeError, "Template.subst() arg must be or have a dictionary"
return template % myDict
# Convert to string. This handles the case when a template with a
# CPU-specific term gets interpolated into another template or into
# an output block.
def __str__(self):
return self.parser.expandCpuSymbolsToString(self.template)
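# An illustrative sketch of Template in use (not executed here; the
# template text and dict are assumed examples, and 'parser' stands for
# an ISAParser instance):
#
#   t = Template(parser, 'int %(name)s = %(init)s;')
#   t.subst({'name': 'foo', 'init': '0'})  # -> 'int foo = 0;'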
################
# Format object.
#
# A format object encapsulates an instruction format. It must provide
# a defineInst() method that generates the code for an instruction
# definition.
class Format(object):
def __init__(self, id, params, code):
self.id = id
self.params = params
label = 'def format ' + id
self.user_code = compile(fixPythonIndentation(code), label, 'exec')
param_list = string.join(params, ", ")
f = '''def defInst(_code, _context, %s):
my_locals = vars().copy()
exec _code in _context, my_locals
return my_locals\n''' % param_list
c = compile(f, label + ' wrapper', 'exec')
exec c
self.func = defInst
def defineInst(self, parser, name, args, lineno):
parser.updateExportContext()
context = parser.exportContext.copy()
        # Build the capitalized mnemonic; start with '' so an empty name
        # cannot leave Name unbound when it is used below.
        Name = ''
        if len(name):
            Name = name[0].upper()
            if len(name) > 1:
                Name += name[1:]
context.update({ 'name' : name, 'Name' : Name })
try:
vars = self.func(self.user_code, context, *args[0], **args[1])
except Exception, exc:
if debug:
raise
error(lineno, 'error defining "%s": %s.' % (name, exc))
for k in vars.keys():
if k not in ('header_output', 'decoder_output',
'exec_output', 'decode_block'):
del vars[k]
return GenCode(parser, **vars)
# Special null format to catch an implicit-format instruction
# definition outside of any format block.
class NoFormat(object):
def __init__(self):
self.defaultInst = ''
def defineInst(self, parser, name, args, lineno):
error(lineno,
'instruction definition "%s" with no active format!' % name)
###############
# GenCode class
#
# The GenCode class encapsulates generated code destined for various
# output files. The header_output and decoder_output attributes are
# strings containing code destined for decoder.hh and decoder.cc
# respectively. The decode_block attribute contains code to be
# incorporated in the decode function itself (that will also end up in
# decoder.cc). The exec_output attribute is a dictionary with a key
# for each CPU model name; the value associated with a particular key
# is the string of code for that CPU model's exec.cc file. The
# has_decode_default attribute is used in the decode block to allow
# explicit default clauses to override default default clauses.
class GenCode(object):
# Constructor. At this point we substitute out all CPU-specific
# symbols. For the exec output, these go into the per-model
# dictionary. For all other output types they get collapsed into
# a single string.
def __init__(self, parser,
header_output = '', decoder_output = '', exec_output = '',
decode_block = '', has_decode_default = False):
self.parser = parser
self.header_output = parser.expandCpuSymbolsToString(header_output)
self.decoder_output = parser.expandCpuSymbolsToString(decoder_output)
self.exec_output = exec_output
self.decode_block = decode_block
self.has_decode_default = has_decode_default
# Write these code chunks out to the filesystem. They will be properly
    # interwoven by write_top_level_files().
def emit(self):
if self.header_output:
self.parser.get_file('header').write(self.header_output)
if self.decoder_output:
self.parser.get_file('decoder').write(self.decoder_output)
if self.exec_output:
self.parser.get_file('exec').write(self.exec_output)
if self.decode_block:
self.parser.get_file('decode_block').write(self.decode_block)
# Override '+' operator: generate a new GenCode object that
# concatenates all the individual strings in the operands.
def __add__(self, other):
return GenCode(self.parser,
self.header_output + other.header_output,
self.decoder_output + other.decoder_output,
self.exec_output + other.exec_output,
self.decode_block + other.decode_block,
self.has_decode_default or other.has_decode_default)
# Prepend a string (typically a comment) to all the strings.
def prepend_all(self, pre):
self.header_output = pre + self.header_output
self.decoder_output = pre + self.decoder_output
self.decode_block = pre + self.decode_block
self.exec_output = pre + self.exec_output
# Wrap the decode block in a pair of strings (e.g., 'case foo:'
# and 'break;'). Used to build the big nested switch statement.
def wrap_decode_block(self, pre, post = ''):
self.decode_block = pre + indent(self.decode_block) + post
#####################################################################
#
# Bitfield Operator Support
#
#####################################################################
bitOp1ArgRE = re.compile(r'<\s*(\w+)\s*:\s*>')
bitOpWordRE = re.compile(r'(?<![\w\.])([\w\.]+)<\s*(\w+)\s*:\s*(\w+)\s*>')
bitOpExprRE = re.compile(r'\)<\s*(\w+)\s*:\s*(\w+)\s*>')
def substBitOps(code):
    # first convert single-bit selectors to two-index form,
    # i.e., <n:> --> <n:n> (the form bitOp1ArgRE accepts)
code = bitOp1ArgRE.sub(r'<\1:\1>', code)
# simple case: selector applied to ID (name)
# i.e., foo<a:b> --> bits(foo, a, b)
code = bitOpWordRE.sub(r'bits(\1, \2, \3)', code)
# if selector is applied to expression (ending in ')'),
# we need to search backward for matching '('
match = bitOpExprRE.search(code)
while match:
exprEnd = match.start()
here = exprEnd - 1
nestLevel = 1
while nestLevel > 0:
if code[here] == '(':
nestLevel -= 1
elif code[here] == ')':
nestLevel += 1
here -= 1
if here < 0:
sys.exit("Didn't find '('!")
exprStart = here+1
newExpr = r'bits(%s, %s, %s)' % (code[exprStart:exprEnd+1],
match.group(1), match.group(2))
code = code[:exprStart] + newExpr + code[match.end():]
match = bitOpExprRE.search(code)
return code
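# For illustration, the rewriting above maps (inputs are assumed examples):
#
#   'Rb<7:0>'        -> 'bits(Rb, 7, 0)'         (selector on a name)
#   '(Ra + Rb)<4:2>' -> 'bits((Ra + Rb), 4, 2)'  (selector on an expression)
#
# with single-bit selectors first normalized to the two-index form that
# these patterns handle.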
#####################################################################
#
# Code Parser
#
# The remaining code is the support for automatically extracting
# instruction characteristics from pseudocode.
#
#####################################################################
# Force the argument to be a list. Useful for flags, where a caller
# can specify a singleton flag or a list of flags. Also useful for
# converting tuples to lists so they can be modified.
def makeList(arg):
if isinstance(arg, list):
return arg
elif isinstance(arg, tuple):
return list(arg)
elif not arg:
return []
else:
return [ arg ]
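# For example (assumed inputs): makeList('IsLoad') -> ['IsLoad'],
# makeList(('a', 'b')) -> ['a', 'b'], and makeList(None) -> [].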
class Operand(object):
'''Base class for operand descriptors. An instance of this class
(or actually a class derived from this one) represents a specific
    operand for a code block (e.g., "Rc.sq" as a dest). Intermediate
    derived classes encapsulate the traits of a particular operand
type (e.g., "32-bit integer register").'''
def buildReadCode(self, func = None):
subst_dict = {"name": self.base_name,
"func": func,
"reg_idx": self.reg_spec,
"ctype": self.ctype}
if hasattr(self, 'src_reg_idx'):
subst_dict['op_idx'] = self.src_reg_idx
code = self.read_code % subst_dict
return '%s = %s;\n' % (self.base_name, code)
def buildWriteCode(self, func = None):
subst_dict = {"name": self.base_name,
"func": func,
"reg_idx": self.reg_spec,
"ctype": self.ctype,
"final_val": self.base_name}
if hasattr(self, 'dest_reg_idx'):
subst_dict['op_idx'] = self.dest_reg_idx
code = self.write_code % subst_dict
return '''
{
%s final_val = %s;
%s;
if (traceData) { traceData->setData(final_val); }
}''' % (self.dflt_ctype, self.base_name, code)
def __init__(self, parser, full_name, ext, is_src, is_dest):
self.full_name = full_name
self.ext = ext
self.is_src = is_src
self.is_dest = is_dest
# The 'effective extension' (eff_ext) is either the actual
# extension, if one was explicitly provided, or the default.
if ext:
self.eff_ext = ext
elif hasattr(self, 'dflt_ext'):
self.eff_ext = self.dflt_ext
if hasattr(self, 'eff_ext'):
self.ctype = parser.operandTypeMap[self.eff_ext]
# Finalize additional fields (primarily code fields). This step
# is done separately since some of these fields may depend on the
# register index enumeration that hasn't been performed yet at the
# time of __init__(). The register index enumeration is affected
# by predicated register reads/writes. Hence, we forward the flags
# that indicate whether or not predication is in use.
def finalize(self, predRead, predWrite):
self.flags = self.getFlags()
self.constructor = self.makeConstructor(predRead, predWrite)
self.op_decl = self.makeDecl()
if self.is_src:
self.op_rd = self.makeRead(predRead)
self.op_src_decl = self.makeDecl()
else:
self.op_rd = ''
self.op_src_decl = ''
if self.is_dest:
self.op_wb = self.makeWrite(predWrite)
self.op_dest_decl = self.makeDecl()
else:
self.op_wb = ''
self.op_dest_decl = ''
def isMem(self):
return 0
def isReg(self):
return 0
def isFloatReg(self):
return 0
def isIntReg(self):
return 0
def isCCReg(self):
return 0
def isControlReg(self):
return 0
def isPCState(self):
return 0
def isPCPart(self):
return self.isPCState() and self.reg_spec
def hasReadPred(self):
return self.read_predicate != None
def hasWritePred(self):
return self.write_predicate != None
def getFlags(self):
# note the empty slice '[:]' gives us a copy of self.flags[0]
# instead of a reference to it
my_flags = self.flags[0][:]
if self.is_src:
my_flags += self.flags[1]
if self.is_dest:
my_flags += self.flags[2]
return my_flags
def makeDecl(self):
# Note that initializations in the declarations are solely
# to avoid 'uninitialized variable' errors from the compiler.
        return self.ctype + ' ' + self.base_name + ' = 0;\n'
class IntRegOperand(Operand):
def isReg(self):
return 1
def isIntReg(self):
return 1
def makeConstructor(self, predRead, predWrite):
c_src = ''
c_dest = ''
if self.is_src:
c_src = '\n\t_srcRegIdx[_numSrcRegs++] = %s;' % (self.reg_spec)
if self.hasReadPred():
c_src = '\n\tif (%s) {%s\n\t}' % \
(self.read_predicate, c_src)
if self.is_dest:
c_dest = '\n\t_destRegIdx[_numDestRegs++] = %s;' % \
(self.reg_spec)
c_dest += '\n\t_numIntDestRegs++;'
if self.hasWritePred():
c_dest = '\n\tif (%s) {%s\n\t}' % \
(self.write_predicate, c_dest)
return c_src + c_dest
def makeRead(self, predRead):
if (self.ctype == 'float' or self.ctype == 'double'):
error('Attempt to read integer register as FP')
if self.read_code != None:
return self.buildReadCode('readIntRegOperand')
int_reg_val = ''
if predRead:
int_reg_val = 'xc->readIntRegOperand(this, _sourceIndex++)'
if self.hasReadPred():
int_reg_val = '(%s) ? %s : 0' % \
(self.read_predicate, int_reg_val)
else:
int_reg_val = 'xc->readIntRegOperand(this, %d)' % self.src_reg_idx
return '%s = %s;\n' % (self.base_name, int_reg_val)
def makeWrite(self, predWrite):
if (self.ctype == 'float' or self.ctype == 'double'):
error('Attempt to write integer register as FP')
if self.write_code != None:
return self.buildWriteCode('setIntRegOperand')
if predWrite:
wp = 'true'
if self.hasWritePred():
wp = self.write_predicate
wcond = 'if (%s)' % (wp)
windex = '_destIndex++'
else:
wcond = ''
windex = '%d' % self.dest_reg_idx
wb = '''
%s
{
%s final_val = %s;
xc->setIntRegOperand(this, %s, final_val);\n
if (traceData) { traceData->setData(final_val); }
}''' % (wcond, self.ctype, self.base_name, windex)
return wb
class FloatRegOperand(Operand):
def isReg(self):
return 1
def isFloatReg(self):
return 1
def makeConstructor(self, predRead, predWrite):
c_src = ''
c_dest = ''
if self.is_src:
c_src = '\n\t_srcRegIdx[_numSrcRegs++] = %s + FP_Reg_Base;' % \
(self.reg_spec)
if self.is_dest:
c_dest = \
'\n\t_destRegIdx[_numDestRegs++] = %s + FP_Reg_Base;' % \
(self.reg_spec)
c_dest += '\n\t_numFPDestRegs++;'
return c_src + c_dest
def makeRead(self, predRead):
bit_select = 0
if (self.ctype == 'float' or self.ctype == 'double'):
func = 'readFloatRegOperand'
else:
func = 'readFloatRegOperandBits'
if self.read_code != None:
return self.buildReadCode(func)
if predRead:
rindex = '_sourceIndex++'
else:
rindex = '%d' % self.src_reg_idx
return '%s = xc->%s(this, %s);\n' % \
(self.base_name, func, rindex)
def makeWrite(self, predWrite):
if (self.ctype == 'float' or self.ctype == 'double'):
func = 'setFloatRegOperand'
else:
func = 'setFloatRegOperandBits'
if self.write_code != None:
return self.buildWriteCode(func)
if predWrite:
wp = '_destIndex++'
else:
wp = '%d' % self.dest_reg_idx
wp = 'xc->%s(this, %s, final_val);' % (func, wp)
wb = '''
{
%s final_val = %s;
%s\n
if (traceData) { traceData->setData(final_val); }
}''' % (self.ctype, self.base_name, wp)
return wb
class CCRegOperand(Operand):
def isReg(self):
return 1
def isCCReg(self):
return 1
def makeConstructor(self, predRead, predWrite):
c_src = ''
c_dest = ''
if self.is_src:
c_src = '\n\t_srcRegIdx[_numSrcRegs++] = %s + CC_Reg_Base;' % \
(self.reg_spec)
if self.hasReadPred():
c_src = '\n\tif (%s) {%s\n\t}' % \
(self.read_predicate, c_src)
if self.is_dest:
c_dest = \
'\n\t_destRegIdx[_numDestRegs++] = %s + CC_Reg_Base;' % \
(self.reg_spec)
c_dest += '\n\t_numCCDestRegs++;'
if self.hasWritePred():
c_dest = '\n\tif (%s) {%s\n\t}' % \
(self.write_predicate, c_dest)
return c_src + c_dest
def makeRead(self, predRead):
if (self.ctype == 'float' or self.ctype == 'double'):
error('Attempt to read condition-code register as FP')
if self.read_code != None:
return self.buildReadCode('readCCRegOperand')
int_reg_val = ''
if predRead:
int_reg_val = 'xc->readCCRegOperand(this, _sourceIndex++)'
if self.hasReadPred():
int_reg_val = '(%s) ? %s : 0' % \
(self.read_predicate, int_reg_val)
else:
int_reg_val = 'xc->readCCRegOperand(this, %d)' % self.src_reg_idx
return '%s = %s;\n' % (self.base_name, int_reg_val)
def makeWrite(self, predWrite):
if (self.ctype == 'float' or self.ctype == 'double'):
error('Attempt to write condition-code register as FP')
if self.write_code != None:
return self.buildWriteCode('setCCRegOperand')
if predWrite:
wp = 'true'
if self.hasWritePred():
wp = self.write_predicate
wcond = 'if (%s)' % (wp)
windex = '_destIndex++'
else:
wcond = ''
windex = '%d' % self.dest_reg_idx
wb = '''
%s
{
%s final_val = %s;
xc->setCCRegOperand(this, %s, final_val);\n
if (traceData) { traceData->setData(final_val); }
}''' % (wcond, self.ctype, self.base_name, windex)
return wb
class ControlRegOperand(Operand):
def isReg(self):
return 1
def isControlReg(self):
return 1
def makeConstructor(self, predRead, predWrite):
c_src = ''
c_dest = ''
if self.is_src:
c_src = \
'\n\t_srcRegIdx[_numSrcRegs++] = %s + Misc_Reg_Base;' % \
(self.reg_spec)
if self.is_dest:
c_dest = \
'\n\t_destRegIdx[_numDestRegs++] = %s + Misc_Reg_Base;' % \
(self.reg_spec)
return c_src + c_dest
def makeRead(self, predRead):
bit_select = 0
if (self.ctype == 'float' or self.ctype == 'double'):
error('Attempt to read control register as FP')
if self.read_code != None:
return self.buildReadCode('readMiscRegOperand')
if predRead:
rindex = '_sourceIndex++'
else:
rindex = '%d' % self.src_reg_idx
return '%s = xc->readMiscRegOperand(this, %s);\n' % \
(self.base_name, rindex)
def makeWrite(self, predWrite):
if (self.ctype == 'float' or self.ctype == 'double'):
error('Attempt to write control register as FP')
if self.write_code != None:
return self.buildWriteCode('setMiscRegOperand')
if predWrite:
windex = '_destIndex++'
else:
windex = '%d' % self.dest_reg_idx
wb = 'xc->setMiscRegOperand(this, %s, %s);\n' % \
(windex, self.base_name)
wb += 'if (traceData) { traceData->setData(%s); }' % \
self.base_name
return wb
class MemOperand(Operand):
def isMem(self):
return 1
def makeConstructor(self, predRead, predWrite):
return ''
def makeDecl(self):
# Declare memory data variable.
return '%s %s;\n' % (self.ctype, self.base_name)
def makeRead(self, predRead):
if self.read_code != None:
return self.buildReadCode()
return ''
def makeWrite(self, predWrite):
if self.write_code != None:
return self.buildWriteCode()
return ''
class PCStateOperand(Operand):
def makeConstructor(self, predRead, predWrite):
return ''
def makeRead(self, predRead):
if self.reg_spec:
# A component of the PC state.
return '%s = __parserAutoPCState.%s();\n' % \
(self.base_name, self.reg_spec)
else:
# The whole PC state itself.
return '%s = xc->pcState();\n' % self.base_name
def makeWrite(self, predWrite):
if self.reg_spec:
# A component of the PC state.
return '__parserAutoPCState.%s(%s);\n' % \
(self.reg_spec, self.base_name)
else:
# The whole PC state itself.
return 'xc->pcState(%s);\n' % self.base_name
def makeDecl(self):
ctype = 'TheISA::PCState'
if self.isPCPart():
ctype = self.ctype
# Note that initializations in the declarations are solely
# to avoid 'uninitialized variable' errors from the compiler.
return '%s %s = 0;\n' % (ctype, self.base_name)
def isPCState(self):
return 1
class OperandList(object):
'''Find all the operands in the given code block. Returns an operand
descriptor list (instance of class OperandList).'''
def __init__(self, parser, code):
self.items = []
self.bases = {}
# delete strings and comments so we don't match on operands inside
for regEx in (stringRE, commentRE):
code = regEx.sub('', code)
# search for operands
next_pos = 0
while 1:
match = parser.operandsRE.search(code, next_pos)
if not match:
# no more matches: we're done
break
op = match.groups()
# regexp groups are operand full name, base, and extension
(op_full, op_base, op_ext) = op
# if the token following the operand is an assignment, this is
# a destination (LHS), else it's a source (RHS)
is_dest = (assignRE.match(code, match.end()) != None)
is_src = not is_dest
# see if we've already seen this one
op_desc = self.find_base(op_base)
if op_desc:
if op_desc.ext != op_ext:
error('Inconsistent extensions for operand %s' % \
op_base)
op_desc.is_src = op_desc.is_src or is_src
op_desc.is_dest = op_desc.is_dest or is_dest
else:
# new operand: create new descriptor
op_desc = parser.operandNameMap[op_base](parser,
op_full, op_ext, is_src, is_dest)
self.append(op_desc)
# start next search after end of current match
next_pos = match.end()
self.sort()
# enumerate source & dest register operands... used in building
# constructor later
self.numSrcRegs = 0
self.numDestRegs = 0
self.numFPDestRegs = 0
self.numIntDestRegs = 0
self.numCCDestRegs = 0
self.numMiscDestRegs = 0
self.memOperand = None
# Flags to keep track if one or more operands are to be read/written
# conditionally.
self.predRead = False
self.predWrite = False
for op_desc in self.items:
if op_desc.isReg():
if op_desc.is_src:
op_desc.src_reg_idx = self.numSrcRegs
self.numSrcRegs += 1
if op_desc.is_dest:
op_desc.dest_reg_idx = self.numDestRegs
self.numDestRegs += 1
if op_desc.isFloatReg():
self.numFPDestRegs += 1
elif op_desc.isIntReg():
self.numIntDestRegs += 1
elif op_desc.isCCReg():
self.numCCDestRegs += 1
elif op_desc.isControlReg():
self.numMiscDestRegs += 1
elif op_desc.isMem():
if self.memOperand:
error("Code block has more than one memory operand.")
self.memOperand = op_desc
# Check if this operand has read/write predication. If true, then
# the microop will dynamically index source/dest registers.
self.predRead = self.predRead or op_desc.hasReadPred()
self.predWrite = self.predWrite or op_desc.hasWritePred()
if parser.maxInstSrcRegs < self.numSrcRegs:
parser.maxInstSrcRegs = self.numSrcRegs
if parser.maxInstDestRegs < self.numDestRegs:
parser.maxInstDestRegs = self.numDestRegs
if parser.maxMiscDestRegs < self.numMiscDestRegs:
parser.maxMiscDestRegs = self.numMiscDestRegs
# now make a final pass to finalize op_desc fields that may depend
# on the register enumeration
for op_desc in self.items:
op_desc.finalize(self.predRead, self.predWrite)
def __len__(self):
return len(self.items)
def __getitem__(self, index):
return self.items[index]
def append(self, op_desc):
self.items.append(op_desc)
self.bases[op_desc.base_name] = op_desc
def find_base(self, base_name):
# like self.bases[base_name], but returns None if not found
# (rather than raising exception)
return self.bases.get(base_name)
# internal helper function for concat[Some]Attr{Strings|Lists}
def __internalConcatAttrs(self, attr_name, filter, result):
for op_desc in self.items:
if filter(op_desc):
result += getattr(op_desc, attr_name)
return result
# return a single string that is the concatenation of the (string)
# values of the specified attribute for all operands
def concatAttrStrings(self, attr_name):
return self.__internalConcatAttrs(attr_name, lambda x: 1, '')
# like concatAttrStrings, but only include the values for the operands
# for which the provided filter function returns true
def concatSomeAttrStrings(self, filter, attr_name):
return self.__internalConcatAttrs(attr_name, filter, '')
# return a single list that is the concatenation of the (list)
# values of the specified attribute for all operands
def concatAttrLists(self, attr_name):
return self.__internalConcatAttrs(attr_name, lambda x: 1, [])
# like concatAttrLists, but only include the values for the operands
# for which the provided filter function returns true
def concatSomeAttrLists(self, filter, attr_name):
return self.__internalConcatAttrs(attr_name, filter, [])
def sort(self):
self.items.sort(lambda a, b: a.sort_pri - b.sort_pri)
class SubOperandList(OperandList):
    '''Find all the operands in the given code block that also appear in
    the given master list. Returns an operand descriptor list (instance
    of class SubOperandList).'''
def __init__(self, parser, code, master_list):
self.items = []
self.bases = {}
# delete strings and comments so we don't match on operands inside
for regEx in (stringRE, commentRE):
code = regEx.sub('', code)
# search for operands
next_pos = 0
while 1:
match = parser.operandsRE.search(code, next_pos)
if not match:
# no more matches: we're done
break
op = match.groups()
# regexp groups are operand full name, base, and extension
(op_full, op_base, op_ext) = op
# find this op in the master list
op_desc = master_list.find_base(op_base)
if not op_desc:
error('Found operand %s which is not in the master list!'
% op_base)
else:
# See if we've already found this operand
op_desc = self.find_base(op_base)
if not op_desc:
# if not, add a reference to it to this sub list
self.append(master_list.bases[op_base])
# start next search after end of current match
next_pos = match.end()
self.sort()
self.memOperand = None
# Whether the whole PC needs to be read so parts of it can be accessed
self.readPC = False
# Whether the whole PC needs to be written after parts of it were
# changed
self.setPC = False
# Whether this instruction manipulates the whole PC or parts of it.
# Mixing the two is a bad idea and flagged as an error.
self.pcPart = None
# Flags to keep track if one or more operands are to be read/written
# conditionally.
self.predRead = False
self.predWrite = False
for op_desc in self.items:
if op_desc.isPCPart():
self.readPC = True
if op_desc.is_dest:
self.setPC = True
if op_desc.isPCState():
if self.pcPart is not None:
if self.pcPart and not op_desc.isPCPart() or \
not self.pcPart and op_desc.isPCPart():
error("Mixed whole and partial PC state operands.")
self.pcPart = op_desc.isPCPart()
if op_desc.isMem():
if self.memOperand:
error("Code block has more than one memory operand.")
self.memOperand = op_desc
# Check if this operand has read/write predication. If true, then
# the microop will dynamically index source/dest registers.
self.predRead = self.predRead or op_desc.hasReadPred()
self.predWrite = self.predWrite or op_desc.hasWritePred()
# Regular expression object to match C++ strings
stringRE = re.compile(r'"([^"\\]|\\.)*"')
# Regular expression object to match C++ comments
# (used in findOperands())
commentRE = re.compile(r'(^)?[^\S\n]*/(?:\*(.*?)\*/[^\S\n]*|/[^\n]*)($)?',
re.DOTALL | re.MULTILINE)
# Regular expression object to match assignment statements (used in
# findOperands()). If the code immediately following the first
# appearance of the operand matches this regex, then the operand
# appears to be on the LHS of an assignment, and is thus a
# destination. Basically we're looking for an '=' that's not '=='.
# The heinous tangle before that handles the case where the operand
# has an array subscript.
assignRE = re.compile(r'(\[[^\]]+\])?\s*=(?!=)', re.MULTILINE)
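# For illustration (assumed snippets): in "Ra = Rb + 1;" the text after
# "Ra" matches assignRE, so Ra is classified as a destination, while Rb
# (followed by " + 1;") is a source; the optional subscript group handles
# forms like "Ra[0] = ...".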
def makeFlagConstructor(flag_list):
if len(flag_list) == 0:
return ''
# filter out repeated flags
flag_list.sort()
i = 1
while i < len(flag_list):
if flag_list[i] == flag_list[i-1]:
del flag_list[i]
else:
i += 1
pre = '\n\tflags['
post = '] = true;'
code = pre + string.join(flag_list, post + pre) + post
return code
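# Sketch of the generated text (assumed input):
#
#   makeFlagConstructor(['IsMemRef', 'IsLoad'])
#     -> '\n\tflags[IsLoad] = true;\n\tflags[IsMemRef] = true;'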
# Assume all instruction flags are of the form 'IsFoo'
instFlagRE = re.compile(r'Is.*')
# OpClass constants end in 'Op' except No_OpClass
opClassRE = re.compile(r'.*Op|No_OpClass')
class InstObjParams(object):
def __init__(self, parser, mnem, class_name, base_class = '',
snippets = {}, opt_args = []):
self.mnemonic = mnem
self.class_name = class_name
self.base_class = base_class
if not isinstance(snippets, dict):
snippets = {'code' : snippets}
compositeCode = ' '.join(map(str, snippets.values()))
self.snippets = snippets
self.operands = OperandList(parser, compositeCode)
# The header of the constructor declares the variables to be used
# in the body of the constructor.
header = ''
header += '\n\t_numSrcRegs = 0;'
header += '\n\t_numDestRegs = 0;'
header += '\n\t_numFPDestRegs = 0;'
header += '\n\t_numIntDestRegs = 0;'
header += '\n\t_numCCDestRegs = 0;'
self.constructor = header + \
self.operands.concatAttrStrings('constructor')
self.flags = self.operands.concatAttrLists('flags')
self.op_class = None
# Optional arguments are assumed to be either StaticInst flags
# or an OpClass value. To avoid having to import a complete
# list of these values to match against, we do it ad-hoc
# with regexps.
for oa in opt_args:
if instFlagRE.match(oa):
self.flags.append(oa)
elif opClassRE.match(oa):
self.op_class = oa
else:
error('InstObjParams: optional arg "%s" not recognized '
'as StaticInst::Flag or OpClass.' % oa)
# Make a basic guess on the operand class if not set.
# These are good enough for most cases.
if not self.op_class:
if 'IsStore' in self.flags:
# The order matters here: 'IsFloating' and 'IsInteger' are
# usually set in FP instructions because of the base
# register
if 'IsFloating' in self.flags:
self.op_class = 'FloatMemWriteOp'
else:
self.op_class = 'MemWriteOp'
elif 'IsLoad' in self.flags or 'IsPrefetch' in self.flags:
# The order matters here: 'IsFloating' and 'IsInteger' are
# usually set in FP instructions because of the base
# register
if 'IsFloating' in self.flags:
self.op_class = 'FloatMemReadOp'
else:
self.op_class = 'MemReadOp'
elif 'IsFloating' in self.flags:
self.op_class = 'FloatAddOp'
else:
self.op_class = 'IntAluOp'
        # add flag initialization to the constructor here to include
# any flags added via opt_args
self.constructor += makeFlagConstructor(self.flags)
# if 'IsFloating' is set, add call to the FP enable check
# function (which should be provided by isa_desc via a declare)
if 'IsFloating' in self.flags:
self.fp_enable_check = 'fault = checkFpEnableFault(xc);'
else:
self.fp_enable_check = ''
##############
# Stack: a simple stack object. Used for both formats (formatStack)
# and default cases (defaultStack). Simply wraps a list to give more
# stack-like syntax and enable initialization with an argument list
# (as opposed to an argument that's a list).
class Stack(list):
def __init__(self, *items):
list.__init__(self, items)
def push(self, item):
        self.append(item)
def top(self):
return self[-1]
# Format a file include stack backtrace as a string
def backtrace(filename_stack):
fmt = "In file included from %s:"
return "\n".join([fmt % f for f in filename_stack])
#######################
#
# LineTracker: track filenames along with line numbers in PLY lineno fields.
# PLY explicitly doesn't do anything with 'lineno' except propagate
# it. This class lets us tie filenames with the line numbers with a
# minimum of disruption to existing increment code.
#
class LineTracker(object):
def __init__(self, filename, lineno=1):
self.filename = filename
self.lineno = lineno
# Overload '+=' for increments. We need to create a new object on
# each update else every token ends up referencing the same
# constantly incrementing instance.
def __iadd__(self, incr):
return LineTracker(self.filename, self.lineno + incr)
def __str__(self):
return "%s:%d" % (self.filename, self.lineno)
# In case there are places where someone really expects a number
def __int__(self):
return self.lineno
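# A minimal usage sketch (the filename is an assumed example):
#
#   lt = LineTracker('foo.isa')   # foo.isa:1
#   lt += 1                       # a *new* tracker at foo.isa:2, so any
#                                 # token still holding the old object
#                                 # keeps reporting foo.isa:1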
#######################
#
# ISA Parser
# parses ISA DSL and emits C++ headers and source
#
class ISAParser(Grammar):
class CpuModel(object):
def __init__(self, name, filename, includes, strings):
self.name = name
self.filename = filename
self.includes = includes
self.strings = strings
def __init__(self, output_dir):
super(ISAParser, self).__init__()
self.output_dir = output_dir
self.filename = None # for output file watermarking/scaremongering
self.cpuModels = [
ISAParser.CpuModel('ExecContext',
'generic_cpu_exec.cc',
'#include "cpu/exec_context.hh"',
{ "CPU_exec_context" : "ExecContext" }),
]
# variable to hold templates
self.templateMap = {}
# This dictionary maps format name strings to Format objects.
self.formatMap = {}
        # Track open files and, if applicable, how many chunks each has
        # been split into so far.
self.files = {}
self.splits = {}
# isa_name / namespace identifier from namespace declaration.
# before the namespace declaration, None.
self.isa_name = None
self.namespace = None
# The format stack.
self.formatStack = Stack(NoFormat())
# The default case stack.
self.defaultStack = Stack(None)
# Stack that tracks current file and line number. Each
# element is a tuple (filename, lineno) that records the
# *current* filename and the line number in the *previous*
# file where it was included.
self.fileNameStack = Stack()
symbols = ('makeList', 're', 'string')
self.exportContext = dict([(s, eval(s)) for s in symbols])
self.maxInstSrcRegs = 0
self.maxInstDestRegs = 0
self.maxMiscDestRegs = 0
def __getitem__(self, i): # Allow object (self) to be
return getattr(self, i) # passed to %-substitutions
# Change the file suffix of a base filename:
# (e.g.) decoder.cc -> decoder-g.cc.inc for 'global' outputs
def suffixize(self, s, sec):
        extn = re.compile(r'(\.[^\.]+)$') # isolate extension
if self.namespace:
return extn.sub(r'-ns\1.inc', s) # insert some text on either side
else:
return extn.sub(r'-g\1.inc', s)
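    # For example, suffixize('decoder.cc', 'decoder') yields
    # 'decoder-ns.cc.inc' once the namespace declaration has been seen,
    # and 'decoder-g.cc.inc' before it.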
# Get the file object for emitting code into the specified section
# (header, decoder, exec, decode_block).
def get_file(self, section):
if section == 'decode_block':
filename = 'decode-method.cc.inc'
else:
if section == 'header':
file = 'decoder.hh'
else:
file = '%s.cc' % section
filename = self.suffixize(file, section)
try:
return self.files[filename]
except KeyError: pass
f = self.open(filename)
self.files[filename] = f
# The splittable files are the ones with many independent
# per-instruction functions - the decoder's instruction constructors
# and the instruction execution (execute()) methods. These both have
# the suffix -ns.cc.inc, meaning they are within the namespace part
# of the ISA, contain object-emitting C++ source, and are included
# into other top-level files. These are the files that need special
# #define's to allow parts of them to be compiled separately. Rather
# than splitting the emissions into separate files, the monolithic
# output of the ISA parser is maintained, but the value (or lack
# thereof) of the __SPLIT definition during C preprocessing will
# select the different chunks. If no 'split' directives are used,
# the cpp emissions have no effect.
if re.search('-ns.cc.inc$', filename):
print >>f, '#if !defined(__SPLIT) || (__SPLIT == 1)'
self.splits[f] = 1
# ensure requisite #include's
elif filename in ['decoder-g.cc.inc', 'exec-g.cc.inc']:
print >>f, '#include "decoder.hh"'
elif filename == 'decoder-g.hh.inc':
print >>f, '#include "base/bitfield.hh"'
return f
# Weave together the parts of the different output sections by
# #include'ing them into some very short top-level .cc/.hh files.
# These small files make it much clearer how this tool works, since
# you directly see the chunks emitted as files that are #include'd.
def write_top_level_files(self):
dep = self.open('inc.d', bare=True)
# decoder header - everything depends on this
file = 'decoder.hh'
with self.open(file) as f:
inc = []
fn = 'decoder-g.hh.inc'
assert(fn in self.files)
f.write('#include "%s"\n' % fn)
inc.append(fn)
fn = 'decoder-ns.hh.inc'
assert(fn in self.files)
f.write('namespace %s {\n#include "%s"\n}\n'
% (self.namespace, fn))
inc.append(fn)
print >>dep, file+':', ' '.join(inc)
# decoder method - cannot be split
file = 'decoder.cc'
with self.open(file) as f:
inc = []
fn = 'decoder-g.cc.inc'
assert(fn in self.files)
f.write('#include "%s"\n' % fn)
inc.append(fn)
fn = 'decode-method.cc.inc'
            # guaranteed to have been written by the time the parse completes
f.write('#include "%s"\n' % fn)
inc.append(fn)
inc.append("decoder.hh")
print >>dep, file+':', ' '.join(inc)
        extn = re.compile(r'(\.[^\.]+)$')
# instruction constructors
splits = self.splits[self.get_file('decoder')]
file_ = 'inst-constrs.cc'
for i in range(1, splits+1):
if splits > 1:
file = extn.sub(r'-%d\1' % i, file_)
else:
file = file_
with self.open(file) as f:
inc = []
fn = 'decoder-g.cc.inc'
assert(fn in self.files)
f.write('#include "%s"\n' % fn)
inc.append(fn)
fn = 'decoder-ns.cc.inc'
assert(fn in self.files)
print >>f, 'namespace %s {' % self.namespace
if splits > 1:
print >>f, '#define __SPLIT %u' % i
print >>f, '#include "%s"' % fn
print >>f, '}'
inc.append(fn)
inc.append("decoder.hh")
print >>dep, file+':', ' '.join(inc)
# instruction execution per-CPU model
splits = self.splits[self.get_file('exec')]
for cpu in self.cpuModels:
for i in range(1, splits+1):
if splits > 1:
file = extn.sub(r'_%d\1' % i, cpu.filename)
else:
file = cpu.filename
with self.open(file) as f:
inc = []
fn = 'exec-g.cc.inc'
assert(fn in self.files)
f.write('#include "%s"\n' % fn)
inc.append(fn)
f.write(cpu.includes+"\n")
fn = 'exec-ns.cc.inc'
assert(fn in self.files)
print >>f, 'namespace %s {' % self.namespace
print >>f, '#define CPU_EXEC_CONTEXT %s' \
% cpu.strings['CPU_exec_context']
if splits > 1:
print >>f, '#define __SPLIT %u' % i
print >>f, '#include "%s"' % fn
print >>f, '}'
inc.append(fn)
inc.append("decoder.hh")
print >>dep, file+':', ' '.join(inc)
# max_inst_regs.hh
self.update('max_inst_regs.hh',
'''namespace %(namespace)s {
const int MaxInstSrcRegs = %(maxInstSrcRegs)d;
const int MaxInstDestRegs = %(maxInstDestRegs)d;
const int MaxMiscDestRegs = %(maxMiscDestRegs)d;\n}\n''' % self)
print >>dep, 'max_inst_regs.hh:'
dep.close()
    scaremonger_template = '''// DO NOT EDIT
// This file was automatically generated from an ISA description:
// %(filename)s
'''
#####################################################################
#
# Lexer
#
# The PLY lexer module takes two things as input:
# - A list of token names (the string list 'tokens')
# - A regular expression describing a match for each token. The
# regexp for token FOO can be provided in two ways:
# - as a string variable named t_FOO
# - as the doc string for a function named t_FOO. In this case,
# the function is also executed, allowing an action to be
# associated with each token match.
#
#####################################################################
# Reserved words. These are listed separately as they are matched
# using the same regexp as generic IDs, but distinguished in the
# t_ID() function. The PLY documentation suggests this approach.
reserved = (
'BITFIELD', 'DECODE', 'DECODER', 'DEFAULT', 'DEF', 'EXEC', 'FORMAT',
'HEADER', 'LET', 'NAMESPACE', 'OPERAND_TYPES', 'OPERANDS',
'OUTPUT', 'SIGNED', 'SPLIT', 'TEMPLATE'
)
# List of tokens. The lex module requires this.
tokens = reserved + (
# identifier
'ID',
# integer literal
'INTLIT',
# string literal
'STRLIT',
# code literal
'CODELIT',
# ( ) [ ] { } < > , ; . : :: *
'LPAREN', 'RPAREN',
'LBRACKET', 'RBRACKET',
'LBRACE', 'RBRACE',
'LESS', 'GREATER', 'EQUALS',
'COMMA', 'SEMI', 'DOT', 'COLON', 'DBLCOLON',
'ASTERISK',
# C preprocessor directives
'CPPDIRECTIVE'
# The following are matched but never returned. commented out to
# suppress PLY warning
# newfile directive
# 'NEWFILE',
# endfile directive
# 'ENDFILE'
)
# Regular expressions for token matching
t_LPAREN = r'\('
t_RPAREN = r'\)'
t_LBRACKET = r'\['
t_RBRACKET = r'\]'
t_LBRACE = r'\{'
t_RBRACE = r'\}'
t_LESS = r'\<'
t_GREATER = r'\>'
t_EQUALS = r'='
t_COMMA = r','
t_SEMI = r';'
t_DOT = r'\.'
t_COLON = r':'
t_DBLCOLON = r'::'
t_ASTERISK = r'\*'
# Identifiers and reserved words
reserved_map = { }
for r in reserved:
reserved_map[r.lower()] = r
def t_ID(self, t):
r'[A-Za-z_]\w*'
t.type = self.reserved_map.get(t.value, 'ID')
return t
# Integer literal
def t_INTLIT(self, t):
        r'-?(0x[\da-fA-F]+|\d+)'
try:
t.value = int(t.value,0)
except ValueError:
error(t.lexer.lineno, 'Integer value "%s" too large' % t.value)
t.value = 0
return t
# String literal. Note that these use only single quotes, and
# can span multiple lines.
def t_STRLIT(self, t):
r"(?m)'([^'])+'"
# strip off quotes
t.value = t.value[1:-1]
t.lexer.lineno += t.value.count('\n')
return t
# "Code literal"... like a string literal, but delimiters are
# '{{' and '}}' so they get formatted nicely under emacs c-mode
def t_CODELIT(self, t):
r"(?m)\{\{([^\}]|}(?!\}))+\}\}"
# strip off {{ & }}
t.value = t.value[2:-2]
t.lexer.lineno += t.value.count('\n')
return t
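    # For example, the description text "{{ x = 1; }}" (an assumed
    # snippet) is returned as a CODELIT token whose value is ' x = 1; '.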
def t_CPPDIRECTIVE(self, t):
r'^\#[^\#].*\n'
t.lexer.lineno += t.value.count('\n')
return t
def t_NEWFILE(self, t):
r'^\#\#newfile\s+"[^"]*"\n'
self.fileNameStack.push(t.lexer.lineno)
t.lexer.lineno = LineTracker(t.value[11:-2])
def t_ENDFILE(self, t):
r'^\#\#endfile\n'
t.lexer.lineno = self.fileNameStack.pop()
#
# The functions t_NEWLINE, t_ignore, and t_error are
# special for the lex module.
#
# Newlines
def t_NEWLINE(self, t):
r'\n+'
t.lexer.lineno += t.value.count('\n')
# Comments
def t_comment(self, t):
r'//.*'
# Completely ignored characters
t_ignore = ' \t\x0c'
# Error handler
def t_error(self, t):
error(t.lexer.lineno, "illegal character '%s'" % t.value[0])
t.skip(1)
#####################################################################
#
# Parser
#
# Every function whose name starts with 'p_' defines a grammar
# rule. The rule is encoded in the function's doc string, while
# the function body provides the action taken when the rule is
# matched. The argument to each function is a list of the values
# of the rule's symbols: t[0] for the LHS, and t[1..n] for the
# symbols on the RHS. For tokens, the value is copied from the
# t.value attribute provided by the lexer. For non-terminals, the
# value is assigned by the producing rule; i.e., the job of the
# grammar rule function is to set the value for the non-terminal
# on the LHS (by assigning to t[0]).
#####################################################################
# The LHS of the first grammar rule is used as the start symbol
# (in this case, 'specification'). Note that this rule enforces
# that there will be exactly one namespace declaration, with 0 or
# more global defs/decls before and after it. The defs & decls
# before the namespace decl will be outside the namespace; those
# after will be inside. The decoder function is always inside the
# namespace.
def p_specification(self, t):
'specification : opt_defs_and_outputs top_level_decode_block'
for f in self.splits.iterkeys():
f.write('\n#endif\n')
for f in self.files.itervalues(): # close ALL the files;
f.close() # not doing so can cause compilation to fail
self.write_top_level_files()
t[0] = True
# 'opt_defs_and_outputs' is a possibly empty sequence of def and/or
# output statements. Its productions do the hard work of eventually
# instantiating a GenCode, which are generally emitted (written to disk)
# as soon as possible, except for the decode_block, which has to be
# accumulated into one large function of nested switch/case blocks.
def p_opt_defs_and_outputs_0(self, t):
'opt_defs_and_outputs : empty'
def p_opt_defs_and_outputs_1(self, t):
'opt_defs_and_outputs : defs_and_outputs'
def p_defs_and_outputs_0(self, t):
'defs_and_outputs : def_or_output'
def p_defs_and_outputs_1(self, t):
'defs_and_outputs : defs_and_outputs def_or_output'
# The list of possible definition/output statements.
# They are all processed as they are seen.
def p_def_or_output(self, t):
'''def_or_output : name_decl
| def_format
| def_bitfield
| def_bitfield_struct
| def_template
| def_operand_types
| def_operands
| output
| global_let
| split'''
# Utility function used by both invocations of splitting - explicit
# 'split' keyword and split() function inside "let {{ }};" blocks.
def split(self, sec, write=False):
assert(sec != 'header' and "header cannot be split")
f = self.get_file(sec)
self.splits[f] += 1
s = '\n#endif\n#if __SPLIT == %u\n' % self.splits[f]
if write:
f.write(s)
else:
return s
# split output file to reduce compilation time
def p_split(self, t):
'split : SPLIT output_type SEMI'
assert(self.isa_name and "'split' not allowed before namespace decl")
self.split(t[2], True)
def p_output_type(self, t):
'''output_type : DECODER
| HEADER
| EXEC'''
t[0] = t[1]
# ISA name declaration looks like "namespace <foo>;"
def p_name_decl(self, t):
'name_decl : NAMESPACE ID SEMI'
assert(self.isa_name == None and "Only 1 namespace decl permitted")
self.isa_name = t[2]
self.namespace = t[2] + 'Inst'
# Output blocks 'output <foo> {{...}}' (C++ code blocks) are copied
# directly to the appropriate output section.
# Massage output block by substituting in template definitions and
# bit operators. We handle '%'s embedded in the string that don't
# indicate template substitutions (or CPU-specific symbols, which
# get handled in GenCode) by doubling them first so that the
# format operation will reduce them back to single '%'s.
def process_output(self, s):
s = self.protectNonSubstPercents(s)
# protects cpu-specific symbols too
s = self.protectCpuSymbols(s)
return substBitOps(s % self.templateMap)
def p_output(self, t):
'output : OUTPUT output_type CODELIT SEMI'
kwargs = { t[2]+'_output' : self.process_output(t[3]) }
GenCode(self, **kwargs).emit()
# global let blocks 'let {{...}}' (Python code blocks) are
# executed directly when seen. Note that these execute in a
# special variable context 'exportContext' to prevent the code
# from polluting this script's namespace.
def p_global_let(self, t):
'global_let : LET CODELIT SEMI'
def _split(sec):
return self.split(sec)
self.updateExportContext()
self.exportContext["header_output"] = ''
self.exportContext["decoder_output"] = ''
self.exportContext["exec_output"] = ''
self.exportContext["decode_block"] = ''
self.exportContext["split"] = _split
split_setup = '''
def wrap(func):
def split(sec):
globals()[sec + '_output'] += func(sec)
return split
split = wrap(split)
del wrap
'''
# This tricky setup (immediately above) allows us to just write
# (e.g.) "split('exec')" in the Python code and the split #ifdef's
# will automatically be added to the exec_output variable. The inner
# Python execution environment doesn't know about the split points,
# so we carefully inject and wrap a closure that can retrieve the
# next split's #define from the parser and add it to the current
# emission-in-progress.
try:
exec split_setup+fixPythonIndentation(t[2]) in self.exportContext
except Exception, exc:
if debug:
raise
error(t.lineno(1), 'In global let block: %s' % exc)
GenCode(self,
header_output=self.exportContext["header_output"],
decoder_output=self.exportContext["decoder_output"],
exec_output=self.exportContext["exec_output"],
decode_block=self.exportContext["decode_block"]).emit()
# Define the mapping from operand type extensions to C++ types and
# bit widths (stored in operandTypeMap).
def p_def_operand_types(self, t):
'def_operand_types : DEF OPERAND_TYPES CODELIT SEMI'
try:
self.operandTypeMap = eval('{' + t[3] + '}')
except Exception, exc:
if debug:
raise
error(t.lineno(1),
'In def operand_types: %s' % exc)
# Define the mapping from operand names to operand classes and
# other traits. Stored in operandNameMap.
def p_def_operands(self, t):
'def_operands : DEF OPERANDS CODELIT SEMI'
if not hasattr(self, 'operandTypeMap'):
error(t.lineno(1),
'error: operand types must be defined before operands')
try:
user_dict = eval('{' + t[3] + '}', self.exportContext)
except Exception, exc:
if debug:
raise
error(t.lineno(1), 'In def operands: %s' % exc)
self.buildOperandNameMap(user_dict, t.lexer.lineno)
# A bitfield definition looks like:
# 'def [signed] bitfield <ID> [<first>:<last>]'
# This generates a preprocessor macro in the output file.
def p_def_bitfield_0(self, t):
'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT COLON INTLIT GREATER SEMI'
expr = 'bits(machInst, %2d, %2d)' % (t[6], t[8])
if (t[2] == 'signed'):
expr = 'sext<%d>(%s)' % (t[6] - t[8] + 1, expr)
hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
GenCode(self, header_output=hash_define).emit()
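    # For illustration (assumed declaration): "def bitfield OPCODE <31:26>;"
    # emits the header macro
    #   #undef OPCODE
    #   #define OPCODE  bits(machInst, 31, 26)
    # and the 'signed' variant wraps the expression in sext<6>(...).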
# alternate form for single bit: 'def [signed] bitfield <ID> [<bit>]'
def p_def_bitfield_1(self, t):
'def_bitfield : DEF opt_signed BITFIELD ID LESS INTLIT GREATER SEMI'
expr = 'bits(machInst, %2d, %2d)' % (t[6], t[6])
if (t[2] == 'signed'):
expr = 'sext<%d>(%s)' % (1, expr)
hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
GenCode(self, header_output=hash_define).emit()
# alternate form for structure member: 'def bitfield <ID> <ID>'
def p_def_bitfield_struct(self, t):
'def_bitfield_struct : DEF opt_signed BITFIELD ID id_with_dot SEMI'
if (t[2] != ''):
error(t.lineno(1),
'error: structure bitfields are always unsigned.')
expr = 'machInst.%s' % t[5]
hash_define = '#undef %s\n#define %s\t%s\n' % (t[4], t[4], expr)
GenCode(self, header_output=hash_define).emit()
def p_id_with_dot_0(self, t):
'id_with_dot : ID'
t[0] = t[1]
def p_id_with_dot_1(self, t):
'id_with_dot : ID DOT id_with_dot'
t[0] = t[1] + t[2] + t[3]
def p_opt_signed_0(self, t):
'opt_signed : SIGNED'
t[0] = t[1]
def p_opt_signed_1(self, t):
'opt_signed : empty'
t[0] = ''
def p_def_template(self, t):
'def_template : DEF TEMPLATE ID CODELIT SEMI'
if t[3] in self.templateMap:
print "warning: template %s already defined" % t[3]
self.templateMap[t[3]] = Template(self, t[4])
# An instruction format definition looks like
# "def format <fmt>(<params>) {{...}};"
def p_def_format(self, t):
'def_format : DEF FORMAT ID LPAREN param_list RPAREN CODELIT SEMI'
(id, params, code) = (t[3], t[5], t[7])
self.defFormat(id, params, code, t.lexer.lineno)
# The formal parameter list for an instruction format is a
# possibly empty list of comma-separated parameters. Positional
# (standard, non-keyword) parameters must come first, followed by
# keyword parameters, followed by a '*foo' parameter that gets
# excess positional arguments (as in Python). Each of these three
# parameter categories is optional.
#
# Note that we do not support the '**foo' parameter for collecting
# otherwise undefined keyword args. Otherwise the parameter list
# is (I believe) identical to what is supported in Python.
#
# The param list generates a tuple, where the first element is a
# list of the positional params and the second element is a dict
# containing the keyword params.
def p_param_list_0(self, t):
'param_list : positional_param_list COMMA nonpositional_param_list'
t[0] = t[1] + t[3]
def p_param_list_1(self, t):
'''param_list : positional_param_list
| nonpositional_param_list'''
t[0] = t[1]
def p_positional_param_list_0(self, t):
'positional_param_list : empty'
t[0] = []
def p_positional_param_list_1(self, t):
'positional_param_list : ID'
t[0] = [t[1]]
def p_positional_param_list_2(self, t):
'positional_param_list : positional_param_list COMMA ID'
t[0] = t[1] + [t[3]]
def p_nonpositional_param_list_0(self, t):
'nonpositional_param_list : keyword_param_list COMMA excess_args_param'
t[0] = t[1] + t[3]
def p_nonpositional_param_list_1(self, t):
'''nonpositional_param_list : keyword_param_list
| excess_args_param'''
t[0] = t[1]
def p_keyword_param_list_0(self, t):
'keyword_param_list : keyword_param'
t[0] = [t[1]]
def p_keyword_param_list_1(self, t):
'keyword_param_list : keyword_param_list COMMA keyword_param'
t[0] = t[1] + [t[3]]
def p_keyword_param(self, t):
'keyword_param : ID EQUALS expr'
t[0] = t[1] + ' = ' + t[3].__repr__()
def p_excess_args_param(self, t):
'excess_args_param : ASTERISK ID'
# Just concatenate them: '*ID'. Wrap in list to be consistent
# with positional_param_list and keyword_param_list.
t[0] = [t[1] + t[2]]
# End of format definition-related rules.
##############
#
# A decode block looks like:
# decode <field1> [, <field2>]* [default <inst>] { ... }
#
def p_top_level_decode_block(self, t):
'top_level_decode_block : decode_block'
codeObj = t[1]
codeObj.wrap_decode_block('''
StaticInstPtr
%(isa_name)s::Decoder::decodeInst(%(isa_name)s::ExtMachInst machInst)
{
using namespace %(namespace)s;
''' % self, '}')
codeObj.emit()
def p_decode_block(self, t):
'decode_block : DECODE ID opt_default LBRACE decode_stmt_list RBRACE'
default_defaults = self.defaultStack.pop()
codeObj = t[5]
# use the "default defaults" only if there was no explicit
# default statement in decode_stmt_list
if not codeObj.has_decode_default:
codeObj += default_defaults
codeObj.wrap_decode_block('switch (%s) {\n' % t[2], '}\n')
t[0] = codeObj
# The opt_default statement serves only to push the "default
# defaults" onto defaultStack. This value will be used by nested
# decode blocks, and used and popped off when the current
# decode_block is processed (in p_decode_block() above).
def p_opt_default_0(self, t):
'opt_default : empty'
# no default specified: reuse the one currently at the top of
# the stack
self.defaultStack.push(self.defaultStack.top())
# no meaningful value returned
t[0] = None
def p_opt_default_1(self, t):
'opt_default : DEFAULT inst'
# push the new default
codeObj = t[2]
codeObj.wrap_decode_block('\ndefault:\n', 'break;\n')
self.defaultStack.push(codeObj)
# no meaningful value returned
t[0] = None
def p_decode_stmt_list_0(self, t):
'decode_stmt_list : decode_stmt'
t[0] = t[1]
def p_decode_stmt_list_1(self, t):
'decode_stmt_list : decode_stmt decode_stmt_list'
if (t[1].has_decode_default and t[2].has_decode_default):
error(t.lineno(1), 'Two default cases in decode block')
t[0] = t[1] + t[2]
#
# Decode statement rules
#
# There are four types of statements allowed in a decode block:
# 1. Format blocks 'format <foo> { ... }'
# 2. Nested decode blocks
# 3. Instruction definitions.
# 4. C preprocessor directives.
# Preprocessor directives found in a decode statement list are
# passed through to the output, replicated to all of the output
# code streams. This works well for ifdefs, so we can ifdef out
# both the declarations and the decode cases generated by an
# instruction definition. Handling them as part of the grammar
# makes it easy to keep them in the right place with respect to
# the code generated by the other statements.
def p_decode_stmt_cpp(self, t):
'decode_stmt : CPPDIRECTIVE'
t[0] = GenCode(self, t[1], t[1], t[1], t[1])
# A format block 'format <foo> { ... }' sets the default
# instruction format used to handle instruction definitions inside
# the block. This format can be overridden by using an explicit
# format on the instruction definition or with a nested format
# block.
def p_decode_stmt_format(self, t):
'decode_stmt : FORMAT push_format_id LBRACE decode_stmt_list RBRACE'
# The format will be pushed on the stack when 'push_format_id'
# is processed (see below). Once the parser has recognized
# the full production (though the right brace), we're done
# with the format, so now we can pop it.
self.formatStack.pop()
t[0] = t[4]
# This rule exists so we can set the current format (& push the
# stack) when we recognize the format name part of the format
# block.
def p_push_format_id(self, t):
'push_format_id : ID'
try:
self.formatStack.push(self.formatMap[t[1]])
t[0] = ('', '// format %s' % t[1])
except KeyError:
error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
# Nested decode block: if the value of the current field matches
# the specified constant(s), do a nested decode on some other field.
def p_decode_stmt_decode(self, t):
'decode_stmt : case_list COLON decode_block'
case_list = t[1]
codeObj = t[3]
# just wrap the decoding code from the block as a case in the
# outer switch statement.
codeObj.wrap_decode_block('\n%s\n' % ''.join(case_list))
codeObj.has_decode_default = (case_list == ['default:'])
t[0] = codeObj
# Instruction definition (finally!).
def p_decode_stmt_inst(self, t):
'decode_stmt : case_list COLON inst SEMI'
case_list = t[1]
codeObj = t[3]
codeObj.wrap_decode_block('\n%s' % ''.join(case_list), 'break;\n')
codeObj.has_decode_default = (case_list == ['default:'])
t[0] = codeObj
# The constant list for a decode case label must be non-empty, and must
# either be the keyword 'default', or made up of one or more
# comma-separated integer literals or strings which evaluate to
# constants when compiled as C++.
def p_case_list_0(self, t):
'case_list : DEFAULT'
t[0] = ['default:']
def prep_int_lit_case_label(self, lit):
if lit >= 2**32:
return 'case ULL(%#x): ' % lit
else:
return 'case %#x: ' % lit
def prep_str_lit_case_label(self, lit):
return 'case %s: ' % lit
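    # For example, prep_int_lit_case_label(0x1f) yields 'case 0x1f: ',
    # while a literal >= 2**32 is emitted as 'case ULL(0x...): ', using
    # the ULL() macro to keep the generated C++ literal 64 bits wide.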
def p_case_list_1(self, t):
'case_list : INTLIT'
t[0] = [self.prep_int_lit_case_label(t[1])]
def p_case_list_2(self, t):
'case_list : STRLIT'
t[0] = [self.prep_str_lit_case_label(t[1])]
def p_case_list_3(self, t):
'case_list : case_list COMMA INTLIT'
t[0] = t[1]
t[0].append(self.prep_int_lit_case_label(t[3]))
def p_case_list_4(self, t):
'case_list : case_list COMMA STRLIT'
t[0] = t[1]
t[0].append(self.prep_str_lit_case_label(t[3]))
# Define an instruction using the current instruction format
# (specified by an enclosing format block).
# "<mnemonic>(<args>)"
def p_inst_0(self, t):
'inst : ID LPAREN arg_list RPAREN'
# Pass the ID and arg list to the current format class to deal with.
currentFormat = self.formatStack.top()
codeObj = currentFormat.defineInst(self, t[1], t[3], t.lexer.lineno)
args = ','.join(map(str, t[3]))
args = re.sub('(?m)^', '//', args)
args = re.sub('^//', '', args)
comment = '\n// %s::%s(%s)\n' % (currentFormat.id, t[1], args)
codeObj.prepend_all(comment)
t[0] = codeObj
# Define an instruction using an explicitly specified format:
# "<fmt>::<mnemonic>(<args>)"
def p_inst_1(self, t):
'inst : ID DBLCOLON ID LPAREN arg_list RPAREN'
try:
format = self.formatMap[t[1]]
except KeyError:
error(t.lineno(1), 'instruction format "%s" not defined.' % t[1])
codeObj = format.defineInst(self, t[3], t[5], t.lexer.lineno)
comment = '\n// %s::%s(%s)\n' % (t[1], t[3], t[5])
codeObj.prepend_all(comment)
t[0] = codeObj
# The arg list generates a tuple, where the first element is a
# list of the positional args and the second element is a dict
# containing the keyword args.
def p_arg_list_0(self, t):
'arg_list : positional_arg_list COMMA keyword_arg_list'
t[0] = ( t[1], t[3] )
def p_arg_list_1(self, t):
'arg_list : positional_arg_list'
t[0] = ( t[1], {} )
def p_arg_list_2(self, t):
'arg_list : keyword_arg_list'
t[0] = ( [], t[1] )
def p_positional_arg_list_0(self, t):
'positional_arg_list : empty'
t[0] = []
def p_positional_arg_list_1(self, t):
'positional_arg_list : expr'
t[0] = [t[1]]
def p_positional_arg_list_2(self, t):
'positional_arg_list : positional_arg_list COMMA expr'
t[0] = t[1] + [t[3]]
def p_keyword_arg_list_0(self, t):
'keyword_arg_list : keyword_arg'
t[0] = t[1]
def p_keyword_arg_list_1(self, t):
'keyword_arg_list : keyword_arg_list COMMA keyword_arg'
t[0] = t[1]
t[0].update(t[3])
def p_keyword_arg(self, t):
'keyword_arg : ID EQUALS expr'
t[0] = { t[1] : t[3] }
#
# Basic expressions. These constitute the argument values of
# "function calls" (i.e. instruction definitions in the decode
# block) and default values for formal parameters of format
# functions.
#
# Right now, these are either strings, integers, or (recursively)
# lists of exprs (using Python square-bracket list syntax). Note
    # that bare identifiers are treated as string constants here (since
# there isn't really a variable namespace to refer to).
#
def p_expr_0(self, t):
'''expr : ID
| INTLIT
| STRLIT
| CODELIT'''
t[0] = t[1]
def p_expr_1(self, t):
'''expr : LBRACKET list_expr RBRACKET'''
t[0] = t[2]
def p_list_expr_0(self, t):
'list_expr : expr'
t[0] = [t[1]]
def p_list_expr_1(self, t):
'list_expr : list_expr COMMA expr'
t[0] = t[1] + [t[3]]
def p_list_expr_2(self, t):
'list_expr : empty'
t[0] = []
#
# Empty production... use in other rules for readability.
#
def p_empty(self, t):
'empty :'
pass
# Parse error handler. Note that the argument here is the
# offending *token*, not a grammar symbol (hence the need to use
# t.value)
def p_error(self, t):
if t:
error(t.lexer.lineno, "syntax error at '%s'" % t.value)
else:
error("unknown syntax error")
# END OF GRAMMAR RULES
def updateExportContext(self):
# create a continuation that allows us to grab the current parser
def wrapInstObjParams(*args):
return InstObjParams(self, *args)
self.exportContext['InstObjParams'] = wrapInstObjParams
self.exportContext.update(self.templateMap)
def defFormat(self, id, params, code, lineno):
'''Define a new format'''
# make sure we haven't already defined this one
if id in self.formatMap:
error(lineno, 'format %s redefined.' % id)
# create new object and store in global map
self.formatMap[id] = Format(id, params, code)
def expandCpuSymbolsToDict(self, template):
'''Expand template with CPU-specific references into a
dictionary with an entry for each CPU model name. The entry
key is the model name and the corresponding value is the
template with the CPU-specific refs substituted for that
model.'''
# Protect '%'s that don't go with CPU-specific terms
t = re.sub(r'%(?!\(CPU_)', '%%', template)
result = {}
for cpu in self.cpuModels:
result[cpu.name] = t % cpu.strings
return result
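    # Illustrative sketch (model names and strings hypothetical): with CPU
    # models 'CpuA' and 'CpuB' whose strings map 'CPU_exec_context' to 'xcA'
    # and 'xcB', expandCpuSymbolsToDict('use %(CPU_exec_context)s; 50% done')
    # returns {'CpuA': 'use xcA; 50% done', 'CpuB': 'use xcB; 50% done'}.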
def expandCpuSymbolsToString(self, template):
'''*If* the template has CPU-specific references, return a
single string containing a copy of the template for each CPU
model with the corresponding values substituted in. If the
template has no CPU-specific references, it is returned
unmodified.'''
if template.find('%(CPU_') != -1:
return reduce(lambda x,y: x+y,
self.expandCpuSymbolsToDict(template).values())
else:
return template
def protectCpuSymbols(self, template):
'''Protect CPU-specific references by doubling the
corresponding '%'s (in preparation for substituting a different
set of references into the template).'''
return re.sub(r'%(?=\(CPU_)', '%%', template)
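    # Illustrative: protectCpuSymbols('%(CPU_foo)s vs %(other)s') returns
    # '%%(CPU_foo)s vs %(other)s', so a subsequent '%' substitution leaves
    # the CPU-specific reference intact.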
def protectNonSubstPercents(self, s):
'''Protect any non-dict-substitution '%'s in a format string
(i.e. those not followed by '(')'''
return re.sub(r'%(?!\()', '%%', s)
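    # Illustrative: protectNonSubstPercents('50% of %(total)d') returns
    # '50%% of %(total)d'; only the bare '%' is doubled.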
def buildOperandNameMap(self, user_dict, lineno):
operand_name = {}
for op_name, val in user_dict.iteritems():
# Check if extra attributes have been specified.
if len(val) > 9:
                error(lineno, 'error: too many attributes for operand "%s"' %
                      op_name)
# Pad val with None in case optional args are missing
val += (None, None, None, None)
base_cls_name, dflt_ext, reg_spec, flags, sort_pri, \
read_code, write_code, read_predicate, write_predicate = val[:9]
# Canonical flag structure is a triple of lists, where each list
# indicates the set of flags implied by this operand always, when
# used as a source, and when used as a dest, respectively.
# For simplicity this can be initialized using a variety of fairly
# obvious shortcuts; we convert these to canonical form here.
if not flags:
# no flags specified (e.g., 'None')
flags = ( [], [], [] )
elif isinstance(flags, str):
# a single flag: assumed to be unconditional
flags = ( [ flags ], [], [] )
elif isinstance(flags, list):
# a list of flags: also assumed to be unconditional
flags = ( flags, [], [] )
elif isinstance(flags, tuple):
# it's a tuple: it should be a triple,
# but each item could be a single string or a list
(uncond_flags, src_flags, dest_flags) = flags
flags = (makeList(uncond_flags),
makeList(src_flags), makeList(dest_flags))
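            # Illustrative shortcut expansions (assuming makeList wraps a
            # bare string in a list):
            #   'IsInteger'         -> (['IsInteger'], [], [])
            #   ['A', 'B']          -> (['A', 'B'], [], [])
            #   ('U', 'S', ['D'])   -> (['U'], ['S'], ['D'])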
# Accumulate attributes of new operand class in tmp_dict
tmp_dict = {}
attrList = ['reg_spec', 'flags', 'sort_pri',
'read_code', 'write_code',
'read_predicate', 'write_predicate']
if dflt_ext:
dflt_ctype = self.operandTypeMap[dflt_ext]
attrList.extend(['dflt_ctype', 'dflt_ext'])
for attr in attrList:
tmp_dict[attr] = eval(attr)
tmp_dict['base_name'] = op_name
# New class name will be e.g. "IntReg_Ra"
cls_name = base_cls_name + '_' + op_name
# Evaluate string arg to get class object. Note that the
# actual base class for "IntReg" is "IntRegOperand", i.e. we
# have to append "Operand".
try:
base_cls = eval(base_cls_name + 'Operand')
except NameError:
error(lineno,
'error: unknown operand base class "%s"' % base_cls_name)
# The following statement creates a new class called
# <cls_name> as a subclass of <base_cls> with the attributes
# in tmp_dict, just as if we evaluated a class declaration.
operand_name[op_name] = type(cls_name, (base_cls,), tmp_dict)
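            # Illustrative sketch: for op_name 'Ra' with base_cls_name
            # 'IntReg', the type() call above behaves like writing:
            #   class IntReg_Ra(IntRegOperand):
            #       base_name = 'Ra'
            #       ...  # plus the remaining attributes from tmp_dict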
self.operandNameMap = operand_name
# Define operand variables.
operands = user_dict.keys()
extensions = self.operandTypeMap.keys()
operandsREString = r'''
(?<!\w) # neg. lookbehind assertion: prevent partial matches
((%s)(?:_(%s))?) # match: operand with optional '_' then suffix
(?!\w) # neg. lookahead assertion: prevent partial matches
''' % (string.join(operands, '|'), string.join(extensions, '|'))
self.operandsRE = re.compile(operandsREString, re.MULTILINE|re.VERBOSE)
# Same as operandsREString, but extension is mandatory, and only two
# groups are returned (base and ext, not full name as above).
        # Used for substituting '_' for '.' to make C++ identifiers.
operandsWithExtREString = r'(?<!\w)(%s)_(%s)(?!\w)' \
% (string.join(operands, '|'), string.join(extensions, '|'))
self.operandsWithExtRE = \
re.compile(operandsWithExtREString, re.MULTILINE)
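        # Illustrative (operand/extension names hypothetical): with operands
        # ['Ra', 'Rb'] and extensions ['sw', 'uw'], operandsRE matches 'Ra'
        # and 'Ra_sw' as whole words, while operandsWithExtRE matches only
        # 'Ra_sw', capturing ('Ra', 'sw').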
def substMungedOpNames(self, code):
'''Munge operand names in code string to make legal C++
variable names. This means getting rid of the type extension
        if any. Will match the base_name attribute of the Operand object.'''
return self.operandsWithExtRE.sub(r'\1', code)
def mungeSnippet(self, s):
'''Fix up code snippets for final substitution in templates.'''
if isinstance(s, str):
return self.substMungedOpNames(substBitOps(s))
else:
return s
def open(self, name, bare=False):
'''Open the output file for writing and include scary warning.'''
filename = os.path.join(self.output_dir, name)
f = open(filename, 'w')
if f:
if not bare:
f.write(ISAParser.scaremonger_template % self)
return f
def update(self, file, contents):
'''Update the output file only. Scons should handle the case when
the new contents are unchanged using its built-in hash feature.'''
f = self.open(file)
f.write(contents)
f.close()
# This regular expression matches '##include' directives
includeRE = re.compile(r'^\s*##include\s+"(?P<filename>[^"]*)".*$',
re.MULTILINE)
def replace_include(self, matchobj, dirname):
"""Function to replace a matched '##include' directive with the
contents of the specified file (with nested ##includes
replaced recursively). 'matchobj' is an re match object
(from a match of includeRE) and 'dirname' is the directory
relative to which the file path should be resolved."""
fname = matchobj.group('filename')
full_fname = os.path.normpath(os.path.join(dirname, fname))
contents = '##newfile "%s"\n%s\n##endfile\n' % \
(full_fname, self.read_and_flatten(full_fname))
return contents
def read_and_flatten(self, filename):
"""Read a file and recursively flatten nested '##include' files."""
current_dir = os.path.dirname(filename)
try:
contents = open(filename).read()
except IOError:
error('Error including file "%s"' % filename)
self.fileNameStack.push(LineTracker(filename))
# Find any includes and include them
def replace(matchobj):
return self.replace_include(matchobj, current_dir)
contents = self.includeRE.sub(replace, contents)
self.fileNameStack.pop()
return contents
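    # Illustrative sketch (file names hypothetical): a line
    #   ##include "decoder.isa"
    # inside foo.isa is replaced by
    #   ##newfile ".../decoder.isa" ... ##endfile
    # with any nested ##includes in decoder.isa expanded the same way.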
AlreadyGenerated = {}
def _parse_isa_desc(self, isa_desc_file):
'''Read in and parse the ISA description.'''
# The build system can end up running the ISA parser twice: once to
# finalize the build dependencies, and then to actually generate
# the files it expects (in src/arch/$ARCH/generated). This code
# doesn't do anything different either time, however; the SCons
# invocations just expect different things. Since this code runs
# within SCons, we can just remember that we've already run and
# not perform a completely unnecessary run, since the ISA parser's
# effect is idempotent.
if isa_desc_file in ISAParser.AlreadyGenerated:
return
# grab the last three path components of isa_desc_file
self.filename = '/'.join(isa_desc_file.split('/')[-3:])
# Read file and (recursively) all included files into a string.
# PLY requires that the input be in a single string so we have to
# do this up front.
isa_desc = self.read_and_flatten(isa_desc_file)
# Initialize lineno tracker
self.lex.lineno = LineTracker(isa_desc_file)
# Parse.
self.parse_string(isa_desc)
ISAParser.AlreadyGenerated[isa_desc_file] = None
def parse_isa_desc(self, *args, **kwargs):
try:
self._parse_isa_desc(*args, **kwargs)
except ISAParserError, e:
print backtrace(self.fileNameStack)
print "At %s:" % e.lineno
print e
sys.exit(1)
# Called as script: get args from command line.
# Args are: <isa desc file> <output dir>
if __name__ == '__main__':
ISAParser(sys.argv[2]).parse_isa_desc(sys.argv[1])
|
{
"content_hash": "9fa1ab2d2c42d8016b9c7d8749f077a7",
"timestamp": "",
"source": "github",
"line_count": 2403,
"max_line_length": 89,
"avg_line_length": 36.71993341656263,
"alnum_prop": 0.562070763163263,
"repo_name": "SanchayanMaity/gem5",
"id": "8d609ae5ff870b3ebb49db25a9c9bd29ca3387b0",
"size": "90484",
"binary": false,
"copies": "6",
"ref": "refs/heads/CS570",
"path": "src/arch/isa_parser.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "235643"
},
{
"name": "C",
"bytes": "2802978"
},
{
"name": "C++",
"bytes": "16550087"
},
{
"name": "CMake",
"bytes": "2202"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "HTML",
"bytes": "136898"
},
{
"name": "Java",
"bytes": "3179"
},
{
"name": "M4",
"bytes": "49620"
},
{
"name": "Makefile",
"bytes": "49110"
},
{
"name": "Objective-C",
"bytes": "1505"
},
{
"name": "Perl",
"bytes": "33602"
},
{
"name": "Protocol Buffer",
"bytes": "11148"
},
{
"name": "Python",
"bytes": "4547714"
},
{
"name": "Roff",
"bytes": "8783"
},
{
"name": "Shell",
"bytes": "57032"
},
{
"name": "Vim script",
"bytes": "4335"
},
{
"name": "Visual Basic",
"bytes": "2884"
}
],
"symlink_target": ""
}
|
def includeme(config):
config.add_static_view('static', 'static', cache_max_age=3600)
# default.py
config.add_route('home', '/')
# docs.py
config.add_route('list', 'docs/list')
config.add_route('get', 'docs/get/{docid}')
|
{
"content_hash": "24039ec37e1daeead332a85edee13051",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 66,
"avg_line_length": 28.444444444444443,
"alnum_prop": 0.59375,
"repo_name": "sitn/sitn_portal",
"id": "07077a930a824728f2ffed63cfc81b5da9626fae",
"size": "256",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sitn_portal/routes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3153"
},
{
"name": "HTML",
"bytes": "15073"
},
{
"name": "JavaScript",
"bytes": "1646"
},
{
"name": "Python",
"bytes": "8781"
},
{
"name": "TypeScript",
"bytes": "20241"
}
],
"symlink_target": ""
}
|
from six.moves import urllib
from cinder import context
from cinder import exception
from cinder.tests.unit import fake_constants as fake
from cinder.tests.unit.fake_volume import fake_volume_obj
from cinder.tests.unit.volume.drivers.dell_emc import scaleio
from cinder.tests.unit.volume.drivers.dell_emc.scaleio import mocks
from cinder.volume import configuration
class TestExtendVolume(scaleio.TestScaleIODriver):
"""Test cases for ``ScaleIODriver.extend_volume()``"""
""" New sizes for the volume.
Since ScaleIO has a granularity of 8 GB, multiples of 8 always work.
The 7 size should be either rounded up to 8 or raise an exception
based on the round_volume_capacity config setting.
"""
NEW_SIZE = 16
BAD_SIZE = 7
def setUp(self):
"""Setup a test case environment.
Creates fake volume object and sets up the required API responses.
"""
super(TestExtendVolume, self).setUp()
ctx = context.RequestContext('fake', 'fake', auth_token=True)
self.volume = fake_volume_obj(ctx, **{'id': fake.VOLUME_ID,
'provider_id': fake.PROVIDER_ID})
self.volume_name_2x_enc = urllib.parse.quote(
urllib.parse.quote(self.driver._id_to_base64(self.volume.id))
)
self.HTTPS_MOCK_RESPONSES = {
self.RESPONSE_MODE.Valid: {
'types/Volume/instances/getByName::' +
self.volume_name_2x_enc: '"{}"'.format(self.volume.id),
'instances/Volume::{}/action/setVolumeSize'.format(
self.volume.provider_id
): mocks.MockHTTPSResponse({}, 200),
},
            self.RESPONSE_MODE.BadStatus: {
                'types/Volume/instances/getByName::' +
                self.volume_name_2x_enc: self.BAD_STATUS_RESPONSE,
                'instances/Volume::{}/action/setVolumeSize'.format(
                    self.volume.provider_id): self.BAD_STATUS_RESPONSE,
            },
self.RESPONSE_MODE.Invalid: {
'types/Volume/instances/getByName::' +
self.volume_name_2x_enc: None,
'instances/Volume::{}/action/setVolumeSize'.format(
self.volume.provider_id): mocks.MockHTTPSResponse(
{
'errorCode': self.OLD_VOLUME_NOT_FOUND_ERROR,
'message': 'BadStatus Volume Test',
}, 400
),
},
}
def test_bad_login(self):
self.set_https_response_mode(self.RESPONSE_MODE.BadStatus)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume,
self.volume,
self.NEW_SIZE)
def test_invalid_volume(self):
self.set_https_response_mode(self.RESPONSE_MODE.Invalid)
self.assertRaises(exception.VolumeBackendAPIException,
self.driver.extend_volume,
self.volume,
self.NEW_SIZE)
def test_extend_volume_bad_size_no_round(self):
self.override_config('sio_round_volume_capacity', False,
configuration.SHARED_CONF_GROUP)
self.set_https_response_mode(self.RESPONSE_MODE.Valid)
self.driver.extend_volume(self.volume, self.BAD_SIZE)
def test_extend_volume_bad_size_round(self):
self.override_config('sio_round_volume_capacity', True,
configuration.SHARED_CONF_GROUP)
self.driver.extend_volume(self.volume, self.BAD_SIZE)
def test_extend_volume(self):
self.set_https_response_mode(self.RESPONSE_MODE.Valid)
self.driver.extend_volume(self.volume, self.NEW_SIZE)
|
{
"content_hash": "5b452f30fe00457c585a0df04bf176fa",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 79,
"avg_line_length": 42.354838709677416,
"alnum_prop": 0.592028433612592,
"repo_name": "eharney/cinder",
"id": "90e1adcc50394cb872047be85de5d44db55ccc96",
"size": "4581",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "cinder/tests/unit/volume/drivers/dell_emc/scaleio/test_extend_volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "561"
},
{
"name": "Python",
"bytes": "19839107"
},
{
"name": "Shell",
"bytes": "6453"
}
],
"symlink_target": ""
}
|
outputs = [ "out.exr" ]
command = testrender("-r 256 256 -aa 4 --llvm_opt 13 bumptest.xml out.exr")
# Note: we pick this test arbitrarily as the one to verify llvm_opt=13 works
|
{
"content_hash": "9dd5e6658a7cdeda92071878f3570e66",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 76,
"avg_line_length": 44.5,
"alnum_prop": 0.702247191011236,
"repo_name": "lgritz/OpenShadingLanguage",
"id": "dd3646c70451a93f50dbbfcbf0a915fa014eaef0",
"size": "372",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "testsuite/render-bumptest/run.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "328553"
},
{
"name": "C++",
"bytes": "4608721"
},
{
"name": "CMake",
"bytes": "187004"
},
{
"name": "CSS",
"bytes": "4165"
},
{
"name": "Cuda",
"bytes": "49250"
},
{
"name": "GLSL",
"bytes": "939"
},
{
"name": "HTML",
"bytes": "43863"
},
{
"name": "Lex",
"bytes": "28217"
},
{
"name": "Makefile",
"bytes": "17031"
},
{
"name": "Python",
"bytes": "313209"
},
{
"name": "Shell",
"bytes": "38584"
},
{
"name": "TeX",
"bytes": "267458"
},
{
"name": "Yacc",
"bytes": "51183"
}
],
"symlink_target": ""
}
|
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "aspc.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
{
"content_hash": "e87787aa14003b15e37548a489435c1a",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 64,
"avg_line_length": 32.6,
"alnum_prop": 0.7975460122699386,
"repo_name": "aspc/mainsite",
"id": "f70128eaa726355d32230de7fc3c58190f5ad3f0",
"size": "163",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "aspc/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "390533"
},
{
"name": "HTML",
"bytes": "152059"
},
{
"name": "JavaScript",
"bytes": "1825083"
},
{
"name": "Python",
"bytes": "437771"
},
{
"name": "SQLPL",
"bytes": "248"
},
{
"name": "Shell",
"bytes": "8764"
}
],
"symlink_target": ""
}
|
from pygame.locals import *
from zort.hex_model import *
class LevelSceneMode(object):
""" provides various handlers to abstract gameplay into modes
"""
def __init__(self, scene):
self.scene = scene
def handle_click(self, button, cell):
pass
def draw(self, surface):
pass
def get_nearest_cell(self, coords):
_coords = self.scene.view.coords_from_surface(coords)
#if _coords is not None:
# _coords = hex_round(_coords)
return self.scene.view.cell_from_surface(coords)
def update(self, delta, events):
        return  # early return: the event handling below is currently disabled
for event in events:
if event.type == MOUSEMOTION:
cell = self.get_nearest_cell(event.pos)
if cell:
self.scene.view.highlight_cell(cell)
if event.type == MOUSEBUTTONUP:
cell = self.get_nearest_cell(event.pos)
if cell:
self.handle_click(event.button, cell)
|
{
"content_hash": "02ac7dc74bb45e178f9b489200f6bf58",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 65,
"avg_line_length": 27.86111111111111,
"alnum_prop": 0.5712861415752741,
"repo_name": "bitcraft/pyweek19",
"id": "2f50d8de9abe5f35e0f2c32cbf0d79deefc1cac5",
"size": "1003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "zort/modes/mode.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "190284"
}
],
"symlink_target": ""
}
|
from django import template
register = template.Library()
@register.filter
def col_input_class(taxcalc_param):
cols = len(taxcalc_param.col_fields)
display_cols = 12
display_size = min([int(display_cols / cols), 6])
return "col-xs-{0}".format(display_size)
|
{
"content_hash": "3491986572aff2648d8f1e0851f1182c",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 53,
"avg_line_length": 25.09090909090909,
"alnum_prop": 0.6956521739130435,
"repo_name": "PeterDSteinberg/webapp-public",
"id": "b8b52e2cb93816af4a9722613d0de6f55c2855d3",
"size": "276",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "webapp/apps/taxbrain/templatetags/inputs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "856744"
},
{
"name": "HTML",
"bytes": "61933"
},
{
"name": "JavaScript",
"bytes": "85905"
},
{
"name": "Python",
"bytes": "381167"
},
{
"name": "Shell",
"bytes": "17"
}
],
"symlink_target": ""
}
|
import os
import re
from smtplib import SMTPException
from django import forms
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.contrib.auth import forms as auth_forms
from django.forms.util import ErrorList
import captcha.fields
import commonware.log
import happyforms
from tower import ugettext as _, ugettext_lazy as _lazy
import amo
from amo.utils import log_cef, slug_validator
from .models import (UserProfile, UserNotification, BlacklistedUsername,
BlacklistedEmailDomain, BlacklistedPassword, DjangoUser)
from .widgets import NotificationsSelectMultiple
import users.notifications as email
from . import tasks
log = commonware.log.getLogger('z.users')
admin_re = re.compile('(?=.*\d)(?=.*[a-zA-Z])')
class PasswordMixin:
min_length = 8
error_msg = {'min_length': _('Must be %s characters or more.')
% min_length}
@classmethod
def widget(cls, **kw):
return forms.PasswordInput(attrs={'class': 'password-strength',
'data-min-length': cls.min_length},
**kw)
def clean_password(self, field='password', instance='instance'):
data = self.cleaned_data[field]
if not data:
return data
user = getattr(self, instance, None)
if user and user.pk and user.needs_tougher_password:
if not admin_re.search(data):
raise forms.ValidationError(_('Letters and numbers required.'))
if BlacklistedPassword.blocked(data):
raise forms.ValidationError(_('That password is not allowed.'))
return data
class AuthenticationForm(auth_forms.AuthenticationForm):
username = forms.CharField(max_length=50)
rememberme = forms.BooleanField(required=False)
recaptcha = captcha.fields.ReCaptchaField()
recaptcha_shown = forms.BooleanField(widget=forms.HiddenInput,
required=False)
def __init__(self, request=None, use_recaptcha=False, *args, **kw):
super(AuthenticationForm, self).__init__(*args, **kw)
if not use_recaptcha or not settings.RECAPTCHA_PRIVATE_KEY:
del self.fields['recaptcha']
class PasswordResetForm(auth_forms.PasswordResetForm):
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(PasswordResetForm, self).__init__(*args, **kwargs)
def clean_email(self):
email = self.cleaned_data['email']
self.users_cache = UserProfile.objects.filter(email__iexact=email)
if not self.users_cache:
raise forms.ValidationError(
_("""An email has been sent to the requested account with
further information. If you do not receive an email then
please confirm you have entered the same email address used
during account registration."""))
return email
def save(self, **kw):
for user in self.users_cache:
log.info(u'Password reset email sent for user (%s)' % user)
if user.needs_tougher_password:
log_cef('Password Reset', 5, self.request,
username=user,
signature='PASSWORDRESET',
msg='Privileged user requested password reset')
else:
log_cef('Password Reset', 5, self.request,
username=user,
signature='PASSWORDRESET',
msg='User requested password reset')
try:
# Django calls send_mail() directly and has no option to pass
# in fail_silently, so we have to catch the SMTP error ourselves
super(PasswordResetForm, self).save(**kw)
except SMTPException, e:
log.error("Failed to send mail for (%s): %s" % (user, e))
class SetPasswordForm(auth_forms.SetPasswordForm, PasswordMixin):
new_password1 = forms.CharField(label=_lazy(u'New password'),
min_length=PasswordMixin.min_length,
error_messages=PasswordMixin.error_msg,
widget=PasswordMixin.widget())
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(SetPasswordForm, self).__init__(*args, **kwargs)
# We store our password in the users table, not auth_user like
# Django expects.
if isinstance(self.user, DjangoUser):
self.user = self.user.get_profile()
def clean_new_password1(self):
return self.clean_password(field='new_password1', instance='user')
def save(self, **kw):
# Three different loggers? :(
amo.log(amo.LOG.CHANGE_PASSWORD, user=self.user)
log.info(u'User (%s) changed password with reset form' % self.user)
log_cef('Password Changed', 5, self.request,
username=self.user.username, signature='PASSWORDCHANGED',
msg='User changed password')
super(SetPasswordForm, self).save(**kw)
class UserDeleteForm(forms.Form):
password = forms.CharField(max_length=255, required=True,
widget=forms.PasswordInput(render_value=False))
confirm = forms.BooleanField(required=True)
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(UserDeleteForm, self).__init__(*args, **kwargs)
def clean_password(self):
data = self.cleaned_data
amouser = self.request.user.get_profile()
if not amouser.check_password(data["password"]):
raise forms.ValidationError(_("Wrong password entered!"))
def clean(self):
amouser = self.request.user.get_profile()
if amouser.is_developer:
# This is tampering because the form isn't shown on the page if the
# user is a developer
log.warning(u'[Tampering] Attempt to delete developer account (%s)'
% self.request.user)
raise forms.ValidationError("")
class UsernameMixin:
def clean_username(self):
name = self.cleaned_data['username']
slug_validator(name, lower=False,
message=_('Enter a valid username consisting of letters, numbers, '
'underscores or hyphens.'))
if BlacklistedUsername.blocked(name):
raise forms.ValidationError(_('This username cannot be used.'))
# FIXME: Bug 858452. Remove this check when collation of the username
# column is changed to case insensitive.
if (UserProfile.objects.exclude(id=self.instance.id)
.filter(username__iexact=name).exists()):
raise forms.ValidationError(_('This username is already in use.'))
return name
class UserRegisterForm(happyforms.ModelForm, UsernameMixin, PasswordMixin):
"""
For registering users. We're not building off
d.contrib.auth.forms.UserCreationForm because it doesn't do a lot of the
details here, so we'd have to rewrite most of it anyway.
"""
username = forms.CharField(max_length=50)
display_name = forms.CharField(label=_lazy(u'Display Name'), max_length=50,
required=False)
location = forms.CharField(label=_lazy(u'Location'), max_length=100,
required=False)
occupation = forms.CharField(label=_lazy(u'Occupation'), max_length=100,
required=False)
password = forms.CharField(max_length=255,
min_length=PasswordMixin.min_length,
error_messages=PasswordMixin.error_msg,
widget=PasswordMixin.widget(render_value=False))
password2 = forms.CharField(max_length=255,
widget=forms.PasswordInput(render_value=False))
recaptcha = captcha.fields.ReCaptchaField()
homepage = forms.URLField(label=_lazy(u'Homepage'), required=False)
class Meta:
model = UserProfile
fields = ('username', 'display_name', 'location', 'occupation',
'password', 'password2', 'recaptcha', 'homepage', 'email',
'emailhidden')
def __init__(self, *args, **kwargs):
super(UserRegisterForm, self).__init__(*args, **kwargs)
if not settings.RECAPTCHA_PRIVATE_KEY:
del self.fields['recaptcha']
errors = {'invalid': _('This URL has an invalid format. '
'Valid URLs look like '
'http://example.com/my_page.')}
self.fields['homepage'].error_messages = errors
def clean_email(self):
d = self.cleaned_data['email'].split('@')[-1]
if BlacklistedEmailDomain.blocked(d):
raise forms.ValidationError(_('Please use an email address from a '
'different provider to complete '
'your registration.'))
return self.cleaned_data['email']
def clean(self):
super(UserRegisterForm, self).clean()
data = self.cleaned_data
# Passwords
p1 = data.get('password')
p2 = data.get('password2')
        # If p1 is invalid because it's blocked, this message is nonsensical.
if p1 and p1 != p2:
msg = _('The passwords did not match.')
self._errors['password2'] = ErrorList([msg])
if p2:
del data['password2']
return data
class UserEditForm(UserRegisterForm, PasswordMixin):
oldpassword = forms.CharField(max_length=255, required=False,
widget=forms.PasswordInput(render_value=False))
password = forms.CharField(max_length=255, required=False,
min_length=PasswordMixin.min_length,
error_messages=PasswordMixin.error_msg,
widget=PasswordMixin.widget(render_value=False))
password2 = forms.CharField(max_length=255, required=False,
widget=forms.PasswordInput(render_value=False))
photo = forms.FileField(label=_lazy(u'Profile Photo'), required=False)
notifications = forms.MultipleChoiceField(
choices=[],
widget=NotificationsSelectMultiple,
initial=email.NOTIFICATIONS_DEFAULT,
required=False)
def __init__(self, *args, **kwargs):
self.request = kwargs.pop('request', None)
super(UserEditForm, self).__init__(*args, **kwargs)
if self.instance:
default = dict((i, n.default_checked) for i, n
in email.NOTIFICATIONS_BY_ID.items())
user = dict((n.notification_id, n.enabled) for n
in self.instance.notifications.all())
default.update(user)
# Add choices to Notification.
choices = email.NOTIFICATIONS_CHOICES
if not self.instance.is_developer:
choices = email.NOTIFICATIONS_CHOICES_NOT_DEV
# Append a "NEW" message to new notification options.
saved = self.instance.notifications.values_list('notification_id',
flat=True)
self.choices_status = {}
for idx, label in choices:
self.choices_status[idx] = idx not in saved
self.fields['notifications'].choices = choices
self.fields['notifications'].initial = [i for i, v
in default.items() if v]
self.fields['notifications'].widget.form_instance = self
# TODO: We should inherit from a base form not UserRegisterForm
if self.fields.get('recaptcha'):
del self.fields['recaptcha']
class Meta:
model = UserProfile
exclude = ('password', 'picture_type')
def clean(self):
data = self.cleaned_data
amouser = self.request.user.get_profile()
# Passwords
p1 = data.get("password")
p2 = data.get("password2")
if p1 or p2:
if not amouser.check_password(data["oldpassword"]):
msg = _("Wrong password entered!")
self._errors["oldpassword"] = ErrorList([msg])
del data["oldpassword"]
super(UserEditForm, self).clean()
return data
def clean_photo(self):
photo = self.cleaned_data['photo']
if not photo:
return
if photo.content_type not in ('image/png', 'image/jpeg'):
raise forms.ValidationError(
_('Images must be either PNG or JPG.'))
if photo.size > settings.MAX_PHOTO_UPLOAD_SIZE:
raise forms.ValidationError(
_('Please use images smaller than %dMB.' %
(settings.MAX_PHOTO_UPLOAD_SIZE / 1024 / 1024 - 1)))
return photo
def save(self, log_for_developer=True):
u = super(UserEditForm, self).save(commit=False)
data = self.cleaned_data
photo = data['photo']
if photo:
u.picture_type = 'image/png'
tmp_destination = u.picture_path + '__unconverted'
with storage.open(tmp_destination, 'wb') as fh:
for chunk in photo.chunks():
fh.write(chunk)
tasks.resize_photo.delay(tmp_destination, u.picture_path,
set_modified_on=[u])
if data['password']:
u.set_password(data['password'])
log_cef('Password Changed', 5, self.request, username=u.username,
signature='PASSWORDCHANGED', msg='User changed password')
if log_for_developer:
amo.log(amo.LOG.CHANGE_PASSWORD)
log.info(u'User (%s) changed their password' % u)
for (i, n) in email.NOTIFICATIONS_BY_ID.items():
enabled = n.mandatory or (str(i) in data['notifications'])
UserNotification.update_or_create(user=u, notification_id=i,
update={'enabled': enabled})
log.debug(u'User (%s) updated their profile' % u)
u.save()
return u
class BaseAdminUserEditForm(object):
def changed_fields(self):
"""Returns changed_data ignoring these fields."""
return (set(self.changed_data) -
set(['admin_log', 'notifications', 'photo',
'password', 'password2', 'oldpassword']))
def changes(self):
"""A dictionary of changed fields, old, new. Hides password."""
details = dict([(k, (self.initial[k], self.cleaned_data[k]))
for k in self.changed_fields()])
if 'password' in self.changed_data:
details['password'] = ['****', '****']
return details
def clean_anonymize(self):
if (self.cleaned_data['anonymize'] and
self.changed_fields() != set(['anonymize'])):
raise forms.ValidationError(_('To anonymize, enter a reason for'
' the change but do not change any'
' other field.'))
return self.cleaned_data['anonymize']
class AdminUserEditForm(BaseAdminUserEditForm, UserEditForm):
"""This is the form used by admins to edit users' info."""
admin_log = forms.CharField(required=True, label='Reason for change',
widget=forms.Textarea(attrs={'rows': 4}))
confirmationcode = forms.CharField(required=False, max_length=255,
label='Confirmation code')
notes = forms.CharField(required=False, label='Notes',
widget=forms.Textarea(attrs={'rows': 4}))
anonymize = forms.BooleanField(required=False)
def save(self, *args, **kw):
profile = super(AdminUserEditForm, self).save(log_for_developer=False)
if self.cleaned_data['anonymize']:
amo.log(amo.LOG.ADMIN_USER_ANONYMIZED, self.instance,
self.cleaned_data['admin_log'])
profile.anonymize() # This also logs
else:
amo.log(amo.LOG.ADMIN_USER_EDITED, self.instance,
self.cleaned_data['admin_log'], details=self.changes())
log.info('Admin edit user: %s changed fields: %s' %
(self.instance, self.changed_fields()))
if 'password' in self.changes():
log_cef('Password Changed', 5, self.request,
username=self.instance.username,
signature='PASSWORDRESET',
msg='Admin requested password reset',
cs1=self.request.amo_user.username,
cs1Label='AdminName')
return profile
class BlacklistedUsernameAddForm(forms.Form):
"""Form for adding blacklisted username in bulk fashion."""
usernames = forms.CharField(widget=forms.Textarea(
attrs={'cols': 40, 'rows': 16}))
def clean(self):
super(BlacklistedUsernameAddForm, self).clean()
data = self.cleaned_data
if 'usernames' in data:
data['usernames'] = os.linesep.join(
[s.strip() for s in data['usernames'].splitlines()
if s.strip()])
if 'usernames' not in data or data['usernames'] == '':
msg = 'Please enter at least one username to blacklist.'
self._errors['usernames'] = ErrorList([msg])
return data
class BlacklistedEmailDomainAddForm(forms.Form):
"""Form for adding blacklisted user e-mail domains in bulk fashion."""
domains = forms.CharField(
widget=forms.Textarea(attrs={'cols': 40, 'rows': 16}))
def clean(self):
super(BlacklistedEmailDomainAddForm, self).clean()
data = self.cleaned_data
if 'domains' in data:
l = filter(None, [s.strip() for s in data['domains'].splitlines()])
data['domains'] = os.linesep.join(l)
if not data.get('domains', ''):
msg = 'Please enter at least one e-mail domain to blacklist.'
self._errors['domains'] = ErrorList([msg])
return data
class ContactForm(happyforms.Form):
text = forms.CharField(widget=forms.Textarea())
class RemoveForm(happyforms.Form):
remove = forms.BooleanField()
|
{
"content_hash": "1eac07f0902b79a62fa3493e00668fe2",
"timestamp": "",
"source": "github",
"line_count": 466,
"max_line_length": 79,
"avg_line_length": 40.122317596566525,
"alnum_prop": 0.5792373107985238,
"repo_name": "spasovski/zamboni",
"id": "6a643d0f0d6978e48b5daf0b48476bf7684a6f04",
"size": "18697",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "apps/users/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4145"
},
{
"name": "CSS",
"bytes": "885279"
},
{
"name": "JavaScript",
"bytes": "1677601"
},
{
"name": "Puppet",
"bytes": "13808"
},
{
"name": "Python",
"bytes": "6279560"
},
{
"name": "Shell",
"bytes": "19774"
}
],
"symlink_target": ""
}
|
"""
.. _tut-report:
===============================
Getting started with mne.Report
===============================
:class:`mne.Report` is a way to create interactive HTML summaries of your data.
These reports can show many different visualizations for one or multiple
participants. A common use case is creating diagnostic summaries to check data
quality at different stages in the processing pipeline. The report can show
things like plots of data before and after each preprocessing step, epoch
rejection statistics, MRI slices with overlaid BEM shells, all the way up to
plots of estimated cortical activity.
Compared to a Jupyter notebook, :class:`mne.Report` is easier to deploy (the
HTML pages it generates are self-contained and do not require a running Python
environment) but less flexible (you can't change code and re-run something
directly within the browser). This tutorial covers the basics of building a
:class:`~mne.Report`. As usual, we'll start by importing the modules and data
we need:
"""
# %%
from pathlib import Path
import tempfile
import numpy as np
import scipy.ndimage
import matplotlib.pyplot as plt
import mne
data_path = Path(mne.datasets.sample.data_path(verbose=False))
sample_dir = data_path / 'MEG' / 'sample'
subjects_dir = data_path / 'subjects'
# %%
# Before getting started with :class:`mne.Report`, make sure the files you want
# to render follow the filename conventions defined by MNE:
#
# .. cssclass:: table-bordered
# .. rst-class:: midvalign
#
# =================================== =========================================
# Data object Filename convention (ends with)
# =================================== =========================================
# `~mne.io.Raw` ``-raw.fif(.gz)``, ``-raw_sss.fif(.gz)``,
# ``-raw_tsss.fif(.gz)``,
# ``_meg.fif(.gz)``, ``_eeg.fif(.gz)``,
# ``_ieeg.fif(.gz)``
# events ``-eve.fif(.gz)``
# `~mne.Epochs` ``-epo.fif(.gz)``
# `~mne.Evoked` ``-ave.fif(.gz)``
# `~mne.Covariance` ``-cov.fif(.gz)``
# `~mne.Projection` ``-proj.fif(.gz)``
# `~mne.transforms.Transform` ``-trans.fif(.gz)``
# `~mne.Forward` ``-fwd.fif(.gz)``
# `~mne.minimum_norm.InverseOperator` ``-inv.fif(.gz)``
# =================================== =========================================
#
# Alternatively, the dash ``-`` in the filename may be replaced with an
# underscore ``_``.
#
# The basic process for creating an HTML report is to instantiate the
# :class:`~mne.Report` class and then use one or more of its many methods to
# add content, one element at a time.
#
# You may also use the :meth:`~mne.Report.parse_folder` method to select
# particular files to include in the report. But more on that later.
#
# .. sidebar:: Viewing the report
#
# On successful creation of the report, the :meth:`~mne.Report.save` method
# will open the HTML in a new tab in your browser. To disable this, use the
# ``open_browser=False`` parameter of :meth:`~mne.Report.save`.
#
# Adding `~mne.io.Raw` data
# ^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Raw data can be added via the :meth:`mne.Report.add_raw` method. It can
# operate with a path to a raw file and `~mne.io.Raw` objects, and will
# produce – among other output – a slider that allows you to scrub through 10
# equally-spaced 1-second segments of the data:
#
# .. warning::
# In the following example, we crop the raw data to 60 seconds merely to
# speed up processing; this is not usually recommended!
raw_path = sample_dir / 'sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw(raw_path)
raw.pick_types(eeg=True, eog=True, stim=True).crop(tmax=60).load_data()
report = mne.Report(title='Raw example')
# This method also accepts a path, e.g., raw=raw_path
report.add_raw(raw=raw, title='Raw', psd=False) # omit PSD plot
report.save('report_raw.html', overwrite=True)
# %%
# Adding events
# ^^^^^^^^^^^^^
#
# Events can be added via :meth:`mne.Report.add_events`. You also need to
# supply the sampling frequency used during the recording; this information
# is used to generate a meaningful time axis.
events_path = sample_dir / 'sample_audvis_filt-0-40_raw-eve.fif'
events = mne.find_events(raw=raw)
sfreq = raw.info['sfreq']
report = mne.Report(title='Events example')
report.add_events(events=events_path, title='Events from Path', sfreq=sfreq)
report.add_events(events=events, title='Events from "events"', sfreq=sfreq)
report.save('report_events.html', overwrite=True)
# %%
# Adding `~mne.Epochs`
# ^^^^^^^^^^^^^^^^^^^^
#
# Epochs can be added via :meth:`mne.Report.add_epochs`. Note that although
# this method accepts a path to an epochs file too, in the following example
# we only add epochs that we create on the fly from raw data. To demonstrate
# the representation of epochs metadata, we'll add some of that too.
event_id = {
'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4, 'face': 5, 'buttonpress': 32
}
metadata, _, _ = mne.epochs.make_metadata(
events=events,
event_id=event_id,
tmin=-0.2,
tmax=0.5,
sfreq=raw.info['sfreq']
)
epochs = mne.Epochs(
raw=raw, events=events, event_id=event_id, metadata=metadata
)
report = mne.Report(title='Epochs example')
report.add_epochs(epochs=epochs, title='Epochs from "epochs"')
report.save('report_epochs.html', overwrite=True)
# %%
# Adding `~mne.Evoked`
# ^^^^^^^^^^^^^^^^^^^^
#
# Evoked data can be added via :meth:`mne.Report.add_evokeds`. By default, the
# ``Evoked.comment`` attribute of each evoked will be used as a title. We can
# specify custom titles via the ``titles`` parameter. Again, this method
# also accepts the path to an evoked file stored on disk; in the following
# example, however, we load the evokeds manually first, since we only want to
# add a subset of them to the report. The evokeds are not baseline-corrected,
# so we apply baseline correction, too. Lastly, by providing an (optional)
# noise covariance, we can add plots of evokeds that were "whitened" using
# this covariance matrix.
#
# By default, this method will produce snapshots at 21 equally-spaced time
# points (or fewer, if the data contains fewer time points). We can adjust this
# via the ``n_time_points`` parameter.
evoked_path = sample_dir / 'sample_audvis-ave.fif'
cov_path = sample_dir / 'sample_audvis-cov.fif'
evokeds = mne.read_evokeds(evoked_path, baseline=(None, 0))
evokeds_subset = evokeds[:2] # The first two
for evoked in evokeds_subset:
evoked.pick('eeg') # just for speed of plotting
report = mne.Report(title='Evoked example')
report.add_evokeds(
evokeds=evokeds_subset,
titles=['evoked 1', # Manually specify titles
'evoked 2'],
noise_cov=cov_path,
n_time_points=5
)
report.save('report_evoked.html', overwrite=True)
# %%
# Adding `~mne.Covariance`
# ^^^^^^^^^^^^^^^^^^^^^^^^
#
# (Noise) covariance objects can be added via
# :meth:`mne.Report.add_covariance`. The method accepts `~mne.Covariance`
# objects and the path to a file on disk. It also expects us to pass an
# `~mne.Info` object or the path to a file to read the measurement info from,
# as well as a title.
cov_path = sample_dir / 'sample_audvis-cov.fif'
report = mne.Report(title='Covariance example')
report.add_covariance(cov=cov_path, info=raw_path, title='Covariance')
report.save('report_cov.html', overwrite=True)
# %%
# Adding `~mne.Projection` vectors
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# `~mne.Projection` vectors can be added via
# :meth:`mne.Report.add_projs`. The method requires an `~mne.Info` object
# (or the path to one) and a title. Projectors found in the `~mne.Info` will
# be visualized. You may also supply a list of `~mne.Projection` objects or
# a path to projectors stored on disk. In this case, the channel information
# is read from the `~mne.Info`, but projectors potentially included will be
# ignored; instead, only the explicitly passed projectors will be plotted.
ecg_proj_path = sample_dir / 'sample_audvis_ecg-proj.fif'
eog_proj_path = sample_dir / 'sample_audvis_eog-proj.fif'
report = mne.Report(title='Projectors example')
report.add_projs(info=raw_path, title='Projs from info')
report.add_projs(info=raw_path, projs=ecg_proj_path,
title='ECG projs from path')
report.add_projs(info=raw_path, projs=eog_proj_path,
title='EOG projs from path')
report.save('report_projs.html', overwrite=True)
# %%
# Adding `~mne.preprocessing.ICA`
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# `~mne.preprocessing.ICA` objects can be added via
# :meth:`mne.Report.add_ica`. Aside from the parameters ``ica`` (that accepts
# an `~mne.preprocessing.ICA` instance or a path to an ICA object stored on
# disk) and the ``title``, there is a third required parameter, ``inst``.
# ``inst`` is used to specify a `~mne.io.Raw` or `~mne.Epochs` object for
# producing ICA property plots and overlay plots demonstrating
# the effects of ICA cleaning. If, instead, you only want to generate ICA
# component topography plots, explicitly pass ``inst=None``.
#
# .. note:: :meth:`mne.Report.add_ica` only works with fitted ICAs.
#
# You can optionally specify for which components to produce topography and
# properties plots by passing ``picks``. By default, all components will be
# shown. It is also possible to pass evoked signals based on ECG and EOG events
# via ``ecg_evoked`` and ``eog_evoked``. This allows you to directly see the
# effects of ICA component removal on these artifactual signals.
# Artifact detection scores produced by
# :meth:`~mne.preprocessing.ICA.find_bads_ecg`
# and :meth:`~mne.preprocessing.ICA.find_bads_eog` can be passed via the
# ``ecg_scores`` and ``eog_scores`` parameters, respectively, producing
# visualizations of the scores for each ICA component.
#
# Lastly, by passing ``n_jobs``, you may greatly speed up the generation of
# the properties plots by enabling parallel execution.
#
# .. warning::
# In the following example, we request a small number of ICA components
# to estimate, set the threshold for assuming ICA convergence to a very
# liberal value, and only visualize 2 of the components. All of this is
#    done to greatly reduce the processing time of this tutorial, and is
# usually **not** recommended for an actual data analysis.
ica = mne.preprocessing.ICA(
n_components=5, # fit 5 ICA components
fit_params=dict(tol=0.01) # assume very early on that ICA has converged
)
ica.fit(inst=raw)
# create epochs based on EOG events, find EOG artifacts in the data via pattern
# matching, and exclude the EOG-related ICA components
eog_epochs = mne.preprocessing.create_eog_epochs(raw=raw)
eog_components, eog_scores = ica.find_bads_eog(
inst=eog_epochs,
ch_name='EEG 001', # a channel close to the eye
threshold=1 # lower than the default threshold
)
ica.exclude = eog_components
report = mne.Report(title='ICA example')
report.add_ica(
ica=ica,
title='ICA cleaning',
picks=[0, 1], # only plot the first two components
inst=raw,
eog_evoked=eog_epochs.average(),
eog_scores=eog_scores,
n_jobs=None # could be increased!
)
report.save('report_ica.html', overwrite=True)
# %%
# Adding MRI with BEM
# ^^^^^^^^^^^^^^^^^^^
#
# MRI slices with superimposed traces of the boundary element model (BEM)
# surfaces can be added via :meth:`mne.Report.add_bem`. All you need to pass is
# the FreeSurfer subject name and subjects directory, and a title. To reduce
# the resulting file size, you may pass the ``decim`` parameter to only include
# every n-th volume slice, and ``width`` to specify the width of the resulting
# figures in pixels.
report = mne.Report(title='BEM example')
report.add_bem(
subject='sample', subjects_dir=subjects_dir, title='MRI & BEM',
decim=20,
width=256
)
report.save('report_mri_and_bem.html', overwrite=True)
# %%
# Adding coregistration
# ^^^^^^^^^^^^^^^^^^^^^
#
# The sensor alignment (``head -> mri`` transformation obtained by
# "coregistration") can be visualized via :meth:`mne.Report.add_trans`. The
# method expects the transformation either as a `~mne.transforms.Transform`
# object or as a path to a ``trans.fif`` file, the FreeSurfer subject name and
# subjects directory, and a title. The ``alpha`` parameter can be used to
# control the transparency of the head, where a value of 1 means fully opaque.
trans_path = sample_dir / 'sample_audvis_raw-trans.fif'
report = mne.Report(title='Coregistration example')
report.add_trans(
trans=trans_path, info=raw_path, subject='sample',
subjects_dir=subjects_dir, alpha=1.0, title='Coregistration'
)
report.save('report_coregistration.html', overwrite=True)
# %%
# Adding a `~mne.Forward` solution
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Forward solutions ("leadfields") can be added by passing a `~mne.Forward`
# object or the path to a forward solution stored on disk to
# :meth:`mne.Report.add_forward`.
fwd_path = sample_dir / 'sample_audvis-meg-oct-6-fwd.fif'
report = mne.Report(title='Forward solution example')
report.add_forward(forward=fwd_path, title='Forward solution')
report.save('report_forward_sol.html', overwrite=True)
# %%
# Adding an `~mne.minimum_norm.InverseOperator`
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# An inverse operator can be added via :meth:`mne.Report.add_inverse_operator`.
# The method expects an `~mne.minimum_norm.InverseOperator` object or a path to
# one stored on disk, and a title.
inverse_op_path = sample_dir / 'sample_audvis-meg-oct-6-meg-inv.fif'
report = mne.Report(title='Inverse operator example')
report.add_inverse_operator(
inverse_operator=inverse_op_path, title='Inverse operator'
)
report.save('report_inverse_op.html', overwrite=True)
# %%
# Adding a `~mne.SourceEstimate`
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# An inverse solution (also called source estimate or source time course, STC)
# can be added via :meth:`mne.Report.add_stc`. The
# method expects an `~mne.SourceEstimate`, the corresponding FreeSurfer subject
# name and subjects directory, and a title. By default, it will produce
# snapshots at 51 equally-spaced time points (or fewer, if the data contains
# fewer time points). We can adjust this via the ``n_time_points`` parameter.
stc_path = sample_dir / 'sample_audvis-meg'
report = mne.Report(title='Source estimate example')
report.add_stc(
stc=stc_path, subject='sample', subjects_dir=subjects_dir,
title='Source estimate', n_time_points=2 # few for speed
)
report.save('report_inverse_sol.html', overwrite=True)
# %%
# Adding source code (e.g., a Python script)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# It is possible to add code or scripts (e.g., the scripts you used for
# analysis) to the report via :meth:`mne.Report.add_code`. The code blocks will
# be automatically syntax-highlighted. You may pass a string with the
# respective code snippet, or the path to a file. If you pass a path, it
# **must** be a `pathlib.Path` object (and not a string), otherwise it will be
# treated as a code literal.
#
# Optionally, you can specify which programming language to assume for syntax
# highlighting by passing the ``language`` parameter. By default, we'll assume
# the provided code is Python.
mne_init_py_path = Path(mne.__file__) # __init__.py in the MNE-Python root
mne_init_py_content = mne_init_py_path.read_text(encoding='utf-8')
report = mne.Report(title='Code example')
report.add_code(
code=mne_init_py_path,
title="Code from Path"
)
report.add_code(
code=mne_init_py_content,
title="Code from string"
)
report.save('report_code.html', overwrite=True)
# %%
# Adding custom figures
# ^^^^^^^^^^^^^^^^^^^^^
#
# Custom Matplotlib figures can be added via :meth:`~mne.Report.add_figure`.
# Required parameters are the figure and a title. Optionally, you may add a
# caption to appear below the figure. You can also specify the image format
# of the image file that will be generated from the figure, so it can be
# embedded in the HTML report.
x = np.linspace(start=0, stop=10, num=100)
y = x**2
fig, ax = plt.subplots()
ax.plot(x, y, ls='--', lw=2, color='blue', label='my function')
ax.set_xlabel('x')
ax.set_ylabel('f(x)')
ax.legend()
report = mne.Report(title='Figure example')
report.add_figure(
fig=fig, title='A custom figure',
caption='A blue dashed line reaches up into the sky …',
image_format='PNG'
)
report.save('report_custom_figure.html', overwrite=True)
plt.close(fig)
# %%
# Multiple figures can be grouped into a single section via the ``section``
# parameter.
fig_1, ax_1 = plt.subplots()
ax_1.plot([1, 2, 3])
fig_2, ax_2 = plt.subplots()
ax_2.plot([3, 2, 1])
section = 'Section example'
report = mne.Report(title='Figure section example')
report.add_figure(
fig=fig_1,
title='Figure 1',
section=section,
tags='fig-1'
)
report.add_figure(
fig=fig_2,
title='Figure 2',
section=section,
tags='fig-2'
)
report.save('report_custom_figure_sections.html', overwrite=True)
plt.close(fig_1)
plt.close(fig_2)
# %%
# The :meth:`mne.Report.add_figure` method can also add multiple figures at
# once. In this case, a slider will appear, allowing users to intuitively
# browse the figures. To make this work, you need to provide a collection of
# figures, a title, and optionally a collection of captions.
#
# In the following example, we will read the MNE logo as a Matplotlib figure
# and rotate it with different angles. Each rotated figure and its respective
# caption will be added to a list, which is then used to create the slider.
mne_logo_path = Path(mne.__file__).parent / 'icons' / 'mne_icon-cropped.png'
fig_array = plt.imread(mne_logo_path)
rotation_angles = np.linspace(start=0, stop=360, num=17)
figs = []
captions = []
for angle in rotation_angles:
# Rotate and remove some rounding errors to avoid Matplotlib warnings
fig_array_rotated = scipy.ndimage.rotate(input=fig_array, angle=angle)
fig_array_rotated = fig_array_rotated.clip(min=0, max=1)
# Create the figure
fig, ax = plt.subplots()
ax.imshow(fig_array_rotated)
ax.set_axis_off()
# Store figure and caption
figs.append(fig)
captions.append(f'Rotation angle: {round(angle, 1)}°')
# can also be a MNEQtBrowser instance
figs.append(raw.plot())
captions.append('... plus a raw data plot')
report = mne.Report(title='Multiple figures example')
report.add_figure(fig=figs, title='Fun with figures! 🥳', caption=captions)
report.save('report_custom_figures.html', overwrite=True)
for fig in figs[:-1]:
plt.close(fig)
figs[-1].close()
del figs
# %%
# Adding image files
# ^^^^^^^^^^^^^^^^^^
#
# Existing images (e.g., photos, screenshots, sketches etc.) can be added
# to the report via :meth:`mne.Report.add_image`. Supported image formats
# include JPEG, PNG, GIF, and SVG (and possibly others). Like with Matplotlib
# figures, you can specify a caption to appear below the image.
report = mne.Report(title='Image example')
report.add_image(
image=mne_logo_path, title='MNE',
caption='Powered by 🧠 🧠 🧠 around the world!'
)
report.save('report_custom_image.html', overwrite=True)
# %%
# Working with tags
# ^^^^^^^^^^^^^^^^^
#
# Each ``add_*`` method accepts a keyword parameter ``tags``, which can be
# used to pass one or more tags to associate with the respective content
# elements. By default, each ``add_*`` method adds a tag describing the data
# type, e.g., ``evoked`` or ``source-estimate``. When viewing the HTML report,
# the ``Filter by tags`` dropdown menu can be used to interactively show or
# hide content with specific tags. This allows you, e.g., to view only
# ``evoked`` or ``participant-001`` data, should you have added those tags.
# Visible tags will appear with blue, and hidden tags with gray background
# color.
#
# To toggle the visibility of **all** tags, use the respective checkbox in the
# ``Filter by tags`` dropdown menu, or press :kbd:`T`.
report = mne.Report(title='Tags example')
report.add_image(
image=mne_logo_path,
title='MNE Logo',
tags=('image', 'mne', 'logo', 'open-source')
)
report.save('report_tags.html', overwrite=True)
# %%
# Editing a saved report
# ^^^^^^^^^^^^^^^^^^^^^^
#
# Saving to HTML is a write-only operation, meaning that we cannot read an
# ``.html`` file back as a :class:`~mne.Report` object. In order to be able
# to edit a report once it's no longer in-memory in an active Python session,
# save it as an HDF5 file instead of HTML:
report = mne.Report(title='Saved report example', verbose=True)
report.add_image(image=mne_logo_path, title='MNE 1')
report.save('report_partial.hdf5', overwrite=True)
# %%
# The saved report can be read back and modified or amended. This allows the
# possibility to e.g. run multiple scripts in a processing pipeline, where each
# script adds new content to an existing report.
report_from_disk = mne.open_report('report_partial.hdf5')
report_from_disk.add_image(image=mne_logo_path, title='MNE 2')
report_from_disk.save('report_partial.hdf5', overwrite=True)
# %%
# To make this even easier, :class:`mne.Report` can be used as a
# context manager (note the ``with`` statement):
with mne.open_report('report_partial.hdf5') as report:
report.add_image(image=mne_logo_path, title='MNE 3')
report.save('report_final.html', overwrite=True)
# %%
# With the context manager, the updated report is also automatically saved
# back to the HDF5 file (:file:`report_partial.hdf5`) upon leaving the block.
#
# Adding an entire folder of files
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# We also provide a way to add an entire **folder** of files to the report at
# once, without having to invoke the individual ``add_*`` methods outlined
# above for each file. This approach, while convenient, provides less
# flexibility with respect to content ordering, tags, titles, etc.
#
# For our first example, we'll generate a barebones report for all the
# :file:`.fif` files containing raw data in the sample dataset, by passing the
# pattern ``*raw.fif`` to :meth:`~mne.Report.parse_folder`. We'll omit the
# ``subject`` and ``subjects_dir`` parameters from the :class:`~mne.Report`
# constructor, but we'll also pass ``render_bem=False`` to the
# :meth:`~mne.Report.parse_folder` method — otherwise we would get a warning
# about not being able to render MRI and ``trans`` files without knowing the
# subject. To save some processing time in this tutorial, we're also going to
# disable rendering of the butterfly plots for the `~mne.io.Raw` data by
# passing ``raw_butterfly=False``.
#
# Which files are included depends on both the ``pattern`` parameter passed to
# :meth:`~mne.Report.parse_folder` and also the ``subject`` and
# ``subjects_dir`` parameters provided to the :class:`~mne.Report` constructor.
report = mne.Report(title='parse_folder example')
report.parse_folder(
data_path=data_path, pattern='*raw.fif', render_bem=False,
raw_butterfly=False
)
report.save('report_parse_folder_basic.html', overwrite=True)
# %%
# By default, the power spectral density and SSP projectors of the
# :class:`~mne.io.Raw` files are not shown to speed up report generation. You
# can add them by passing ``raw_psd=True`` and ``projs=True`` to the
# :class:`~mne.Report` constructor. Like in the previous example, we're going
# to omit the butterfly plots by passing ``raw_butterfly=False``. Lastly, let's
# also refine our pattern to select only the filtered raw recording (omitting
# the unfiltered data and the empty-room noise recordings).
pattern = 'sample_audvis_filt-0-40_raw.fif'
report = mne.Report(title='parse_folder example 2', raw_psd=True, projs=True)
report.parse_folder(
data_path=data_path, pattern=pattern, render_bem=False, raw_butterfly=False
)
report.save('report_parse_folder_raw_psd_projs.html', overwrite=True)
# %%
# This time we'll pass a specific ``subject`` and ``subjects_dir`` (even though
# there's only one subject in the sample dataset) and remove our
# ``render_bem=False`` parameter so we can see the MRI slices, with BEM
# contours overlaid on top if available. Since this is computationally
# expensive, we'll also pass the ``mri_decim`` parameter for the benefit of our
# documentation servers, and skip processing the :file:`.fif` files.
report = mne.Report(
title='parse_folder example 3', subject='sample', subjects_dir=subjects_dir
)
report.parse_folder(data_path=data_path, pattern='', mri_decim=25)
report.save('report_parse_folder_mri_bem.html', overwrite=True)
# %%
# Now let's look at how :class:`~mne.Report` handles :class:`~mne.Evoked`
# data (we will skip the MRIs to save computation time).
#
# The MNE sample dataset we're using in this example has **not** been
# baseline-corrected, so let's apply baseline correction now for the
# report!
#
# To request baseline correction, pass a ``baseline`` argument to
# `~mne.Report`, which should be a tuple with the starting and ending time of
# the baseline period. For more details, see the documentation on
# `~mne.Evoked.apply_baseline`. Here, we will apply baseline correction for a
# baseline period from the beginning of the time interval to time point zero.
#
# Lastly, we want to render the "whitened" evoked data, too. Whitening
# requires us to specify the path to a covariance matrix file via the
# ``cov_fname`` parameter of `~mne.Report`.
#
# Now, let's put all of this together! Here we use a temporary directory
# for speed so we can render a single Evoked instance, using just EEG
# channels.
baseline = (None, 0)
cov_fname = sample_dir / 'sample_audvis-cov.fif'
pattern = 'sample_audvis-no-filter-ave.fif'
evoked = mne.read_evokeds(sample_dir / pattern)[0]
report = mne.Report(
title='parse_folder example 4', baseline=baseline, cov_fname=cov_fname
)
with tempfile.TemporaryDirectory() as path:
evoked.save(Path(path) / pattern)
report.parse_folder(
path, pattern=pattern, render_bem=False, n_time_points_evokeds=5
)
report.save('report_parse_folder_evoked.html', overwrite=True)
# %%
# If you want to actually *view* the noise covariance in the report, make sure
# it is captured by the pattern passed to :meth:`~mne.Report.parse_folder`, and
# also include a source for an :class:`~mne.Info` object (any of the
# :class:`~mne.io.Raw`, :class:`~mne.Epochs` or :class:`~mne.Evoked`
# :file:`.fif` files that contain subject data also contain the measurement
# information and should work):
pattern = 'sample_audvis-cov.fif'
info_fname = sample_dir / 'sample_audvis-ave.fif'
report = mne.Report(title='parse_folder example 5', info_fname=info_fname)
report.parse_folder(
data_path, pattern=pattern, render_bem=False, n_time_points_evokeds=5
)
report.save('report_parse_folder_cov.html', overwrite=True)
# %%
#
# Adding custom HTML (e.g., a description text)
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# The :meth:`~mne.Report.add_html` method allows you to add custom HTML to
# your report. This feature can be very convenient to add short descriptions,
# lists, or reminders to your report (among many other things you can think
# of encoding in HTML).
report = mne.Report(title='Report on hypothesis 1')
my_html = """
<p>We have the following hypotheses:</p>
<ol>
<li>There is a difference between images showing man-made vs. natural
environments</li>
<li>This difference manifests itself most strongly in the amplitude of the
N1 ERP component</li>
</ol>
<p>Below we show several plots and tests of the data.</p>
"""
report.add_html(title='Hypothesis', html=my_html)
report.save('report_add_html.html', overwrite=True)
|
{
"content_hash": "f8382e25719c2e46e1481179454e4835",
"timestamp": "",
"source": "github",
"line_count": 713,
"max_line_length": 79,
"avg_line_length": 38.85694249649369,
"alnum_prop": 0.6921133369427901,
"repo_name": "kingjr/mne-python",
"id": "fe373e535bb53d8cfd55e22a0aad5ae954219e2a",
"size": "27750",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tutorials/intro/70_report.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Csound Document",
"bytes": "24999"
},
{
"name": "JavaScript",
"bytes": "8008"
},
{
"name": "Jinja",
"bytes": "13067"
},
{
"name": "Makefile",
"bytes": "4528"
},
{
"name": "Python",
"bytes": "10062156"
},
{
"name": "Sass",
"bytes": "257"
},
{
"name": "Shell",
"bytes": "19906"
}
],
"symlink_target": ""
}
|
"""Creates windows and posix stub files for a given set of signatures.
For libraries that need to be loaded outside of the standard executable startup
path mechanism, stub files need to be generated for the wanted functions. In
windows, this is done via "def" files and the delay load mechanism. On a posix
system, a set of stub functions need to be generated that dispatch to functions
found via dlsym.
This script takes a set of files, where each file is a list of C-style
signatures (one signature per line). The output is either a windows def file,
or a header + implementation file of stubs suitable for use in a posix system.
This script also handles variadic functions, e.g.
void printf(const char* s, ...);
TODO(hclam): Fix the situation for variadic functions.
Stub for the above function will be generated and inside the stub function it
is translated to:
void printf(const char* s, ...) {
printf_ptr(s, (void*)arg1);
}
Only one argument from the varidiac arguments is used and it will be used as
type void*.
"""
__author__ = 'ajwong@chromium.org (Albert J. Wong)'
import optparse
import os
import re
import string
import subprocess
import sys
class Error(Exception):
pass
class BadSignatureError(Error):
pass
class SubprocessError(Error):
def __init__(self, message, error_code):
Error.__init__(self)
self.message = message
self.error_code = error_code
def __str__(self):
    return 'Failed with code %s: %s' % (repr(self.error_code), self.message)
# Regular expression used to parse function signatures in the input files.
# The regex is built around identifying the "identifier" for the function name.
# We consider the identifier to be the string that follows these constraints:
#
# 1) Starts with [_a-zA-Z] (C++ spec 2.10).
# 2) Continues with [_a-zA-Z0-9] (C++ spec 2.10).
# 3) Precedes an opening parenthesis by 0 or more whitespace chars.
#
# From that, all preceding characters are considered the return value.
# Trailing characters should have a substring matching the form (.*). That
# is considered the arguments.
SIGNATURE_REGEX = re.compile(r'(?P<return_type>.+?)'
                             r'(?P<name>[_a-zA-Z][_a-zA-Z0-9]+)\s*'
                             r'\((?P<params>.*?)\)')
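# For example, given the line "void* dlopen(const char* filename, int flag)",
# the regex captures return_type 'void* ', name 'dlopen', and
# params 'const char* filename, int flag' (whitespace is stripped later).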
# Used for generating C++ identifiers.
INVALID_C_IDENT_CHARS = re.compile('[^_a-zA-Z0-9]')
# Constants defining the supported file type options.
FILE_TYPE_WIN_X86 = 'windows_lib'
FILE_TYPE_WIN_X64 = 'windows_lib_x64'
FILE_TYPE_POSIX_STUB = 'posix_stubs'
FILE_TYPE_WIN_DEF = 'windows_def'
# Template for generating a stub function definition. Includes a forward
# declaration marking the symbol as weak. This template takes the following
# named parameters.
# return_type: The return type.
# export: The macro used to alter the stub's visibility.
# name: The name of the function.
# params: The parameters to the function.
# return_prefix: 'return ' if this function is not void. '' otherwise.
# arg_list: The arguments used to call the stub function.
STUB_FUNCTION_DEFINITION = (
"""extern %(return_type)s %(name)s(%(params)s) __attribute__((weak));
%(return_type)s %(export)s %(name)s(%(params)s) {
%(return_prefix)s%(name)s_ptr(%(arg_list)s);
}""")
# Template for generating a variadic stub function definition with return
# value.
# Includes a forward declaration marking the symbol as weak.
# This template takes the following named parameters.
# return_type: The return type.
# export: The macro used to alter the stub's visibility.
# name: The name of the function.
# params: The parameters to the function.
# arg_list: The arguments used to call the stub function without the
# variadic argument.
# last_named_arg: Name of the last named argument before the variadic
# argument.
VARIADIC_STUB_FUNCTION_DEFINITION = (
"""extern %(return_type)s %(name)s(%(params)s) __attribute__((weak));
%(return_type)s %(export)s %(name)s(%(params)s) {
va_list args___;
va_start(args___, %(last_named_arg)s);
%(return_type)s ret___ = %(name)s_ptr(%(arg_list)s, va_arg(args___, void*));
va_end(args___);
return ret___;
}""")
# Template for generating a variadic stub function definition without
# return value.
# Includes a forward declaration marking the symbol as weak.
# This template takes the following named parameters.
# name: The name of the function.
# params: The parameters to the function.
# export: The macro used to alter the stub's visibility.
# arg_list: The arguments used to call the stub function without the
# variadic argument.
# last_named_arg: Name of the last named argument before the variadic
# argument.
VOID_VARIADIC_STUB_FUNCTION_DEFINITION = (
"""extern void %(name)s(%(params)s) __attribute__((weak));
void %(export)s %(name)s(%(params)s) {
va_list args___;
va_start(args___, %(last_named_arg)s);
%(name)s_ptr(%(arg_list)s, va_arg(args___, void*));
va_end(args___);
}""")
# Template for the preamble for the stub header file with the header guards,
# standard set of includes, and namespace opener. This template takes the
# following named parameters:
# guard_name: The macro to use as the header guard.
# namespace: The namespace for the stub functions.
STUB_HEADER_PREAMBLE = """// This is generated file. Do not modify directly.
#ifndef %(guard_name)s
#define %(guard_name)s
#include <stdarg.h>
#include <map>
#include <string>
#include <vector>
#include "base/logging.h"
namespace %(namespace)s {
"""
# Template for the end of the stub header. This closes the namespace and the
# header guards. This template takes the following named parameters:
# guard_name: The macro to use as the header guard.
# namespace: The namespace for the stub functions.
STUB_HEADER_CLOSER = """} // namespace %(namespace)s
#endif // %(guard_name)s
"""
# The standard includes needed for the stub implementation file. Takes one
# string substitution with the path to the associated stub header file.
IMPLEMENTATION_PREAMBLE = """// This is generated file. Do not modify directly.
#include "%s"
#include <stdlib.h> // For NULL.
#include <dlfcn.h>  // For dlsym, dlopen.
#include <map>
#include <vector>
"""
# The start and end templates for the enum definitions used by the Umbrella
# initializer.
UMBRELLA_ENUM_START = """// Enum and typedef for umbrella initializer.
enum StubModules {
"""
UMBRELLA_ENUM_END = """ kNumStubModules
};
"""
# Start and end of the extern "C" section for the implementation contents.
IMPLEMENTATION_CONTENTS_C_START = """extern "C" {
"""
IMPLEMENTATION_CONTENTS_C_END = """
} // extern "C"
"""
# Templates for the start and end of a namespace. Takes one parameter, the
# namespace name.
NAMESPACE_START = """namespace %s {
"""
NAMESPACE_END = """} // namespace %s
"""
# Comment to include before the section declaring all the function pointers
# used by the stub functions.
FUNCTION_POINTER_SECTION_COMMENT = (
"""// Static pointers that will hold the location of the real function
// implementations after the module has been loaded.
""")
# Template for the module initialization check function. This template
# takes two parameters: the function name, and the conditional used to
# verify the module's initialization.
MODULE_INITIALIZATION_CHECK_FUNCTION = (
"""// Returns true if all stubs have been properly initialized.
bool %s() {
if (%s) {
return true;
} else {
return false;
}
}
""")
# Template for the line that initialize the stub pointer. This template takes
# the following named parameters:
# name: The name of the function.
# return_type: The return type.
# params: The parameters to the function.
STUB_POINTER_INITIALIZER = """ %(name)s_ptr =
reinterpret_cast<%(return_type)s (*)(%(parameters)s)>(
dlsym(module, "%(name)s"));
VLOG_IF(1, !%(name)s_ptr) << "Couldn't load %(name)s, dlerror() says:\\n"
<< dlerror();
"""
# Template for module initializer function start and end. This template takes
# one parameter which is the initializer function name.
MODULE_INITIALIZE_START = """// Initializes the module stubs.
void %s(void* module) {
"""
MODULE_INITIALIZE_END = """}
"""
# Template for module uninitializer function start and end. This template
# takes one parameter which is the initializer function name.
MODULE_UNINITIALIZE_START = (
"""// Uninitialize the module stubs. Reset pointers to NULL.
void %s() {
""")
MODULE_UNINITIALIZE_END = """}
"""
# Open namespace and add typedef for internal data structures used by the
# umbrella initializer.
UMBRELLA_INITIALIZER_START = """namespace %s {
typedef std::map<StubModules, void*> StubHandleMap;
"""
# Function to close DSOs on error and clean up dangling references.
UMBRELLA_INITIALIZER_CLEANUP_FUNCTION = (
"""static void CloseLibraries(StubHandleMap* stub_handles) {
for (StubHandleMap::const_iterator it = stub_handles->begin();
it != stub_handles->end();
++it) {
dlclose(it->second);
}
stub_handles->clear();
}
""")
# Function to initialize each DSO for the given paths.
UMBRELLA_INITIALIZER_INITIALIZE_FUNCTION_START = (
"""bool InitializeStubs(const StubPathMap& path_map) {
StubHandleMap opened_libraries;
for (int i = 0; i < kNumStubModules; ++i) {
StubModules cur_module = static_cast<StubModules>(i);
// If a module is missing, we fail.
StubPathMap::const_iterator it = path_map.find(cur_module);
if (it == path_map.end()) {
CloseLibraries(&opened_libraries);
return false;
}
// Otherwise, attempt to dlopen the library.
const std::vector<std::string>& paths = it->second;
bool module_opened = false;
for (std::vector<std::string>::const_iterator dso_path = paths.begin();
!module_opened && dso_path != paths.end();
++dso_path) {
void* handle = dlopen(dso_path->c_str(), RTLD_LAZY);
if (handle != NULL) {
module_opened = true;
opened_libraries[cur_module] = handle;
} else {
VLOG(1) << "dlopen(" << dso_path->c_str() << ") failed, "
<< "dlerror() says:\\n" << dlerror();
}
}
if (!module_opened) {
CloseLibraries(&opened_libraries);
return false;
}
}
""")
# Template to generate code to check if each module initializer correctly
# completed, and cleanup on failures. This template takes the following
# named parameters.
# conditional: The conditional expression for successful initialization.
# uninitializers: The statements needed to uninitialize the modules.
UMBRELLA_INITIALIZER_CHECK_AND_CLEANUP = (
""" // Check that each module is initialized correctly.
// Close all previously opened libraries on failure.
if (%(conditional)s) {
%(uninitializers)s;
CloseLibraries(&opened_libraries);
return false;
}
return true;
}
""")
# Template for Initialize, Unininitialize, and IsInitialized functions for each
# module. This template takes the following named parameters:
# initialize: Name of the Initialize function.
# uninitialize: Name of the Uninitialize function.
# is_initialized: Name of the IsInitialized function.
MODULE_FUNCTION_PROTOTYPES = """bool %(is_initialized)s();
void %(initialize)s(void* module);
void %(uninitialize)s();
"""
# Template for umbrella initializer declaration and associated datatypes.
UMBRELLA_INITIALIZER_PROTOTYPE = (
"""typedef std::map<StubModules, std::vector<std::string> > StubPathMap;
// Umbrella initializer for all the modules in this stub file.
bool InitializeStubs(const StubPathMap& path_map);
""")
def ExtractModuleName(infile_path):
"""Infers the module name from the input file path.
The input filename is supposed to be in the form "ModuleName.sigs".
  This function repeatedly splits the extension from the basename of the
  path and returns the remainder as the module name.
Args:
infile_path: String holding the path to the input file.
Returns:
The module name as a string.
"""
basename = os.path.basename(infile_path)
  # This loop repeatedly removes suffixes of the filename separated by a "."
  # character.
  while True:
new_basename = os.path.splitext(basename)[0]
if basename == new_basename:
break
else:
basename = new_basename
return basename
def ParseSignatures(infile):
"""Parses function signatures in the input file.
This function parses a file of signatures into a list of dictionaries that
represent the function signatures in the input file. Each dictionary has
the following keys:
return_type: A string with the return type.
name: A string with the name of the function.
params: A list of each function parameter declaration (type + name)
The format of the input file is one C-style function signature per line, no
trailing semicolon. Empty lines are allowed. An empty line is a line that
consists purely of whitespace. Lines that begin with a # or // are considered
comment lines and are ignored.
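  For example (illustrative signatures only), the following lines all parse:

    void* dlopen(const char* filename, int flag)
    int avcodec_close(AVCodecContext* avctx)
    // this comment line is skipped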
We assume that "int foo(void)" is the same as "int foo()", which is not
true in C where "int foo()" is equivalent to "int foo(...)". Our generated
code is C++, and we do not handle varargs, so this is a case that can be
ignored for now.
Args:
infile: File object holding a text file of function signatures.
Returns:
A list of dictionaries, where each dictionary represents one function
signature.
Raises:
BadSignatureError: A line could not be parsed as a signature.
"""
signatures = []
for line in infile:
line = line.strip()
if line and line[0] != '#' and line[0:2] != '//':
m = SIGNATURE_REGEX.match(line)
if m is None:
raise BadSignatureError('Unparsable line: %s' % line)
signatures.append(
{'return_type': m.group('return_type').strip(),
'name': m.group('name').strip(),
'params': [arg.strip() for arg in m.group('params').split(',')]})
return signatures
def WriteWindowsDefFile(module_name, signatures, outfile):
"""Writes a windows def file to the given output file object.
The def file format is basically a list of function names. Generation is
simple. After outputting the LIBRARY and EXPORTS lines, print out each
  function name, one to a line, preceded by 2 spaces.
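  For example, a module "foo" exporting functions "bar" and "baz" yields:

    LIBRARY foo
    EXPORTS
      bar
      baz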
Args:
module_name: The name of the module we are writing a stub for.
signatures: The list of signature hashes, as produced by ParseSignatures,
to create stubs for.
outfile: File handle to populate with definitions.
"""
outfile.write('LIBRARY %s\n' % module_name)
outfile.write('EXPORTS\n')
for sig in signatures:
outfile.write(' %s\n' % sig['name'])
def QuietRun(args, filter=None, write_to=sys.stdout):
"""Invoke |args| as command via subprocess.Popen, filtering lines starting
with |filter|."""
popen = subprocess.Popen(args, stdout=subprocess.PIPE)
out, _ = popen.communicate()
for line in out.splitlines():
if not filter or not line.startswith(filter):
write_to.write(line + '\n')
return popen.returncode
def CreateWindowsLib(module_name, signatures, intermediate_dir, outdir_path,
machine):
"""Creates a windows library file.
Calling this function will create a lib file in the outdir_path that exports
the signatures passed into the object. A temporary def file will be created
in the intermediate_dir.
Args:
module_name: The name of the module we are writing a stub for.
signatures: The list of signature hashes, as produced by ParseSignatures,
to create stubs for.
intermediate_dir: The directory where the generated .def files should go.
outdir_path: The directory where generated .lib files should go.
machine: String holding the machine type, 'X86' or 'X64'.
Raises:
SubprocessError: If invoking the windows "lib" tool fails, this is raised
with the error code.
"""
def_file_path = os.path.join(intermediate_dir,
module_name + '.def')
lib_file_path = os.path.join(outdir_path,
module_name + '.lib')
outfile = open(def_file_path, 'w')
try:
WriteWindowsDefFile(module_name, signatures, outfile)
finally:
outfile.close()
# Invoke the "lib" program on Windows to create stub .lib files for the
# generated definitions. These .lib files can then be used during
# delayloading of the dynamic libraries.
ret = QuietRun(['lib', '/nologo',
'/machine:' + machine,
'/def:' + def_file_path,
'/out:' + lib_file_path],
filter=' Creating library')
if ret != 0:
raise SubprocessError(
'Failed creating %s for %s' % (lib_file_path, def_file_path),
ret)
class PosixStubWriter(object):
"""Creates a file of stub functions for a library that is opened via dlopen.
Windows provides a function in their compiler known as delay loading, which
effectively generates a set of stub functions for a dynamic library that
delays loading of the dynamic library/resolution of the symbols until one of
the needed functions are accessed.
In posix, RTLD_LAZY does something similar with DSOs. This is the default
link mode for DSOs. However, even though the symbol is not resolved until
first usage, the DSO must be present at load time of the main binary.
To simulate the windows delay load procedure, we need to create a set of
stub functions that allow for correct linkage of the main binary, but
dispatch to the dynamically resolved symbol when the module is initialized.
This class takes a list of function signatures, and generates a set of stub
functions plus initialization code for them.
"""
def __init__(self, module_name, export_macro, signatures):
"""Initializes PosixStubWriter for this set of signatures and module_name.
Args:
module_name: The name of the module we are writing a stub for.
export_macro: A preprocessor macro used to annotate stub symbols with
an EXPORT marking, to control visibility.
signatures: The list of signature hashes, as produced by ParseSignatures,
to create stubs for.
"""
self.signatures = signatures
self.module_name = module_name
self.export_macro = export_macro
@classmethod
def CStyleIdentifier(cls, identifier):
"""Generates a C style identifier.
The module_name has all invalid identifier characters removed (anything
that's not [_a-zA-Z0-9]) and is run through string.capwords to try
and approximate camel case.
Args:
identifier: The string with the module name to turn to C-style.
Returns:
A string that can be used as part of a C identifier.
"""
return string.capwords(re.sub(INVALID_C_IDENT_CHARS, '', identifier))
@classmethod
def EnumName(cls, module_name):
"""Gets the enum name for the module.
Takes the module name and creates a suitable enum name. The module_name
is munged to be a valid C identifier then prefixed with the string
"kModule" to generate a Google style enum name.
Args:
module_name: The name of the module to generate an enum name for.
Returns:
A string with the name of the enum value representing this module.
"""
return 'kModule%s' % PosixStubWriter.CStyleIdentifier(module_name)
@classmethod
def IsInitializedName(cls, module_name):
"""Gets the name of function that checks initialization of this module.
The name is in the format IsModuleInitialized. Where "Module" is replaced
with the module name, munged to be a valid C identifier.
Args:
module_name: The name of the module to generate the function name for.
Returns:
A string with the name of the initialization check function.
"""
return 'Is%sInitialized' % PosixStubWriter.CStyleIdentifier(module_name)
@classmethod
def InitializeModuleName(cls, module_name):
"""Gets the name of the function that initializes this module.
    The name is in the format InitializeModule, where "Module" is replaced
with the module name, munged to be a valid C identifier.
Args:
module_name: The name of the module to generate the function name for.
Returns:
A string with the name of the initialization function.
"""
return 'Initialize%s' % PosixStubWriter.CStyleIdentifier(module_name)
@classmethod
def UninitializeModuleName(cls, module_name):
"""Gets the name of the function that uninitializes this module.
    The name is in the format UninitializeModule, where "Module" is replaced
with the module name, munged to be a valid C identifier.
Args:
module_name: The name of the module to generate the function name for.
Returns:
A string with the name of the uninitialization function.
"""
return 'Uninitialize%s' % PosixStubWriter.CStyleIdentifier(module_name)
@classmethod
def StubFunctionPointer(cls, signature):
"""Generates a function pointer declaration for the given signature.
Args:
signature: A signature hash, as produced by ParseSignatures,
          representing the function signature.
Returns:
A string with the declaration of the function pointer for the signature.
"""
return 'static %s (*%s_ptr)(%s) = NULL;' % (signature['return_type'],
signature['name'],
', '.join(signature['params']))
@classmethod
def StubFunction(cls, signature):
"""Generates a stub function definition for the given signature.
The function definitions are created with __attribute__((weak)) so that
they may be overridden by a real static link or mock versions to be used
when testing.
Args:
signature: A signature hash, as produced by ParseSignatures,
          representing the function signature.
Returns:
A string with the stub function definition.
"""
return_prefix = ''
if signature['return_type'] != 'void':
return_prefix = 'return '
# Generate the argument list.
    arguments = [re.split(r'[\*& ]', arg)[-1].strip() for arg in
                 signature['params']]
arg_list = ', '.join(arguments)
if arg_list == 'void':
arg_list = ''
if arg_list != '' and len(arguments) > 1 and arguments[-1] == '...':
      # If the last argument is ... then this is a variadic function.
if return_prefix != '':
return VARIADIC_STUB_FUNCTION_DEFINITION % {
'return_type': signature['return_type'],
'name': signature['name'],
'params': ', '.join(signature['params']),
'arg_list': ', '.join(arguments[0:-1]),
'last_named_arg': arguments[-2],
'export': signature.get('export', '')}
else:
return VOID_VARIADIC_STUB_FUNCTION_DEFINITION % {
'name': signature['name'],
'params': ', '.join(signature['params']),
'arg_list': ', '.join(arguments[0:-1]),
'last_named_arg': arguments[-2],
'export': signature.get('export', '')}
else:
# This is a regular function.
return STUB_FUNCTION_DEFINITION % {
'return_type': signature['return_type'],
'name': signature['name'],
'params': ', '.join(signature['params']),
'return_prefix': return_prefix,
'arg_list': arg_list,
'export': signature.get('export', '')}
@classmethod
def WriteImplementationPreamble(cls, header_path, outfile):
"""Write the necessary includes for the implementation file.
Args:
header_path: The path to the header file.
outfile: The file handle to populate.
"""
outfile.write(IMPLEMENTATION_PREAMBLE % header_path)
@classmethod
def WriteUmbrellaInitializer(cls, module_names, namespace, outfile):
"""Writes a single function that will open + initialize each module.
    This initializer takes an STL map that lists the correct dlopen
    targets for each module. The map type is
    std::map<StubModules, std::vector<std::string>>, which matches each
    module to a list of paths to try in dlopen.
    This function is all-or-nothing. If any module fails to load,
    all other modules are dlclosed, and the function returns false. Though it
    is not enforced, this function should only be called once.
Args:
module_names: A list with the names of the modules in this stub file.
namespace: The namespace these functions should be in.
outfile: The file handle to populate with pointer definitions.
"""
outfile.write(UMBRELLA_INITIALIZER_START % namespace)
outfile.write(UMBRELLA_INITIALIZER_CLEANUP_FUNCTION)
    # Create the initialization function that calls all module initializers,
# checks if they succeeded, and backs out module loads on an error.
outfile.write(UMBRELLA_INITIALIZER_INITIALIZE_FUNCTION_START)
outfile.write(
'\n // Initialize each module if we have not already failed.\n')
for module in module_names:
outfile.write(' %s(opened_libraries[%s]);\n' %
(PosixStubWriter.InitializeModuleName(module),
PosixStubWriter.EnumName(module)))
outfile.write('\n')
# Output code to check the initialization status, clean up on error.
initializer_checks = ['!%s()' % PosixStubWriter.IsInitializedName(name)
for name in module_names]
uninitializers = ['%s()' % PosixStubWriter.UninitializeModuleName(name)
for name in module_names]
outfile.write(UMBRELLA_INITIALIZER_CHECK_AND_CLEANUP % {
'conditional': ' ||\n '.join(initializer_checks),
'uninitializers': ';\n '.join(uninitializers)})
outfile.write('\n} // namespace %s\n' % namespace)
@classmethod
def WriteHeaderContents(cls, module_names, namespace, header_guard, outfile):
"""Writes a header file for the stub file generated for module_names.
The header file exposes the following:
    1) An enum, StubModules, with an entry for each module.
2) A typedef for a StubPathMap allowing for specification of paths to
search for each module.
3) The IsInitialized/Initialize/Uninitialize functions for each module.
4) An umbrella initialize function for all modules.
Args:
module_names: A list with the names of each module in this stub file.
namespace: The namespace these functions should be in.
header_guard: The macro to use as our header guard.
outfile: The output handle to populate.
"""
outfile.write(STUB_HEADER_PREAMBLE %
{'guard_name': header_guard, 'namespace': namespace})
    # Generate the initializer prototypes for each module.
outfile.write('// Individual module initializer functions.\n')
for name in module_names:
outfile.write(MODULE_FUNCTION_PROTOTYPES % {
'is_initialized': PosixStubWriter.IsInitializedName(name),
'initialize': PosixStubWriter.InitializeModuleName(name),
'uninitialize': PosixStubWriter.UninitializeModuleName(name)})
# Generate the enum for umbrella initializer.
outfile.write(UMBRELLA_ENUM_START)
outfile.write(' %s = 0,\n' % PosixStubWriter.EnumName(module_names[0]))
for name in module_names[1:]:
outfile.write(' %s,\n' % PosixStubWriter.EnumName(name))
outfile.write(UMBRELLA_ENUM_END)
outfile.write(UMBRELLA_INITIALIZER_PROTOTYPE)
outfile.write(STUB_HEADER_CLOSER % {
'namespace': namespace, 'guard_name':
header_guard})
def WriteImplementationContents(self, namespace, outfile):
"""Given a file handle, write out the stub definitions for this module.
Args:
namespace: The namespace these functions should be in.
outfile: The file handle to populate.
"""
outfile.write(IMPLEMENTATION_CONTENTS_C_START)
self.WriteFunctionPointers(outfile)
self.WriteStubFunctions(outfile)
outfile.write(IMPLEMENTATION_CONTENTS_C_END)
outfile.write(NAMESPACE_START % namespace)
self.WriteModuleInitializeFunctions(outfile)
outfile.write(NAMESPACE_END % namespace)
def WriteFunctionPointers(self, outfile):
"""Write the function pointer declarations needed by the stubs.
We need function pointers to hold the actual location of the function
implementation returned by dlsym. This function outputs a pointer
definition for each signature in the module.
    Pointers will be named with the following pattern "FunctionName_ptr".
Args:
outfile: The file handle to populate with pointer definitions.
"""
outfile.write(FUNCTION_POINTER_SECTION_COMMENT)
for sig in self.signatures:
outfile.write('%s\n' % PosixStubWriter.StubFunctionPointer(sig))
outfile.write('\n')
def WriteStubFunctions(self, outfile):
"""Write the function stubs to handle dispatching to real implementations.
Functions that have a return type other than void will look as follows:
ReturnType FunctionName(A a) {
return FunctionName_ptr(a);
}
Functions with a return type of void will look as follows:
void FunctionName(A a) {
FunctionName_ptr(a);
}
Args:
outfile: The file handle to populate.
"""
outfile.write('// Stubs that dispatch to the real implementations.\n')
for sig in self.signatures:
sig['export'] = self.export_macro
outfile.write('%s\n' % PosixStubWriter.StubFunction(sig))
def WriteModuleInitializeFunctions(self, outfile):
"""Write functions to initialize/query initlialization of the module.
This creates 2 functions IsModuleInitialized and InitializeModule where
"Module" is replaced with the module name, first letter capitalized.
The InitializeModule function takes a handle that is retrieved from dlopen
and attempts to assign each function pointer above via dlsym.
The IsModuleInitialized returns true if none of the required functions
pointers are NULL.
Args:
outfile: The file handle to populate.
"""
ptr_names = ['%s_ptr' % sig['name'] for sig in self.signatures]
# Construct the conditional expression to check the initialization of
    # all the function pointers above. It should generate a conjunction
# with each pointer on its own line, indented by six spaces to match
# the indentation level of MODULE_INITIALIZATION_CHECK_FUNCTION.
initialization_conditional = ' &&\n '.join(ptr_names)
outfile.write(MODULE_INITIALIZATION_CHECK_FUNCTION % (
PosixStubWriter.IsInitializedName(self.module_name),
initialization_conditional))
# Create function that initializes the module.
outfile.write(MODULE_INITIALIZE_START %
PosixStubWriter.InitializeModuleName(self.module_name))
for sig in self.signatures:
outfile.write(STUB_POINTER_INITIALIZER % {
'name': sig['name'],
'return_type': sig['return_type'],
'parameters': ', '.join(sig['params'])})
outfile.write(MODULE_INITIALIZE_END)
# Create function that uninitializes the module (sets all pointers to
# NULL).
outfile.write(MODULE_UNINITIALIZE_START %
PosixStubWriter.UninitializeModuleName(self.module_name))
for sig in self.signatures:
outfile.write(' %s_ptr = NULL;\n' % sig['name'])
outfile.write(MODULE_UNINITIALIZE_END)
def CreateOptionParser():
"""Creates an OptionParser for the configuration options of script.
Returns:
A OptionParser object.
"""
parser = optparse.OptionParser(usage='usage: %prog [options] input')
parser.add_option('-o',
'--output',
dest='out_dir',
default=None,
help='Output location.')
parser.add_option('-i',
'--intermediate_dir',
dest='intermediate_dir',
default=None,
help=('Location of intermediate files. Ignored for %s type'
% FILE_TYPE_WIN_DEF))
parser.add_option('-t',
'--type',
dest='type',
default=None,
help=('Type of file. Valid types are "%s" or "%s" or "%s" '
'or "%s"' %
(FILE_TYPE_POSIX_STUB, FILE_TYPE_WIN_X86,
FILE_TYPE_WIN_X64, FILE_TYPE_WIN_DEF)))
parser.add_option('-s',
'--stubfile_name',
dest='stubfile_name',
default=None,
help=('Name of posix_stubs output file. Only valid with '
'%s type.' % FILE_TYPE_POSIX_STUB))
parser.add_option('-p',
'--path_from_source',
dest='path_from_source',
default=None,
help=('The relative path from the project root that the '
'generated file should consider itself part of (eg. '
'third_party/ffmpeg). This is used to generate the '
'header guard and namespace for our initializer '
'functions and does NOT affect the physical output '
'location of the file like -o does. Ignored for '
'%s and %s types.' %
(FILE_TYPE_WIN_X86, FILE_TYPE_WIN_X64)))
parser.add_option('-e',
'--extra_stub_header',
dest='extra_stub_header',
default=None,
help=('File to insert after the system includes in the '
                          'generated stub implementation file. Ignored for '
'%s and %s types.' %
(FILE_TYPE_WIN_X86, FILE_TYPE_WIN_X64)))
parser.add_option('-m',
'--module_name',
dest='module_name',
default=None,
help=('Name of output DLL or LIB for DEF creation using '
'%s type.' % FILE_TYPE_WIN_DEF))
parser.add_option('-x',
'--export_macro',
dest='export_macro',
default='',
help=('A macro to place between the return type and '
'function name, e.g. MODULE_EXPORT, to control the '
                          'visibility of the stub functions.'))
return parser
def ParseOptions():
"""Parses the options and terminates program if they are not sane.
Returns:
The pair (optparse.OptionValues, [string]), that is the output of
a successful call to parser.parse_args().
"""
parser = CreateOptionParser()
options, args = parser.parse_args()
if not args:
parser.error('No inputs specified')
if options.out_dir is None:
parser.error('Output location not specified')
if (options.type not in
[FILE_TYPE_WIN_X86, FILE_TYPE_WIN_X64, FILE_TYPE_POSIX_STUB,
FILE_TYPE_WIN_DEF]):
parser.error('Invalid output file type: %s' % options.type)
if options.type == FILE_TYPE_POSIX_STUB:
if options.stubfile_name is None:
parser.error('Output file name needed for %s' % FILE_TYPE_POSIX_STUB)
if options.path_from_source is None:
parser.error('Path from source needed for %s' % FILE_TYPE_POSIX_STUB)
if options.type == FILE_TYPE_WIN_DEF:
if options.module_name is None:
parser.error('Module name needed for %s' % FILE_TYPE_WIN_DEF)
return options, args
def EnsureDirExists(dir):
"""Creates a directory. Does not use the more obvious 'if not exists: create'
  to avoid a race with other invocations of the same code, which will error out
on makedirs if another invocation has succeeded in creating the directory
since the existence check."""
try:
os.makedirs(dir)
  except OSError:
if not os.path.isdir(dir):
raise
def CreateOutputDirectories(options):
"""Creates the intermediate and final output directories.
Given the parsed options, create the intermediate and final output
directories if they do not exist. Returns the paths to both directories
as a pair.
Args:
options: An OptionParser.OptionValues object with the parsed options.
Returns:
The pair (out_dir, intermediate_dir), both of which are strings.
"""
  out_dir = os.path.normpath(options.out_dir)
  if options.intermediate_dir is None:
    intermediate_dir = out_dir
  else:
    intermediate_dir = os.path.normpath(options.intermediate_dir)
EnsureDirExists(out_dir)
EnsureDirExists(intermediate_dir)
return out_dir, intermediate_dir
def CreateWindowsLibForSigFiles(sig_files, out_dir, intermediate_dir, machine,
export_macro):
"""For each signature file, create a windows lib.
Args:
sig_files: Array of strings with the paths to each signature file.
out_dir: String holding path to directory where the generated libs go.
    intermediate_dir: String holding path to the directory for generated
      intermediate artifacts.
machine: String holding the machine type, 'X86' or 'X64'.
export_macro: A preprocessor macro used to annotate stub symbols with
an EXPORT marking, to control visibility.
"""
for input_path in sig_files:
infile = open(input_path, 'r')
try:
signatures = ParseSignatures(infile)
module_name = ExtractModuleName(os.path.basename(input_path))
for sig in signatures:
sig['export'] = export_macro
CreateWindowsLib(module_name, signatures, intermediate_dir, out_dir,
machine)
finally:
infile.close()
def CreateWindowsDefForSigFiles(sig_files, out_dir, module_name):
"""For all signature files, create a single windows def file.
Args:
sig_files: Array of strings with the paths to each signature file.
out_dir: String holding path to directory where the generated def goes.
module_name: Name of the output DLL or LIB which will link in the def file.
"""
signatures = []
for input_path in sig_files:
infile = open(input_path, 'r')
try:
signatures += ParseSignatures(infile)
finally:
infile.close()
def_file_path = os.path.join(
out_dir, os.path.splitext(os.path.basename(module_name))[0] + '.def')
outfile = open(def_file_path, 'w')
try:
WriteWindowsDefFile(module_name, signatures, outfile)
finally:
outfile.close()
def CreatePosixStubsForSigFiles(sig_files, stub_name, out_dir,
intermediate_dir, path_from_source,
extra_stub_header, export_macro):
"""Create a posix stub library with a module for each signature file.
Args:
sig_files: Array of strings with the paths to each signature file.
stub_name: String with the basename of the generated stub file.
out_dir: String holding path to directory for the .h files.
intermediate_dir: String holding path to directory for the .cc files.
path_from_source: String with relative path of generated files from the
project root.
extra_stub_header: String with path to file of extra lines to insert
into the generated header for the stub library.
export_macro: A preprocessor macro used to annotate stub symbols with
an EXPORT marking, to control visibility.
"""
header_base_name = stub_name + '.h'
header_path = os.path.join(out_dir, header_base_name)
impl_path = os.path.join(intermediate_dir, stub_name + '.cc')
module_names = [ExtractModuleName(path) for path in sig_files]
namespace = path_from_source.replace('/', '_').lower()
header_guard = '%s_' % namespace.upper()
header_include_path = os.path.join(path_from_source, header_base_name)
# First create the implementation file.
impl_file = open(impl_path, 'w')
try:
# Open the file, and create the preamble which consists of a file
# header plus any necessary includes.
PosixStubWriter.WriteImplementationPreamble(header_include_path,
impl_file)
if extra_stub_header is not None:
extra_header_file = open(extra_stub_header, 'r')
try:
impl_file.write('\n')
for line in extra_header_file:
impl_file.write(line)
impl_file.write('\n')
finally:
extra_header_file.close()
# For each signature file, generate the stub population functions
# for that file. Each file represents one module.
for input_path in sig_files:
name = ExtractModuleName(input_path)
infile = open(input_path, 'r')
try:
signatures = ParseSignatures(infile)
finally:
infile.close()
writer = PosixStubWriter(name, export_macro, signatures)
writer.WriteImplementationContents(namespace, impl_file)
# Lastly, output the umbrella function for the file.
PosixStubWriter.WriteUmbrellaInitializer(module_names, namespace,
impl_file)
finally:
impl_file.close()
# Then create the associated header file.
header_file = open(header_path, 'w')
try:
PosixStubWriter.WriteHeaderContents(module_names, namespace,
header_guard, header_file)
finally:
header_file.close()
def main():
options, args = ParseOptions()
out_dir, intermediate_dir = CreateOutputDirectories(options)
if options.type == FILE_TYPE_WIN_X86:
CreateWindowsLibForSigFiles(args, out_dir, intermediate_dir, 'X86',
options.export_macro)
elif options.type == FILE_TYPE_WIN_X64:
CreateWindowsLibForSigFiles(args, out_dir, intermediate_dir, 'X64',
options.export_macro)
elif options.type == FILE_TYPE_POSIX_STUB:
CreatePosixStubsForSigFiles(args, options.stubfile_name, out_dir,
intermediate_dir, options.path_from_source,
options.extra_stub_header, options.export_macro)
elif options.type == FILE_TYPE_WIN_DEF:
CreateWindowsDefForSigFiles(args, out_dir, options.module_name)
if __name__ == '__main__':
main()
|
{
"content_hash": "cc1bcbd30cfaa38c25f7b362000aef8b",
"timestamp": "",
"source": "github",
"line_count": 1153,
"max_line_length": 80,
"avg_line_length": 36.78577623590633,
"alnum_prop": 0.6631772527938888,
"repo_name": "vadimtk/chrome4sdp",
"id": "7d2f91b8b67879dcd542a0e7d94e71db6e3f19d2",
"size": "42603",
"binary": false,
"copies": "23",
"ref": "refs/heads/master",
"path": "tools/generate_stubs/generate_stubs.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('domain_api', '0010_topleveldomain_slug'),
]
operations = [
migrations.AlterField(
model_name='topleveldomain',
name='slug',
field=models.CharField(default='', max_length=100, unique=True),
preserve_default=False,
),
]
|
{
"content_hash": "f7032386062686f06f0493ee294a02a6",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 76,
"avg_line_length": 23.68421052631579,
"alnum_prop": 0.6022222222222222,
"repo_name": "heytrav/drs-project",
"id": "3516279cf0c214d2c767accae1fc20f833c10688",
"size": "523",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "domain_api/migrations/0011_auto_20170404_2248.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "271912"
}
],
"symlink_target": ""
}
|
import refcycle
import gc
gc.disable()
gc.collect()
class A(object):
pass
a = A()
b = A()
a.foo = b
b.foo = a
del a, b, A
graph = refcycle.garbage()
graph.export_image('garbage.svg')
graph.export_image('garbage.pdf')
sccs = graph.strongly_connected_components()
sccs
sccs.sort(key=len)
sccs[-1].export_image('scc1.svg')
sccs[-1].export_image('scc1.pdf')
sccs[-2].export_image('scc2.svg')
sccs[-2].export_image('scc2.pdf')
print(graph.source_components())
|
{
"content_hash": "7bcb02e7a8ada2a94a82153117d9e943",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 44,
"avg_line_length": 16.06896551724138,
"alnum_prop": 0.6866952789699571,
"repo_name": "mdickinson/refcycle",
"id": "6d8e7e53e4e05a486c716371662a798fb7cfab90",
"size": "466",
"binary": false,
"copies": "1",
"ref": "refs/heads/ci/run-tests-workflow-1",
"path": "docs/source/guide/images/garbage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "118681"
}
],
"symlink_target": ""
}
|
import argparse
import base64
import hashlib
import re
import os
import sys
def ComputeIntegrity(input_path):
hasher = hashlib.sha256()
with open(input_path, 'rb') as f:
hasher.update(f.read())
return base64.b64encode(hasher.digest())
def WriteHeader(input_paths_and_integrity, output_path):
with open(output_path, 'w') as f:
f.write('// DO NOT MODIFY THIS FILE DIRECTLY!\n')
f.write('// IT IS GENERATED BY generate_integrity_header.py\n')
f.write('// FROM:\n')
for (input_filename, _) in input_paths_and_integrity:
f.write('// * ' + input_filename + '\n')
f.write('\n')
for (input_filename, integrity) in input_paths_and_integrity:
      define_name = re.sub(r'\W', '_', input_filename.upper())
define_name = define_name + '_INTEGRITY'
f.write('#define ' + define_name + ' "' + integrity.decode() + '"\n')
f.write('\n')
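# As an illustration, a hypothetical input file "app.js" produces a line like:
#
#   #define APP_JS_INTEGRITY "<base64-encoded sha256 digest>"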
def main():
parser = argparse.ArgumentParser(
description='Generate a C++ header containing a sha256 checksum of the '
'input files.')
parser.add_argument('input_path', help='Path to an input file.', nargs='+')
parser.add_argument('--output_path', help='Path to an output header file.')
args = parser.parse_args()
input_paths = args.input_path
output_path = args.output_path
input_paths_and_integrity = [(os.path.basename(path), ComputeIntegrity(path))
for path in input_paths]
WriteHeader(input_paths_and_integrity, output_path)
return 0
if __name__ == '__main__':
sys.exit(main())
|
{
"content_hash": "baa6d1a9203abce81f6de1d3ab1610cd",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 79,
"avg_line_length": 28.51851851851852,
"alnum_prop": 0.6441558441558441,
"repo_name": "endlessm/chromium-browser",
"id": "228aa139c6046a7d1c4a55e1507b257cf8dc3075",
"size": "1725",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "chrome/browser/search/tools/generate_integrity_header.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import http.client
from lxml import html
from datetime import datetime
import csv
import os
import time
conn = http.client.HTTPSConnection("www.hockey-reference.com")
def writeCsv(coach, years):
if len(years) == 0:
print("No years")
return
directory = "./stats/coach"
filename = "%s/%s.csv" % (directory, coach)
if not os.path.exists(directory):
os.makedirs(directory)
with open(filename, 'w', newline='') as csvfile:
fieldnames = []
for fieldname in years[0]:
fieldnames.append(fieldname)
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for i in range(len(years)):
writer.writerow(years[i])
def buildYears(tree):
seasons = tree.xpath('//*[@id="coach"]/tbody/tr/th/text()')
agesPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[1]')
teams = tree.xpath('//*[@id="coach"]/tbody/tr/td[2]/a/text()')
leagues = tree.xpath('//*[@id="coach"]/tbody/tr/td[3]/a/text()')
gamesPlayedPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[4]')
winsPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[5]')
losesPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[6]')
tiesPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[7]')
otLosesPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[8]')
pointsPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[9]')
pointsPrecentagePre = tree.xpath('//*[@id="coach"]/tbody/tr/td[10]')
finishPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[12]')
playoffWinsPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[13]')
playoffLosesPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[14]')
playoffTiesPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[15]')
playoffWinLosePre = tree.xpath('//*[@id="coach"]/tbody/tr/td[16]')
playoffNotesPre = tree.xpath('//*[@id="coach"]/tbody/tr/td[17]')
ages = [x.text if x.text else 0 for x in agesPre]
gamesPlayed = [x.text if x.text else 0 for x in gamesPlayedPre]
wins = [x.text if x.text else 0 for x in winsPre]
loses = [x.text if x.text else 0 for x in losesPre]
ties = [x.text if x.text else 0 for x in tiesPre]
otLoses = [x.text if x.text else 0 for x in otLosesPre]
points = [x.text if x.text else 0 for x in pointsPre]
pointsPercentages = [x.text if x.text else 0 for x in pointsPrecentagePre]
finishes = [x.text if x.text else '' for x in finishPre]
playoffWins = [x.text if x.text else 0 for x in playoffWinsPre]
playoffLoses = [x.text if x.text else 0 for x in playoffLosesPre]
playoffTies = [x.text if x.text else 0 for x in playoffTiesPre]
playoffWinLose = [x.text if x.text else 0 for x in playoffWinLosePre]
playoffNotes = [x.text if x.text else '' for x in playoffNotesPre]
years = []
i = 0
while i < len(seasons):
year = {
"season": seasons[i],
"leage": leagues[i],
"team": teams[i],
"games_played": gamesPlayed[i],
"wins": wins[i],
"loses": loses[i],
"ties": ties[i],
"overtime_loses": otLoses[i],
"points": points[i],
"points_percentage": pointsPercentages[i],
"finishing_position": finishes[i],
"playoff_note": playoffNotes[i],
"playoff_wins": playoffWins[i],
"playoff_loses": playoffLoses[i],
"playoff_ties": playoffTies[i],
"playoff_win_lose": playoffWinLose[i]
}
years.append(year)
i = i + 1
return years
def getHtml(url):
time.sleep(2)
conn.request("GET", url)
response = conn.getresponse()
print(url, response.status, response.reason)
return response.read()
def crawlCoach(url):
content = getHtml(url)
coachName = url.split("/")[2].split(".")[0]
tree = html.fromstring(content)
years = buildYears(tree)
writeCsv(coachName, years)
content = getHtml("/coaches/")
tree = html.fromstring(content)
unBoldCoachPages = tree.xpath('//*[@id="coaches"]/tbody/tr/th/a/@href')
boldCoachPages = tree.xpath('//*[@id="coaches"]/tbody/tr/th//strong/a/@href')
coachPages = unBoldCoachPages + boldCoachPages
numberFound = len(coachPages)
print("Found %s" % (numberFound))
for i in range(numberFound):
crawlCoach(coachPages[i])
|
{
"content_hash": "6220976bacfb56892a1969f61020578d",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 78,
"avg_line_length": 34.774193548387096,
"alnum_prop": 0.6076066790352505,
"repo_name": "icambridge/hockeystats",
"id": "2b3ab8c0a401e787f4b62943203f36e37c14de03",
"size": "4898",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hr_coach_crawler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71700"
}
],
"symlink_target": ""
}
|
"""
Currency exchange rate support that comes from Yahoo Finance.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.yahoo_finance/
"""
import logging
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['yahoo-finance==1.2.1']
_LOGGER = logging.getLogger(__name__)
CONF_SYMBOL = 'symbol'
DEFAULT_SYMBOL = 'YHOO'
DEFAULT_NAME = 'Yahoo Stock'
ICON = 'mdi:currency-usd'
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)
ATTR_CHANGE = 'Change'
ATTR_OPEN = 'Open'
ATTR_PREV_CLOSE = 'Prev. Close'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_SYMBOL, default=DEFAULT_SYMBOL): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
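# An illustrative configuration.yaml entry (both keys are optional and fall
# back to the defaults above):
#
# sensor:
#   - platform: yahoo_finance
#     symbol: GOOG
#     name: Google Stock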
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Yahoo Finance sensor."""
name = config.get(CONF_NAME)
symbol = config.get(CONF_SYMBOL)
data = YahooFinanceData(name, symbol)
add_devices([YahooFinanceSensor(name, data, symbol)])
# pylint: disable=too-few-public-methods
class YahooFinanceSensor(Entity):
"""Representation of a Yahoo Finance sensor."""
def __init__(self, name, data, symbol):
"""Initialize the sensor."""
self._name = name
self.data = data
self._symbol = symbol
self._state = None
self._unit_of_measurement = None
self.update()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._symbol
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._state is not None:
return {
ATTR_CHANGE: self.data.price_change,
ATTR_OPEN: self.data.price_open,
ATTR_PREV_CLOSE: self.data.prev_close,
'About': "Stock market information delivered by Yahoo!"
" Inc. are provided free of charge for use"
" by individuals and non-profit organizations"
" for personal, non-commercial uses."
}
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
def update(self):
"""Get the latest data and updates the states."""
_LOGGER.debug('Updating sensor %s - %s', self._name, self._state)
self.data.update()
self._state = self.data.state
class YahooFinanceData(object):
"""Get data from Yahoo Finance."""
def __init__(self, name, symbol):
"""Initialize the data object."""
from yahoo_finance import Share
self._name = name
self._symbol = symbol
self.state = None
self.price_change = None
self.price_open = None
self.prev_close = None
self.stock = Share(symbol)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data and updates the states."""
self.stock.refresh()
self.state = self.stock.get_price()
self.price_change = self.stock.get_change()
self.price_open = self.stock.get_open()
self.prev_close = self.stock.get_prev_close()
|
{
"content_hash": "9fcd6c2468cbd5d22b1919dec223ab3c",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 74,
"avg_line_length": 29.792,
"alnum_prop": 0.6345327604726101,
"repo_name": "leoc/home-assistant",
"id": "822c50823fc1d472074faad6b96cd99320588ea4",
"size": "3724",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/yahoo_finance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1366220"
},
{
"name": "Python",
"bytes": "3636900"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('contriblog', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('slug', models.SlugField(unique=True, max_length=200)),
],
),
migrations.AddField(
model_name='post',
name='tags',
field=models.ManyToManyField(to='contriblog.Tag'),
),
]
|
{
"content_hash": "f29a7a5fc8d756ad2f3d8788a0b8a254",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 114,
"avg_line_length": 26.68,
"alnum_prop": 0.553223388305847,
"repo_name": "Heasummn/contributr",
"id": "19ae07d7657047f68843fb8de243bab076099281",
"size": "691",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "contributr/contriblog/migrations/0002_auto_20150924_1705.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "278"
},
{
"name": "HTML",
"bytes": "6289"
},
{
"name": "JavaScript",
"bytes": "1"
},
{
"name": "Python",
"bytes": "13033"
}
],
"symlink_target": ""
}
|
import re
import threading
import pyfirmata
from util import EventEmitter, setInterval, debounce
class ArduinoNotSuppliedException(Exception):
pass
class ServoOutOfRangeException(Exception):
pass
class InvalidPercentageException(Exception):
pass
class Component(EventEmitter):
def __init__(self, board, pin):
if not board:
raise ArduinoNotSuppliedException
super(Component, self).__init__()
self._board = board
        analog_regex = re.compile(r'A(\d)')
match = analog_regex.match(str(pin))
if match:
self._pin = self._board.analog[int(match.group(1))]
else:
self._pin = self._board.digital[int(pin)]
@property
def value(self):
return self._pin.value
class Sensor(Component):
def __init__(self, board, pin):
super(Sensor, self).__init__(board, pin)
self.threshold = 0.01
self._pin.mode = pyfirmata.INPUT
self._pin.enable_reporting()
self._old_value = self.value
self._board.on('data', self._handle_data)
def _handle_data(self):
value = self.value or 0
high_value = value + self.threshold
low_value = value - self.threshold
if self._old_value < low_value or self._old_value > high_value:
self._old_value = value
self._handle_state_changed()
@debounce(0.005)
def _handle_state_changed(self):
self.emit('change')
def change(self, cb):
self.on('change', cb)
class Led(Component):
def __init__(self, board, pin):
super(Led, self).__init__(board, pin)
self._isOn = False
self._interval = None
def on(self):
self._pin.write(1)
self._isOn = True
return self
def off(self, clear=True):
self._pin.write(0)
self._isOn = False
if self._interval and clear:
self._interval.clear()
return self
def toggle(self):
if self._isOn:
return self.off(clear=False)
else:
return self.on()
def blink(self, millis):
if self._interval:
self._interval.clear()
self._interval = setInterval(self.toggle, millis)
def brightness(self, value):
if int(value) > 100 or int(value) < 0:
raise InvalidPercentageException
if self._pin.mode != pyfirmata.PWM:
self._pin.mode = pyfirmata.PWM
_new_value = value / 100.0
if _new_value == 0:
self._isOn = False
else:
            self._isOn = True
self._pin.write(_new_value)
return self
class RGBLed(EventEmitter):
def __init__(self, board, pins):
if not board:
raise ArduinoNotSuppliedException
# TODO: Check that pins is dict
super(RGBLed, self).__init__()
self._red = Led(board, pins["red"])
self._green = Led(board, pins["green"])
self._blue = Led(board, pins["blue"])
def off(self):
self._red.off()
self._green.off()
self._blue.off()
return self
def red(self):
self._red.on()
self._green.off()
self._blue.off()
return self
def green(self):
self._red.off()
self._green.on()
self._blue.off()
return self
def blue(self):
self._red.off()
self._green.off()
self._blue.on()
return self
def yellow(self):
self._red.on()
self._green.on()
self._blue.off()
return self
def cyan(self):
self._red.off()
self._green.on()
self._blue.on()
return self
def purple(self):
self._red.on()
self._green.off()
self._blue.on()
return self
def white(self):
self._red.on()
self._green.on()
self._blue.on()
return self
class Buzzer(Led):
pass
class Button(Sensor):
def __init__(self, board, pin):
super(Button, self).__init__(board, pin)
self._old_value = False
self._timeout = None
self.change(self._emit_button_events)
def _handle_data(self):
value = self.value
if self._old_value != value:
self._old_value = value
# This sucks, wish I could just call Super
self._handle_state_changed()
def _emit_button_events(self):
if self.value is False:
            if self._timeout:
self._timeout.cancel()
self.emit('up')
elif self.value:
def emit_hold():
self.emit('hold')
self._timeout = threading.Timer(1, emit_hold)
self._timeout.start()
self.emit('down')
def down(self, cb):
self.on('down', cb)
def up(self, cb):
self.on('up', cb)
def hold(self, cb):
self.on('hold', cb)
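# Illustrative usage sketch (not part of the original module): reacting to
# Button events. Assumes `board` is a connected pyfirmata Arduino; pin 8 is
# an example choice.
def _example_count_presses(board):
    button = Button(board, 8)
    presses = {'count': 0}
    # 'down' fires on press, 'up' on release, 'hold' after roughly one second.
    button.down(lambda: presses.update(count=presses['count'] + 1))
    return presses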
class RotaryEncoder(EventEmitter):
"""
2-bit Rotary Encoder
Between clicks on the encoder, the two pins will alternate between being high and low:
A B
Click: 0 0
1 0
1 1
0 1
Click: 0 0
Using interrupts would be more accurate when turning the encoder quickly, but this works:
Watch for the moments when pin "A" turns high, and compare its current value to pin "B"
When A turns high while B is low, it's moving clockwise. If B is high, it's counter-clockwise.
This requires the Rotary Encoder to be connected like two buttons, with the pin tied to ground
with a resistor when low, and with voltage by way of the encoder's common pin for the high state.
"""
def __init__(self, board, pins):
if not board:
raise ArduinoNotSuppliedException
super(RotaryEncoder, self).__init__()
self._pinA = Button(board, pins[0])
self._pinB = Button(board, pins[1])
# Watch for when pin "A" changes to True
self._pinA.down(self.changed)
def changed(self):
# When A turns high (and this callback is called), check to see B's state
# If B is already low, it's moving CW
# If B was high, it's moving CCW
bValue = self._pinB.value
if bValue:
self.emit('ccw')
        else:
            self.emit('cw')
def cw(self, cb):
self.on('cw', cb)
def ccw(self, cb):
self.on('ccw', cb)
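# Illustrative usage sketch (not part of the original module): turning the
# cw/ccw events described in the docstring above into a position counter.
# Assumes a connected pyfirmata Arduino; pins 2 and 3 are example choices.
def _example_encoder_position(board):
    state = {'position': 0}
    encoder = RotaryEncoder(board, (2, 3))
    # Each clockwise detent increments the counter, counter-clockwise decrements.
    encoder.cw(lambda: state.update(position=state['position'] + 1))
    encoder.ccw(lambda: state.update(position=state['position'] - 1))
    return state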
class Servo(Component):
def __init__(self, board, pin):
super(Servo, self).__init__(board, pin)
self._pin.mode = pyfirmata.SERVO
def set_position(self, degrees):
if int(degrees) > 180 or int(degrees) < 0:
raise ServoOutOfRangeException
self._pin.write(degrees)
def move(self, degrees):
self.set_position(self.value + int(degrees))
def center(self):
self.set_position(90)
def reset(self):
self.set_position(0)
class Motor(Component):
def __init__(self, board, pin):
super(Motor, self).__init__(board, pin)
self._speed = 0
self._pin.mode = pyfirmata.PWM
def start(self, speed=50):
self.speed = speed
def stop(self):
self.speed = 0
@property
def speed(self):
return self._speed
@speed.setter
def speed(self, speed):
if int(speed) > 100 or int(speed) < 0:
raise InvalidPercentageException
self._speed = speed
self._pin.write(speed / 100.0)
self.emit('change', speed)
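# Illustrative usage sketch (not part of the original module): driving a motor
# through the percentage-based speed API. Assumes a connected pyfirmata
# Arduino whose pin 9 supports PWM.
def _example_ramp_motor(board):
    motor = Motor(board, 9)
    motor.start(25)    # the setter validates 0-100 and writes 0.25 to the pin
    motor.speed = 75   # re-scales to 0.75 and emits a 'change' event
    motor.stop()       # speed 0 writes 0.0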
|
{
"content_hash": "3cf991abf7659ae09f3fc0fa445ab27c",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 103,
"avg_line_length": 23.03048780487805,
"alnum_prop": 0.5536139793486894,
"repo_name": "andyclymer/ControlBoard",
"id": "8a2fafd616e336bbc03d423f6c4105d083283e29",
"size": "7554",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ControlBoard.roboFontExt/lib/modules/BreakfastSerial/BreakfastSerial/components.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1246"
},
{
"name": "HTML",
"bytes": "58046"
},
{
"name": "Makefile",
"bytes": "6264"
},
{
"name": "Python",
"bytes": "939636"
},
{
"name": "Shell",
"bytes": "2108"
}
],
"symlink_target": ""
}
|
from pathlib import Path
from synthtool.gcp import samples
from . import util
FIXTURES = Path(__file__).parent / "fixtures"
def test_load_node_samples():
with util.chdir(FIXTURES / "node_templates" / "standard"):
all_samples = samples.all_samples(["samples/*.js"])
# should have loaded samples.
assert all_samples[3]["title"] == "Requester Pays"
assert all_samples[3]["file"] == "samples/requesterPays.js"
assert len(all_samples) == 4
# should have included additional meta-information provided.
assert all_samples[0]["title"] == "Metadata Example 1"
assert all_samples[0]["usage"] == "node hello-world.js"
assert all_samples[1]["title"] == "Metadata Example 2"
assert all_samples[1]["usage"] == "node goodnight-moon.js"
def test_bad_metadata():
with util.chdir(FIXTURES / "node_templates" / "bad_metadata"):
all_samples = samples.all_samples(["samples/*.js"])
# should have included additional meta-information provided.
assert all_samples[0]["title"] == "Bad_metadata1"
assert "usage" not in all_samples[0]
assert all_samples[1]["title"] == "Metadata Example 1"
assert all_samples[1]["usage"] == "node hello-world.js"
|
{
"content_hash": "d78f804bad2ecad7d04f60d830fdec0b",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 68,
"avg_line_length": 39.5,
"alnum_prop": 0.6392405063291139,
"repo_name": "googleapis/synthtool",
"id": "090b376873ce207ec772a4f25363e8c54b3a6faa",
"size": "1840",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_samples.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "801"
},
{
"name": "Dockerfile",
"bytes": "13507"
},
{
"name": "HTML",
"bytes": "6091"
},
{
"name": "Java",
"bytes": "21963"
},
{
"name": "JavaScript",
"bytes": "3390"
},
{
"name": "Jinja",
"bytes": "85687"
},
{
"name": "Python",
"bytes": "396495"
},
{
"name": "Shell",
"bytes": "67707"
}
],
"symlink_target": ""
}
|
from nipype.testing import assert_equal
from nipype.interfaces.freesurfer.utils import ApplyMask
def test_ApplyMask_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_file=dict(argstr='%s',
mandatory=True,
position=-3,
),
invert_xfm=dict(argstr='-invert',
),
mask_file=dict(argstr='%s',
mandatory=True,
position=-2,
),
mask_thresh=dict(argstr='-T %.4f',
),
out_file=dict(argstr='%s',
genfile=True,
position=-1,
),
subjects_dir=dict(),
terminal_output=dict(mandatory=True,
nohash=True,
),
use_abs=dict(argstr='-abs',
),
xfm_file=dict(argstr='-xform %s',
),
xfm_source=dict(argstr='-lta_src %s',
),
xfm_target=dict(argstr='-lta_dst %s',
),
)
inputs = ApplyMask.input_spec()
for key, metadata in input_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_ApplyMask_outputs():
output_map = dict(out_file=dict(),
)
outputs = ApplyMask.output_spec()
for key, metadata in output_map.items():
for metakey, value in metadata.items():
yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
{
"content_hash": "572e8d1ddc7629469d30a6fe06c35ba9",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 78,
"avg_line_length": 24.821428571428573,
"alnum_prop": 0.6071942446043166,
"repo_name": "carlohamalainen/nipype",
"id": "a2a1c4fa49533868a6e8b566d92b968af32625f7",
"size": "1444",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nipype/interfaces/freesurfer/tests/test_auto_ApplyMask.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "9090"
},
{
"name": "Matlab",
"bytes": "5018"
},
{
"name": "Python",
"bytes": "3788646"
},
{
"name": "Shell",
"bytes": "2959"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
from yowsup.layers import YowLayer
from yowsup import ProtocolTreeNode
from .mediadownloader import MediaDownloader
import shutil, os, logging
logger = logging.getLogger(__name__)
class YowMediaPictureLayer(YowLayer):
def send(self, data):
self.toLower(data)
def receive(self, node):
if ProtocolTreeNode.tagEquals(node, "message") and node.getAttributeValue("type") == "media":
self.downloadMedia(node.getChild("media").getAttributeValue("url"))
else:
self.toUpper(node)
def downloadMedia(self, url):
logger.debug("Downloading %s" % url)
downloader = MediaDownloader(self.onSuccess, self.onError, self.onProgress)
downloader.download(url)
def onError(self):
logger.error("Error download file")
def onSuccess(self, path):
outPath = "/tmp/yowfiles/%s.jpg" % os.path.basename(path)
shutil.copyfile(path, outPath)
logger.debug("Picture downloaded to %s" % outPath)
def onProgress(self, progress):
logger.debug("Download progress %s" % progress)
|
{
"content_hash": "4120811ea5755d5accf092345ca279b5",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 101,
"avg_line_length": 34.935483870967744,
"alnum_prop": 0.6768236380424746,
"repo_name": "ongair/yowsup",
"id": "36fec459d0b5a038d7e4dca42e58aabc992fd398",
"size": "1083",
"binary": false,
"copies": "66",
"ref": "refs/heads/master",
"path": "yowsup/layers/protocol_media/picture.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "216657"
}
],
"symlink_target": ""
}
|
from elasticsearch_raven.udp_server import run_server
if __name__ == '__main__':
run_server()
|
{
"content_hash": "19f20f32b686fdc896db38a1b4cb8e60",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 53,
"avg_line_length": 20,
"alnum_prop": 0.65,
"repo_name": "pozytywnie/elasticsearch-raven",
"id": "a500a5917c51ced8ecf669df1a34416e2c1bc431",
"size": "122",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "bin/elasticsearch-raven.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "46718"
}
],
"symlink_target": ""
}
|
from django.db import migrations
from django.db import models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
("lizard_auth_server", "0008_auto_20160824_1640"),
]
operations = [
migrations.AlterField(
model_name="profile",
name="company",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="members",
to="lizard_auth_server.Company",
verbose_name="company",
),
),
]
|
{
"content_hash": "499c9008b51fc7b76979ba7495029414",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 60,
"avg_line_length": 25.03846153846154,
"alnum_prop": 0.543778801843318,
"repo_name": "lizardsystem/lizard-auth-server",
"id": "8f24adc4086b038d0f7593b12a865e3b1be55d50",
"size": "723",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lizard_auth_server/migrations/0009_auto_20160825_1132.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "565"
},
{
"name": "HTML",
"bytes": "34451"
},
{
"name": "Makefile",
"bytes": "4607"
},
{
"name": "Python",
"bytes": "236091"
}
],
"symlink_target": ""
}
|
from msrest.serialization import Model
class JobScheduleDisableOptions(Model):
"""Additional parameters for the JobSchedule_disable operation.
:param timeout: The maximum time that the server can spend processing the
request, in seconds. The default is 30 seconds. Default value: 30 .
:type timeout: int
:param client_request_id: The caller-generated request identity, in the
form of a GUID with no decoration such as curly braces, e.g.
9C4D50EE-2D56-4CD3-8152-34347DC9F2B0.
:type client_request_id: str
:param return_client_request_id: Whether the server should return the
client-request-id in the response. Default value: False .
:type return_client_request_id: bool
:param ocp_date: The time the request was issued. Client libraries
typically set this to the current system clock time; set it explicitly if
you are calling the REST API directly.
:type ocp_date: datetime
:param if_match: An ETag value associated with the version of the resource
known to the client. The operation will be performed only if the
resource's current ETag on the service exactly matches the value specified
by the client.
:type if_match: str
:param if_none_match: An ETag value associated with the version of the
resource known to the client. The operation will be performed only if the
resource's current ETag on the service does not match the value specified
by the client.
:type if_none_match: str
:param if_modified_since: A timestamp indicating the last modified time of
the resource known to the client. The operation will be performed only if
the resource on the service has been modified since the specified time.
:type if_modified_since: datetime
:param if_unmodified_since: A timestamp indicating the last modified time
of the resource known to the client. The operation will be performed only
if the resource on the service has not been modified since the specified
time.
:type if_unmodified_since: datetime
"""
def __init__(self, timeout=30, client_request_id=None, return_client_request_id=False, ocp_date=None, if_match=None, if_none_match=None, if_modified_since=None, if_unmodified_since=None):
self.timeout = timeout
self.client_request_id = client_request_id
self.return_client_request_id = return_client_request_id
self.ocp_date = ocp_date
self.if_match = if_match
self.if_none_match = if_none_match
self.if_modified_since = if_modified_since
self.if_unmodified_since = if_unmodified_since
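# Illustrative sketch (not part of the generated model): constructing options
# for a conditional disable call. The GUID is the docstring's own example
# value; the ETag string is a made-up placeholder.
def _example_disable_options():
    import datetime
    return JobScheduleDisableOptions(
        timeout=60,  # allow up to 60 seconds instead of the 30-second default
        client_request_id='9C4D50EE-2D56-4CD3-8152-34347DC9F2B0',
        return_client_request_id=True,
        ocp_date=datetime.datetime.utcnow(),
        if_match='"0x8D2C26AE23D96D2"',  # hypothetical ETag value
    )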
|
{
"content_hash": "b3b03873f12e1a0b7548e9f8febd7b2d",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 191,
"avg_line_length": 52.4,
"alnum_prop": 0.7251908396946565,
"repo_name": "SUSE/azure-sdk-for-python",
"id": "cf6d2b7b81736e10ed490a8ceb99acea6685dd84",
"size": "3094",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "azure-batch/azure/batch/models/job_schedule_disable_options.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9090161"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from flexget.plugin import get_plugin_by_name
class TestURLRewriters(object):
"""
    Bad example: does things manually; you should use task.find_entry to check existence
"""
config = """
tasks:
test:
# make test data
mock:
- {title: 'tpb page', url: 'http://thepiratebay.org/tor/8492471/Test.avi'}
- {title: 'tbp search', url: 'http://thepiratebay.com/search/something'}
- {title: 'tbp torrent', url: 'http://torrents.thepiratebay.se/8492471/Test.torrent'}
- {title: 'tbp torrent subdomain', url: 'http://torrents.thepiratebay.se/8492471/Test.avi'}
- {title: 'tbp torrent bad subdomain', url: 'http://torrent.thepiratebay.se/8492471/Test.avi'}
- {title: 'nyaa', url: 'http://www.nyaa.eu/?page=torrentinfo&tid=12345'}
- {title: 'cinemageddon download', url: 'http://cinemageddon.net/details.php?id=1234'}
"""
def get_urlrewriter(self, name):
info = get_plugin_by_name(name)
return info.instance
def test_piratebay(self, execute_task):
task = execute_task('test')
# test with piratebay entry
urlrewriter = self.get_urlrewriter('piratebay')
entry = task.find_entry(title='tpb page')
assert urlrewriter.url_rewritable(task, entry)
entry = task.find_entry(title='tbp torrent')
assert not urlrewriter.url_rewritable(task, entry), \
'TPB direct torrent link should not be url_rewritable'
entry = task.find_entry(title='tbp torrent subdomain')
assert urlrewriter.url_rewritable(task, entry)
entry = task.find_entry(title='tbp torrent bad subdomain')
assert not urlrewriter.url_rewritable(task, entry), \
'TPB link with invalid subdomain should not be url_rewritable'
def test_piratebay_search(self, execute_task):
task = execute_task('test')
# test with piratebay entry
urlrewriter = self.get_urlrewriter('piratebay')
entry = task.find_entry(title='tbp search')
assert urlrewriter.url_rewritable(task, entry)
def test_nyaa_torrents(self, execute_task):
task = execute_task('test')
entry = task.find_entry(title='nyaa')
urlrewriter = self.get_urlrewriter('nyaa')
assert entry['url'] == 'http://www.nyaa.eu/?page=torrentinfo&tid=12345'
assert urlrewriter.url_rewritable(task, entry)
urlrewriter.url_rewrite(task, entry)
assert entry['url'] == 'http://www.nyaa.eu/?page=download&tid=12345'
def test_cinemageddon(self, execute_task):
task = execute_task('test')
entry = task.find_entry(title='cinemageddon download')
urlrewriter = self.get_urlrewriter('cinemageddon')
assert urlrewriter.url_rewritable(task, entry)
urlrewriter.url_rewrite(task, entry)
assert entry['url'] == 'http://cinemageddon.net/download.php?id=1234&name=cinemageddon%20download.torrent'
class TestRegexpurlrewriter(object):
# TODO: this test is broken?
config = """
tasks:
test:
mock:
- {title: 'irrelevant', url: 'http://newzleech.com/?p=123'}
accept_all: yes
urlrewrite:
newzleech:
regexp: 'http://newzleech.com/\?p=(?P<id>\d+)'
format: 'http://newzleech.com/?m=gen&dl=1&post=\g<id>'
"""
def test_newzleech(self, execute_task):
task = execute_task('test')
assert task.find_entry(url='http://newzleech.com/?m=gen&dl=1&post=123'), \
'did not url_rewrite properly'
|
{
"content_hash": "ee8630d2971437c90f6206f4c98ee483",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 114,
"avg_line_length": 43.06818181818182,
"alnum_prop": 0.6213720316622692,
"repo_name": "oxc/Flexget",
"id": "df6a70c4b90edda9fdd12189a88b7a242073f30c",
"size": "3790",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "flexget/tests/test_urlrewriting.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "9267"
},
{
"name": "HTML",
"bytes": "49610"
},
{
"name": "JavaScript",
"bytes": "239825"
},
{
"name": "Python",
"bytes": "2749010"
},
{
"name": "SRecode Template",
"bytes": "3"
}
],
"symlink_target": ""
}
|
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.setWindowModality(QtCore.Qt.NonModal)
MainWindow.resize(696, 814)
font = QtGui.QFont()
font.setPointSize(11)
MainWindow.setFont(font)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.formLayout = QtGui.QFormLayout(self.centralwidget)
self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
self.formLayout.setObjectName(_fromUtf8("formLayout"))
self.groupBox = QtGui.QGroupBox(self.centralwidget)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout_4 = QtGui.QGridLayout(self.groupBox)
self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4"))
self.label_2 = QtGui.QLabel(self.groupBox)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout_4.addWidget(self.label_2, 2, 0, 1, 1)
self.label = QtGui.QLabel(self.groupBox)
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout_4.addWidget(self.label, 1, 0, 1, 1)
self.label_10 = QtGui.QLabel(self.groupBox)
self.label_10.setObjectName(_fromUtf8("label_10"))
self.gridLayout_4.addWidget(self.label_10, 3, 0, 1, 1)
self.spinBox = QtGui.QSpinBox(self.groupBox)
self.spinBox.setEnabled(True)
self.spinBox.setLayoutDirection(QtCore.Qt.LeftToRight)
self.spinBox.setMinimum(1)
self.spinBox.setMaximum(99)
self.spinBox.setProperty("value", 30)
self.spinBox.setObjectName(_fromUtf8("spinBox"))
self.gridLayout_4.addWidget(self.spinBox, 1, 2, 1, 1)
self.lineEdit_2 = QtGui.QLineEdit(self.groupBox)
self.lineEdit_2.setObjectName(_fromUtf8("lineEdit_2"))
self.gridLayout_4.addWidget(self.lineEdit_2, 3, 2, 1, 1)
self.checkBox = QtGui.QCheckBox(self.groupBox)
self.checkBox.setChecked(False)
self.checkBox.setTristate(False)
self.checkBox.setObjectName(_fromUtf8("checkBox"))
self.gridLayout_4.addWidget(self.checkBox, 4, 0, 1, 1)
self.lineEdit = QtGui.QLineEdit(self.groupBox)
self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
self.gridLayout_4.addWidget(self.lineEdit, 2, 2, 1, 1)
self.label_8 = QtGui.QLabel(self.groupBox)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout_4.addWidget(self.label_8, 0, 0, 1, 1)
self.label_9 = QtGui.QLabel(self.groupBox)
self.label_9.setWordWrap(False)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gridLayout_4.addWidget(self.label_9, 0, 2, 1, 1)
self.pushButton = QtGui.QPushButton(self.groupBox)
self.pushButton.setObjectName(_fromUtf8("pushButton"))
self.gridLayout_4.addWidget(self.pushButton, 0, 3, 1, 1)
self.formLayout.setWidget(0, QtGui.QFormLayout.SpanningRole, self.groupBox)
self.groupBox_5 = QtGui.QGroupBox(self.centralwidget)
self.groupBox_5.setObjectName(_fromUtf8("groupBox_5"))
self.gridLayout_10 = QtGui.QGridLayout(self.groupBox_5)
self.gridLayout_10.setObjectName(_fromUtf8("gridLayout_10"))
self.pushAnalisar = QtGui.QPushButton(self.groupBox_5)
self.pushAnalisar.setObjectName(_fromUtf8("pushAnalisar"))
self.gridLayout_10.addWidget(self.pushAnalisar, 0, 0, 1, 1)
self.progressBar = QtGui.QProgressBar(self.groupBox_5)
self.progressBar.setEnabled(False)
self.progressBar.setProperty("value", 0)
self.progressBar.setObjectName(_fromUtf8("progressBar"))
self.gridLayout_10.addWidget(self.progressBar, 0, 2, 1, 1)
self.pushCancelar = QtGui.QPushButton(self.groupBox_5)
self.pushCancelar.setObjectName(_fromUtf8("pushCancelar"))
self.gridLayout_10.addWidget(self.pushCancelar, 0, 1, 1, 1)
self.formLayout.setWidget(1, QtGui.QFormLayout.SpanningRole, self.groupBox_5)
self.groupBox_3 = QtGui.QGroupBox(self.centralwidget)
self.groupBox_3.setObjectName(_fromUtf8("groupBox_3"))
self.gridLayout_2 = QtGui.QGridLayout(self.groupBox_3)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.label_4 = QtGui.QLabel(self.groupBox_3)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout_2.addWidget(self.label_4, 1, 1, 1, 1)
self.label_3 = QtGui.QLabel(self.groupBox_3)
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout_2.addWidget(self.label_3, 1, 0, 1, 1)
self.label_30 = QtGui.QLabel(self.groupBox_3)
self.label_30.setObjectName(_fromUtf8("label_30"))
self.gridLayout_2.addWidget(self.label_30, 3, 0, 1, 1)
self.line = QtGui.QFrame(self.groupBox_3)
self.line.setFrameShape(QtGui.QFrame.HLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.gridLayout_2.addWidget(self.line, 6, 0, 1, 1)
self.label_31 = QtGui.QLabel(self.groupBox_3)
self.label_31.setObjectName(_fromUtf8("label_31"))
self.gridLayout_2.addWidget(self.label_31, 3, 1, 1, 1)
self.line_4 = QtGui.QFrame(self.groupBox_3)
self.line_4.setFrameShape(QtGui.QFrame.HLine)
self.line_4.setFrameShadow(QtGui.QFrame.Sunken)
self.line_4.setObjectName(_fromUtf8("line_4"))
self.gridLayout_2.addWidget(self.line_4, 2, 1, 1, 1)
self.label_12 = QtGui.QLabel(self.groupBox_3)
self.label_12.setObjectName(_fromUtf8("label_12"))
self.gridLayout_2.addWidget(self.label_12, 7, 1, 1, 1)
self.label_5 = QtGui.QLabel(self.groupBox_3)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout_2.addWidget(self.label_5, 4, 0, 1, 1)
self.label_7 = QtGui.QLabel(self.groupBox_3)
self.label_7.setTextFormat(QtCore.Qt.AutoText)
self.label_7.setScaledContents(False)
self.label_7.setWordWrap(False)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout_2.addWidget(self.label_7, 4, 1, 1, 1)
self.label_19 = QtGui.QLabel(self.groupBox_3)
self.label_19.setObjectName(_fromUtf8("label_19"))
self.gridLayout_2.addWidget(self.label_19, 8, 0, 1, 1)
self.line_3 = QtGui.QFrame(self.groupBox_3)
self.line_3.setFrameShape(QtGui.QFrame.HLine)
self.line_3.setFrameShadow(QtGui.QFrame.Sunken)
self.line_3.setObjectName(_fromUtf8("line_3"))
self.gridLayout_2.addWidget(self.line_3, 2, 0, 1, 1)
self.label_28 = QtGui.QLabel(self.groupBox_3)
self.label_28.setObjectName(_fromUtf8("label_28"))
self.gridLayout_2.addWidget(self.label_28, 8, 1, 1, 1)
self.label_11 = QtGui.QLabel(self.groupBox_3)
self.label_11.setObjectName(_fromUtf8("label_11"))
self.gridLayout_2.addWidget(self.label_11, 7, 0, 1, 1)
self.line_2 = QtGui.QFrame(self.groupBox_3)
self.line_2.setFrameShape(QtGui.QFrame.HLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.gridLayout_2.addWidget(self.line_2, 6, 1, 1, 1)
self.formLayout.setWidget(3, QtGui.QFormLayout.SpanningRole, self.groupBox_3)
self.groupBox_4 = QtGui.QGroupBox(self.centralwidget)
self.groupBox_4.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.groupBox_4.setObjectName(_fromUtf8("groupBox_4"))
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.groupBox_4)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.gridLayout_8 = QtGui.QGridLayout()
self.gridLayout_8.setObjectName(_fromUtf8("gridLayout_8"))
self.label_23 = QtGui.QLabel(self.groupBox_4)
font = QtGui.QFont()
font.setPointSize(8)
self.label_23.setFont(font)
self.label_23.setWordWrap(True)
self.label_23.setObjectName(_fromUtf8("label_23"))
self.gridLayout_8.addWidget(self.label_23, 7, 0, 1, 1)
self.label_26 = QtGui.QLabel(self.groupBox_4)
font = QtGui.QFont()
font.setPointSize(8)
self.label_26.setFont(font)
self.label_26.setWordWrap(True)
self.label_26.setObjectName(_fromUtf8("label_26"))
self.gridLayout_8.addWidget(self.label_26, 1, 0, 1, 1)
self.label_29 = QtGui.QLabel(self.groupBox_4)
font = QtGui.QFont()
font.setPointSize(8)
self.label_29.setFont(font)
self.label_29.setObjectName(_fromUtf8("label_29"))
self.gridLayout_8.addWidget(self.label_29, 8, 0, 1, 1)
self.label_24 = QtGui.QLabel(self.groupBox_4)
font = QtGui.QFont()
font.setPointSize(8)
self.label_24.setFont(font)
self.label_24.setWordWrap(True)
self.label_24.setObjectName(_fromUtf8("label_24"))
self.gridLayout_8.addWidget(self.label_24, 6, 0, 1, 1)
self.label_20 = QtGui.QLabel(self.groupBox_4)
font = QtGui.QFont()
font.setPointSize(8)
self.label_20.setFont(font)
self.label_20.setWordWrap(True)
self.label_20.setObjectName(_fromUtf8("label_20"))
self.gridLayout_8.addWidget(self.label_20, 3, 0, 1, 1)
self.label_25 = QtGui.QLabel(self.groupBox_4)
font = QtGui.QFont()
font.setPointSize(8)
self.label_25.setFont(font)
self.label_25.setWordWrap(True)
self.label_25.setObjectName(_fromUtf8("label_25"))
self.gridLayout_8.addWidget(self.label_25, 0, 0, 1, 1)
self.label_27 = QtGui.QLabel(self.groupBox_4)
font = QtGui.QFont()
font.setPointSize(8)
self.label_27.setFont(font)
self.label_27.setWordWrap(True)
self.label_27.setObjectName(_fromUtf8("label_27"))
self.gridLayout_8.addWidget(self.label_27, 2, 0, 1, 1)
self.label_22 = QtGui.QLabel(self.groupBox_4)
font = QtGui.QFont()
font.setPointSize(8)
self.label_22.setFont(font)
self.label_22.setWordWrap(True)
self.label_22.setObjectName(_fromUtf8("label_22"))
self.gridLayout_8.addWidget(self.label_22, 5, 0, 1, 1)
self.label_21 = QtGui.QLabel(self.groupBox_4)
font = QtGui.QFont()
font.setPointSize(8)
self.label_21.setFont(font)
self.label_21.setWordWrap(True)
self.label_21.setObjectName(_fromUtf8("label_21"))
self.gridLayout_8.addWidget(self.label_21, 4, 0, 1, 1)
self.horizontalLayout_2.addLayout(self.gridLayout_8)
self.formLayout.setWidget(4, QtGui.QFormLayout.SpanningRole, self.groupBox_4)
self.groupBox_2 = QtGui.QGroupBox(self.centralwidget)
self.groupBox_2.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
self.gridLayout_7 = QtGui.QGridLayout(self.groupBox_2)
self.gridLayout_7.setObjectName(_fromUtf8("gridLayout_7"))
self.label_13 = QtGui.QLabel(self.groupBox_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_13.sizePolicy().hasHeightForWidth())
self.label_13.setSizePolicy(sizePolicy)
self.label_13.setMaximumSize(QtCore.QSize(100, 40))
self.label_13.setText(_fromUtf8(""))
self.label_13.setPixmap(QtGui.QPixmap(_fromUtf8("figuras/Logo coelce endesa brasil12.jpg")))
self.label_13.setScaledContents(True)
self.label_13.setObjectName(_fromUtf8("label_13"))
self.gridLayout_7.addWidget(self.label_13, 0, 0, 1, 1)
self.label_15 = QtGui.QLabel(self.groupBox_2)
self.label_15.setMaximumSize(QtCore.QSize(100, 40))
self.label_15.setText(_fromUtf8(""))
self.label_15.setPixmap(QtGui.QPixmap(_fromUtf8("figuras/LOGO P&D ANEEL (horizontal).jpg")))
self.label_15.setScaledContents(True)
self.label_15.setObjectName(_fromUtf8("label_15"))
self.gridLayout_7.addWidget(self.label_15, 0, 4, 1, 1)
self.label_6 = QtGui.QLabel(self.groupBox_2)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.label_6.sizePolicy().hasHeightForWidth())
self.label_6.setSizePolicy(sizePolicy)
self.label_6.setMaximumSize(QtCore.QSize(100, 40))
self.label_6.setText(_fromUtf8(""))
self.label_6.setPixmap(QtGui.QPixmap(_fromUtf8("figuras/logo2.png")))
self.label_6.setScaledContents(True)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridLayout_7.addWidget(self.label_6, 0, 5, 1, 1)
self.label_14 = QtGui.QLabel(self.groupBox_2)
self.label_14.setMaximumSize(QtCore.QSize(100, 40))
self.label_14.setText(_fromUtf8(""))
self.label_14.setPixmap(QtGui.QPixmap(_fromUtf8("figuras/concema.png")))
self.label_14.setScaledContents(True)
self.label_14.setObjectName(_fromUtf8("label_14"))
self.gridLayout_7.addWidget(self.label_14, 0, 3, 1, 1)
self.label_16 = QtGui.QLabel(self.groupBox_2)
self.label_16.setMaximumSize(QtCore.QSize(100, 60))
self.label_16.setText(_fromUtf8(""))
self.label_16.setPixmap(QtGui.QPixmap(_fromUtf8("figuras/labterra.png")))
self.label_16.setScaledContents(True)
self.label_16.setObjectName(_fromUtf8("label_16"))
self.gridLayout_7.addWidget(self.label_16, 0, 2, 1, 1)
self.formLayout.setWidget(6, QtGui.QFormLayout.SpanningRole, self.groupBox_2)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtGui.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 696, 21))
self.menubar.setObjectName(_fromUtf8("menubar"))
self.menuArquivo = QtGui.QMenu(self.menubar)
self.menuArquivo.setObjectName(_fromUtf8("menuArquivo"))
self.menuAvan_ado = QtGui.QMenu(self.menubar)
self.menuAvan_ado.setObjectName(_fromUtf8("menuAvan_ado"))
self.menuInjetar_pulso_unico = QtGui.QMenu(self.menuAvan_ado)
self.menuInjetar_pulso_unico.setObjectName(_fromUtf8("menuInjetar_pulso_unico"))
self.menuSobre = QtGui.QMenu(self.menubar)
self.menuSobre.setObjectName(_fromUtf8("menuSobre"))
self.menuU2531A = QtGui.QMenu(self.menubar)
self.menuU2531A.setObjectName(_fromUtf8("menuU2531A"))
self.menuAnalise = QtGui.QMenu(self.menubar)
self.menuAnalise.setObjectName(_fromUtf8("menuAnalise"))
self.menu_Plotar_Graficos = QtGui.QMenu(self.menuAnalise)
self.menu_Plotar_Graficos.setObjectName(_fromUtf8("menu_Plotar_Graficos"))
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtGui.QStatusBar(MainWindow)
self.statusbar.setObjectName(_fromUtf8("statusbar"))
MainWindow.setStatusBar(self.statusbar)
self.actionAbrir = QtGui.QAction(MainWindow)
self.actionAbrir.setObjectName(_fromUtf8("actionAbrir"))
self.actionSalvar = QtGui.QAction(MainWindow)
self.actionSalvar.setObjectName(_fromUtf8("actionSalvar"))
self.actionNovo = QtGui.QAction(MainWindow)
self.actionNovo.setObjectName(_fromUtf8("actionNovo"))
self.actionSair = QtGui.QAction(MainWindow)
self.actionSair.setObjectName(_fromUtf8("actionSair"))
self.actionLamotriz = QtGui.QAction(MainWindow)
self.actionLamotriz.setObjectName(_fromUtf8("actionLamotriz"))
self.actionLer_tens_o = QtGui.QAction(MainWindow)
self.actionLer_tens_o.setObjectName(_fromUtf8("actionLer_tens_o"))
self.actionLer_corrente = QtGui.QAction(MainWindow)
self.actionLer_corrente.setObjectName(_fromUtf8("actionLer_corrente"))
self.actionResetar = QtGui.QAction(MainWindow)
self.actionResetar.setObjectName(_fromUtf8("actionResetar"))
self.actionO_Projeto = QtGui.QAction(MainWindow)
self.actionO_Projeto.setObjectName(_fromUtf8("actionO_Projeto"))
self.actionSobre_o_Software = QtGui.QAction(MainWindow)
self.actionSobre_o_Software.setObjectName(_fromUtf8("actionSobre_o_Software"))
self.actionE_ler_corrente = QtGui.QAction(MainWindow)
self.actionE_ler_corrente.setObjectName(_fromUtf8("actionE_ler_corrente"))
self.actionCom_leitura_de_Corrente = QtGui.QAction(MainWindow)
self.actionCom_leitura_de_Corrente.setObjectName(_fromUtf8("actionCom_leitura_de_Corrente"))
self.actionCom_leitura_da_imped_ncia = QtGui.QAction(MainWindow)
self.actionCom_leitura_da_imped_ncia.setObjectName(_fromUtf8("actionCom_leitura_da_imped_ncia"))
self.actionLer_imped_ncia = QtGui.QAction(MainWindow)
self.actionLer_imped_ncia.setObjectName(_fromUtf8("actionLer_imped_ncia"))
self.actionConectar = QtGui.QAction(MainWindow)
self.actionConectar.setObjectName(_fromUtf8("actionConectar"))
self.actionDesconectar = QtGui.QAction(MainWindow)
self.actionDesconectar.setObjectName(_fromUtf8("actionDesconectar"))
self.actionStatus = QtGui.QAction(MainWindow)
self.actionStatus.setObjectName(_fromUtf8("actionStatus"))
self.actionArvore = QtGui.QAction(MainWindow)
self.actionArvore.setObjectName(_fromUtf8("actionArvore"))
self.actionAnalisar = QtGui.QAction(MainWindow)
self.actionAnalisar.setObjectName(_fromUtf8("actionAnalisar"))
self.actionTreinar = QtGui.QAction(MainWindow)
self.actionTreinar.setObjectName(_fromUtf8("actionTreinar"))
self.actionAnalisar_Amostras = QtGui.QAction(MainWindow)
self.actionAnalisar_Amostras.setObjectName(_fromUtf8("actionAnalisar_Amostras"))
self.actionPlot_V_I_transiente = QtGui.QAction(MainWindow)
self.actionPlot_V_I_transiente.setObjectName(_fromUtf8("actionPlot_V_I_transiente"))
self.actionPlot_V_I_raw = QtGui.QAction(MainWindow)
self.actionPlot_V_I_raw.setObjectName(_fromUtf8("actionPlot_V_I_raw"))
self.actionCANCELAR = QtGui.QAction(MainWindow)
self.actionCANCELAR.setObjectName(_fromUtf8("actionCANCELAR"))
self.menuArquivo.addAction(self.actionAbrir)
self.menuArquivo.addSeparator()
self.menuArquivo.addAction(self.actionSair)
self.menuInjetar_pulso_unico.addAction(self.actionE_ler_corrente)
self.menuInjetar_pulso_unico.addAction(self.actionCom_leitura_de_Corrente)
self.menuInjetar_pulso_unico.addAction(self.actionCom_leitura_da_imped_ncia)
self.menuAvan_ado.addAction(self.actionAnalisar)
self.menuAvan_ado.addAction(self.actionTreinar)
self.menuAvan_ado.addAction(self.menuInjetar_pulso_unico.menuAction())
self.menuAvan_ado.addAction(self.actionLer_tens_o)
self.menuAvan_ado.addAction(self.actionLer_corrente)
self.menuAvan_ado.addAction(self.actionLer_imped_ncia)
self.menuAvan_ado.addAction(self.actionCANCELAR)
self.menuSobre.addAction(self.actionLamotriz)
self.menuSobre.addAction(self.actionO_Projeto)
self.menuSobre.addAction(self.actionSobre_o_Software)
self.menuU2531A.addAction(self.actionConectar)
self.menuU2531A.addAction(self.actionDesconectar)
self.menuU2531A.addAction(self.actionStatus)
self.menuU2531A.addAction(self.actionResetar)
self.menu_Plotar_Graficos.addAction(self.actionPlot_V_I_transiente)
self.menu_Plotar_Graficos.addAction(self.actionPlot_V_I_raw)
self.menuAnalise.addAction(self.actionAnalisar_Amostras)
self.menuAnalise.addAction(self.actionArvore)
self.menuAnalise.addAction(self.menu_Plotar_Graficos.menuAction())
self.menubar.addAction(self.menuArquivo.menuAction())
self.menubar.addAction(self.menuAvan_ado.menuAction())
self.menubar.addAction(self.menuU2531A.menuAction())
self.menubar.addAction(self.menuAnalise.menuAction())
self.menubar.addAction(self.menuSobre.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "Sistema de Aquisição", None))
self.groupBox.setTitle(_translate("MainWindow", "Entrada", None))
self.label_2.setText(_translate("MainWindow", "Nome do Terreno", None))
self.label.setText(_translate("MainWindow", "Número de Amostras", None))
self.label_10.setText(_translate("MainWindow", "Nome do Arranjo / Rótulo", None))
self.lineEdit_2.setText(_translate("MainWindow", "haste4", None))
self.checkBox.setText(_translate("MainWindow", "Plotar o transiente após análise", None))
self.lineEdit.setText(_translate("MainWindow", "Formigueiro", None))
self.label_8.setText(_translate("MainWindow", "Área de Trabalho:", None))
self.label_9.setText(_translate("MainWindow", "Selecione uma área de trabalho", None))
self.pushButton.setText(_translate("MainWindow", "...", None))
self.groupBox_5.setTitle(_translate("MainWindow", "Ações", None))
self.pushAnalisar.setText(_translate("MainWindow", "Iniciar", None))
self.pushCancelar.setText(_translate("MainWindow", "Cancelar", None))
self.groupBox_3.setTitle(_translate("MainWindow", "Resultados", None))
self.label_4.setText(_translate("MainWindow", "Parado", None))
self.label_3.setText(_translate("MainWindow", "Status:", None))
self.label_30.setText(_translate("MainWindow", "Número exato:", None))
self.label_31.setText(_translate("MainWindow", "Siga as instruções abaixo", None))
self.label_12.setText(_translate("MainWindow", "0", None))
self.label_5.setText(_translate("MainWindow", "Número aproximado:", None))
self.label_7.setText(_translate("MainWindow", "<html><head/><body><p>Siga as instruções abaixo.</p></body></html>", None))
self.label_19.setText(_translate("MainWindow", "Tempo estimado (seg):", None))
self.label_28.setText(_translate("MainWindow", "0", None))
self.label_11.setText(_translate("MainWindow", "Tempo total (seg):", None))
self.groupBox_4.setTitle(_translate("MainWindow", "Instruções", None))
self.label_23.setText(_translate("MainWindow", "8. Pressione o botão \"Iniciar\".", None))
self.label_26.setText(_translate("MainWindow", "2. Insira o nome do Terreno a ser analisado. ", None))
self.label_29.setText(_translate("MainWindow", "9. Pressione o botão \"Cancelar\" para cancelar o ensaio.", None))
self.label_24.setText(_translate("MainWindow", "7. Certifique-se que as distâncias entre as hastes auxiliares estejam corretas.", None))
self.label_20.setText(_translate("MainWindow", "4. Conecte a malha a ser analisada no borne/conector vermelho.", None))
self.label_25.setText(_translate("MainWindow", "1. Insira o número de amostras a ser adquirido (Default: 30)", None))
self.label_27.setText(_translate("MainWindow", "3. Insira o nome do arranjo/rótulo.", None))
self.label_22.setText(_translate("MainWindow", "6. Conecte a haste auxiliar-retorno no borne preto.", None))
self.label_21.setText(_translate("MainWindow", "5. Conecte a haste auxiliar-central no borne verde.", None))
self.groupBox_2.setTitle(_translate("MainWindow", "Desenvolvimento", None))
self.menuArquivo.setTitle(_translate("MainWindow", "&Arquivo", None))
self.menuAvan_ado.setTitle(_translate("MainWindow", "Açõe&s", None))
self.menuInjetar_pulso_unico.setTitle(_translate("MainWindow", "&Injetar pulso unico", None))
self.menuSobre.setTitle(_translate("MainWindow", "&Sobre", None))
self.menuU2531A.setTitle(_translate("MainWindow", "&U2531A", None))
self.menuAnalise.setTitle(_translate("MainWindow", "Aná&lise", None))
self.menu_Plotar_Graficos.setTitle(_translate("MainWindow", "&Plotar Graficos", None))
self.actionAbrir.setText(_translate("MainWindow", "Área &Trabalho", None))
self.actionAbrir.setShortcut(_translate("MainWindow", "Ctrl+Q", None))
self.actionSalvar.setText(_translate("MainWindow", "Sa&lvar", None))
self.actionNovo.setText(_translate("MainWindow", "&Novo", None))
self.actionSair.setText(_translate("MainWindow", "&Sair", None))
self.actionLamotriz.setText(_translate("MainWindow", "&Lamotriz", None))
self.actionLer_tens_o.setText(_translate("MainWindow", "Ler &tensão", None))
self.actionLer_corrente.setText(_translate("MainWindow", "Ler &corrente", None))
self.actionResetar.setText(_translate("MainWindow", "&Resetar", None))
self.actionO_Projeto.setText(_translate("MainWindow", "O &Projeto", None))
self.actionSobre_o_Software.setText(_translate("MainWindow", "&Sobre o Software", None))
self.actionE_ler_corrente.setText(_translate("MainWindow", "Com leitura de tensão", None))
self.actionCom_leitura_de_Corrente.setText(_translate("MainWindow", "Com leitura de corrente", None))
self.actionCom_leitura_da_imped_ncia.setText(_translate("MainWindow", "Com leitura da impedância", None))
self.actionLer_imped_ncia.setText(_translate("MainWindow", "Ler &impedância", None))
self.actionConectar.setText(_translate("MainWindow", "&Conectar", None))
self.actionDesconectar.setText(_translate("MainWindow", "&Desconectar", None))
self.actionStatus.setText(_translate("MainWindow", "&Status", None))
self.actionArvore.setText(_translate("MainWindow", "A&rvore", None))
self.actionAnalisar.setText(_translate("MainWindow", "Analisar", None))
self.actionAnalisar.setShortcut(_translate("MainWindow", "Ctrl+R", None))
self.actionTreinar.setText(_translate("MainWindow", "Treinar", None))
self.actionAnalisar_Amostras.setText(_translate("MainWindow", "A&nalisar Amostras", None))
self.actionPlot_V_I_transiente.setText(_translate("MainWindow", "Plot V I transiente", None))
self.actionPlot_V_I_transiente.setShortcut(_translate("MainWindow", "Ctrl+P", None))
self.actionPlot_V_I_raw.setText(_translate("MainWindow", "Plot V I raw", None))
self.actionPlot_V_I_raw.setShortcut(_translate("MainWindow", "Ctrl+O", None))
self.actionCANCELAR.setText(_translate("MainWindow", "CANCELAR", None))
self.actionCANCELAR.setShortcut(_translate("MainWindow", "Ctrl+W", None))
if __name__ == "__main__":
import sys
app = QtGui.QApplication(sys.argv)
MainWindow = QtGui.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
|
{
"content_hash": "df5971a3729269768d1f3dd39c066041",
"timestamp": "",
"source": "github",
"line_count": 456,
"max_line_length": 144,
"avg_line_length": 60.29824561403509,
"alnum_prop": 0.6848268839103869,
"repo_name": "lamotriz/sistemas-de-aterramento",
"id": "d46baf612e156916c4377b2c6adba876ada22266",
"size": "27765",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/sistemaAquisicao_python.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "112"
},
{
"name": "C",
"bytes": "8043"
},
{
"name": "Eagle",
"bytes": "2432977"
},
{
"name": "Makefile",
"bytes": "14339"
},
{
"name": "NSIS",
"bytes": "2319"
},
{
"name": "Python",
"bytes": "110904"
},
{
"name": "Shell",
"bytes": "2188"
},
{
"name": "Stata",
"bytes": "132"
},
{
"name": "TeX",
"bytes": "29554"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('contacts', '0004_person_yatra'),
]
operations = [
migrations.AlterField(
model_name='person',
name='first_name',
field=models.CharField(blank=True, help_text='Enter your first name.', max_length=100),
preserve_default=True,
),
migrations.AlterField(
model_name='person',
name='last_name',
field=models.CharField(blank=True, help_text='Enter your surname.', max_length=100),
preserve_default=True,
),
]
|
{
"content_hash": "b34aaf0f219b22a0edbd17acaf2a4b1f",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 99,
"avg_line_length": 27.92,
"alnum_prop": 0.5845272206303725,
"repo_name": "mayapurmedia/tovp",
"id": "651563ba8ae6c288f94c34d82c8aa467f1000305",
"size": "722",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tovp/contacts/migrations/0005_auto_20150305_1427.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "190169"
},
{
"name": "HTML",
"bytes": "281143"
},
{
"name": "JavaScript",
"bytes": "2888"
},
{
"name": "Python",
"bytes": "504316"
}
],
"symlink_target": ""
}
|
from djangoappengine.settings_base import *
import os
import sys
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
PROJECT_NAME = os.path.split(PROJECT_ROOT)[-1]
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'static/')
#applications
#sys.path.insert(0, os.path.join(PROJECT_ROOT, 'src'))
sys.path.insert(0, os.path.join(PROJECT_ROOT, 'apps'))
sys.path.insert(0, os.path.join(PROJECT_ROOT, 'apps/externals'))
#sys.path.insert(0, os.path.join(PROJECT_ROOT, 'applications/libs'))
#sys.path.insert(0, os.path.join(PROJECT_ROOT, 'applications/externals'))
#sys.path.insert(0, os.path.join(PROJECT_ROOT, 'applications/internals'))
SECRET_KEY = '=r-$b*8hglm+858&9t043hlm6-&6-3d3vfc4((7yd0dbrakhvi'
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.auth',
'django.contrib.sessions',
'django.contrib.sites',
'djangotoolbox',
'registration',
'gadget',
#'easy_thumbnails',
#'guardian',
#'userena',
# djangoappengine should come last, so it can override a few manage.py commands
'djangoappengine',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'lsettings.disable.DisableCSRF'
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.load_template_source',
'django.template.loaders.app_directories.load_template_source',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.auth',
'django.core.context_processors.request',
'django.core.context_processors.media',
)
# This test runner captures stdout and associates tracebacks with their
# corresponding output. Helps a lot with print-debugging.
TEST_RUNNER = 'djangotoolbox.test.CapturingTestSuiteRunner'
ADMIN_MEDIA_PREFIX = '/media/admin/'
TEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates'),)
ROOT_URLCONF = 'urls'
TIME_ZONE = 'Europe/Berlin'
LANGUAGE_CODE = 'en'
LANGUAGES = (('de', 'German'),
('en', 'English'))
USE_I18N = True
SITE_ID = 3
#============== APPS EXT====================
#giga + userena
GIGYAUTH_API_KEYS = '2_qIePKPJ_T-NIHQ0_JpdNrhUgj7-B7YmCkeqIus1U68S8ElsLsN5BQOtTnhvX6nYg'
#localhost:808
#'2_ZVBWfZFQwm2ovDUJ1lJWtQKq7J1Gr3nCMfC2_g3FNiYd37lhrtEot6T4M1JianRD'
AUTHENTICATION_BACKENDS = (
#'userena.UserenaAuthenticationBackend',
#'guardian.backends.ObjectPermissionBackend',
'django.contrib.auth.backends.ModelBackend',
#'gigyauth.backends.gigyaoauth.GigyaBackend',
)
AUTH_PROFILE_MODULE = False
USERENA_MUGSHOT_GRAVATAR = False
#guardian
LOGIN_REDIRECT_URL = '/'
ANONYMOUS_USER_ID = -1
#registration
ACCOUNT_ACTIVATION_DAYS = 7 # One-week activation window; you may, of course, use a different value.
# Activate django-dbindexer if available
try:
import dbindexer
DATABASES['native'] = DATABASES['default']
DATABASES['default'] = {'ENGINE': 'dbindexer', 'TARGET': 'native'}
INSTALLED_APPS += ('dbindexer',)
DBINDEXER_SITECONF = 'dbindexes'
MIDDLEWARE_CLASSES = ('dbindexer.middleware.DBIndexerMiddleware',) + \
MIDDLEWARE_CLASSES
except ImportError:
pass
|
{
"content_hash": "202311ac2f8aa3ea3827c70aca885bb6",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 100,
"avg_line_length": 30.885714285714286,
"alnum_prop": 0.7190872648781992,
"repo_name": "MediaSapiens/wavesf",
"id": "f4fdde3b9b31910770b495bcd17f378824d620a7",
"size": "3457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settings.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "120591"
},
{
"name": "Python",
"bytes": "4074314"
}
],
"symlink_target": ""
}
|
from django import forms
from satchmo.contact import signals
import logging
log = logging.getLogger('contact.listeners')
def au_postcode_validator(sender, postcode=None, country=None, **kwargs):
if country.iso2_code == 'AU':
from satchmo.l10n.validators import aupostcode
try:
pc = aupostcode.validate(postcode)
return pc
except ValueError, ve:
raise forms.ValidationError('Please enter a valid Australian postal code.')
signals.validate_postcode.connect(au_postcode_validator)
def ca_postcode_validator(sender, postcode=None, country=None, **kwargs):
if country.iso2_code == 'CA':
from satchmo.l10n.validators import capostcode
try:
pc = capostcode.validate(postcode)
return pc
except ValueError, ve:
raise forms.ValidationError('Please enter a valid Canadian postal code.')
signals.validate_postcode.connect(ca_postcode_validator)
def uk_postcode_validator(sender, postcode=None, country=None, **kwargs):
"""Validates UK postcodes"""
if country.iso2_code == 'GB':
from satchmo.l10n.validators import ukpostcode
try:
pc = ukpostcode.parse_uk_postcode(postcode)
except ValueError, ve:
log.debug('UK Postcode validator caught error: %s', ve)
raise forms.ValidationError('Please enter a valid UK postcode.')
return ' '.join(pc)
signals.validate_postcode.connect(uk_postcode_validator)
def us_postcode_validator(sender, postcode=None, country=None, **kwargs):
if country.iso2_code == 'US':
from satchmo.l10n.validators import uspostcode
try:
pc = uspostcode.validate(postcode)
return pc
except ValueError, ve:
raise forms.ValidationError('Please enter a valid US ZIP code.')
signals.validate_postcode.connect(us_postcode_validator)
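# Illustrative sketch (not part of the original module): the same
# signal-and-validate pattern extended to another country. The five-digit
# rule is inlined here because satchmo.l10n.validators ships no German
# module; treat this as a hypothetical example.
def de_postcode_validator(sender, postcode=None, country=None, **kwargs):
    if country.iso2_code == 'DE':
        pc = (postcode or '').strip()
        if not (pc.isdigit() and len(pc) == 5):
            raise forms.ValidationError('Please enter a valid German postal code.')
        return pc
signals.validate_postcode.connect(de_postcode_validator)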
|
{
"content_hash": "a42a3137df354a24922c3e1b18bf8642",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 87,
"avg_line_length": 38.857142857142854,
"alnum_prop": 0.6769957983193278,
"repo_name": "roadhead/satchmo",
"id": "4518f63840356630268d46d778cae18d22b68598",
"size": "1904",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "satchmo/contact/listeners.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
Example to show how to create a synchronous ServiceBusClient that connects to a custom endpoint address.
"""
import os
from azure.servicebus import ServiceBusClient, ServiceBusMessage
CONNECTION_STR = os.environ['SERVICEBUS_CONNECTION_STR']
QUEUE_NAME = os.environ["SERVICEBUS_QUEUE_NAME"]
# The custom endpoint address to use for establishing a connection to the Service Bus service,
# allowing network requests to be routed through any application gateways
# or other paths needed for the host environment.
CUSTOM_ENDPOINT_ADDRESS = 'sb://<custom_endpoint_hostname>:<custom_endpoint_port>'
# The optional absolute path to the custom certificate file used by client to authenticate the
# identity of the connection endpoint in the case that endpoint has its own issued CA.
# If not set, the certifi library will be used to load certificates.
CUSTOM_CA_BUNDLE_PATH = '<your_custom_ca_bundle_file_path>'
def send_single_message(sender):
message = ServiceBusMessage("Single Message")
sender.send_messages(message)
servicebus_client = ServiceBusClient.from_connection_string(
conn_str=CONNECTION_STR,
logging_enable=True,
custom_endpoint_address=CUSTOM_ENDPOINT_ADDRESS,
connection_verify=CUSTOM_CA_BUNDLE_PATH
)
with servicebus_client:
sender = servicebus_client.get_queue_sender(queue_name=QUEUE_NAME)
with sender:
send_single_message(sender)
|
{
"content_hash": "67d92580954e9a14e8a124e4bb4f8efd",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 119,
"avg_line_length": 42.81818181818182,
"alnum_prop": 0.7763623496107572,
"repo_name": "Azure/azure-sdk-for-python",
"id": "78af7e885659482352d93dafa5897cfbb5129f65",
"size": "1782",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/servicebus/azure-servicebus/samples/sync_samples/connection_to_custom_endpoint_address.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
"""--- Day 2: I Was Told There Would Be No Math ---
The elves are running low on wrapping paper, and so they need to submit an
order for more. They have a list of the dimensions (length l, width w, and
height h) of each present, and only want to order exactly as much as they need.
Fortunately, every present is a box (a perfect right rectangular prism), which
makes calculating the required wrapping paper for each gift a little easier:
1. Find the surface area of the box, which is 2*l*w + 2*w*h + 2*h*l.
2. The elves also need a little extra paper for each present: the area of
the smallest side.
For example:
A present with dimensions 2x3x4 requires 2*6 + 2*12 + 2*8 = 52 square feet of
wrapping paper plus 6 square feet of slack, for a total of 58 square feet. A
present with dimensions 1x1x10 requires 2*1 + 2*10 + 2*10 = 42 square feet of
wrapping paper plus 1 square foot of slack, for a total of 43 square feet. All
numbers in the elves' list are in feet. How many total square feet of wrapping
paper should they order?
--- Part Two ---
The elves are also running low on ribbon. Ribbon is all the same width, so they
only have to worry about the length they need to order, which they would again
like to be exact.
The ribbon required to wrap a present is the shortest distance around its
sides, or the smallest perimeter of any one face. Each present also requires a
bow made out of ribbon as well; the feet of ribbon required for the perfect bow
is equal to the cubic feet of volume of the present. Don't ask how they tie the
bow, though; they'll never tell.
For example:
A present with dimensions 2x3x4 requires 2+2+3+3 = 10 feet of ribbon to wrap
the present plus 2*3*4 = 24 feet of ribbon for the bow, for a total of 34 feet.
A present with dimensions 1x1x10 requires 1+1+1+1 = 4 feet of ribbon to wrap
the present plus 1*1*10 = 10 feet of ribbon for the bow, for a total of 14
feet. How many total feet of ribbon should they order?
"""
from functools import reduce
import operator
import sys
import click
def format_lines(text):
"""Returns list of list of ints"""
return [[int(i) for i in str.split(line, 'x')] for line in str.split(text)]
def calculate_area(length, width, height):
"""Calculates the area + slack based on dimensions"""
area_of_sides = [length*width,
width*height,
height*length]
    # paper = twice each face, plus slack equal to the smallest face
    return 2*sum(area_of_sides) + min(area_of_sides)
def total_area(text):
"""Returns total area of wrapping paper"""
return sum(calculate_area(*dimensions) for dimensions in format_lines(text))
def calculate_ribbon(dimensions):
    """Returns the required ribbon: smallest face perimeter plus volume for the bow"""
    dims = sorted(dimensions)  # sorted copy avoids mutating the caller's list
    return reduce(operator.mul, dims, 1) + sum(dims[:2])*2
def total_ribbon(text):
    return sum(calculate_ribbon(dimensions) for dimensions in format_lines(text))
def calculate_solution_1(data):
return total_area(data)
def calculate_solution_2(data):
    return total_ribbon(data)
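# Illustrative sanity checks (not part of the original solution), replaying
# the worked examples quoted in the module docstring.
def _self_check():
    assert calculate_area(2, 3, 4) == 58
    assert calculate_area(1, 1, 10) == 43
    assert calculate_ribbon([2, 3, 4]) == 34
    assert calculate_ribbon([1, 1, 10]) == 14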
@click.command()
@click.option('--source_file', default='data/02.txt',
help='source data file for problem')
def main(source_file):
"""Simple solution to adventofcode problem 2."""
data = ''
with open(source_file) as source:
data = source.read()
print('Total required wrapping paper for part 1 is {}'.format(total_area(data)))
    print('Total required ribbon for part 2 is {}'.format(total_ribbon(data)))
if __name__ == "__main__":
sys.exit(main())
|
{
"content_hash": "ed0930da5605d2bd780541691aaf7640",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 84,
"avg_line_length": 35.755102040816325,
"alnum_prop": 0.708904109589041,
"repo_name": "MattJDavidson/python-adventofcode",
"id": "45896c85a489a7eb2718ac18217e71b65ac35bff",
"size": "3550",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "advent/problem_02.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "301301"
}
],
"symlink_target": ""
}
|
from aquilon.aqdb.model import ResourceGroup
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.commands.show_resource import show_resource
class CommandShowResourceGroup(BrokerCommand):
required_parameters = []
def render(self, session, hostname, cluster, all, resourcegroup,
**arguments):
return show_resource(session, hostname, cluster, None, all,
resourcegroup, ResourceGroup)
|
{
"content_hash": "71c52f98617cb14759667ec39d2f0158",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 72,
"avg_line_length": 34.857142857142854,
"alnum_prop": 0.7110655737704918,
"repo_name": "jrha/aquilon",
"id": "18db27061cbf3cf3ca69ee5f390021c9c96c4570",
"size": "1198",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "lib/python2.6/aquilon/worker/commands/show_resourcegroup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
"""YAML utility functions."""
import glob
import logging
import os
import sys
from collections import OrderedDict
from typing import Union, List, Dict
import yaml
try:
import keyring
except ImportError:
keyring = None
from homeassistant.exceptions import HomeAssistantError
_LOGGER = logging.getLogger(__name__)
_SECRET_NAMESPACE = 'homeassistant'
_SECRET_YAML = 'secrets.yaml'
__SECRET_CACHE = {} # type: Dict
# pylint: disable=too-many-ancestors
class SafeLineLoader(yaml.SafeLoader):
"""Loader class that keeps track of line numbers."""
def compose_node(self, parent: yaml.nodes.Node, index) -> yaml.nodes.Node:
"""Annotate a node with the first line it was seen."""
last_line = self.line # type: int
node = super(SafeLineLoader,
self).compose_node(parent, index) # type: yaml.nodes.Node
node.__line__ = last_line + 1
return node
def load_yaml(fname: str) -> Union[List, Dict]:
"""Load a YAML file."""
try:
with open(fname, encoding='utf-8') as conf_file:
# If configuration file is empty YAML returns None
# We convert that to an empty dict
return yaml.load(conf_file, Loader=SafeLineLoader) or {}
except yaml.YAMLError as exc:
_LOGGER.error(exc)
raise HomeAssistantError(exc)
def clear_secret_cache() -> None:
"""Clear the secret cache."""
__SECRET_CACHE.clear()
def _include_yaml(loader: SafeLineLoader,
node: yaml.nodes.Node) -> Union[List, Dict]:
"""Load another YAML file and embeds it using the !include tag.
Example:
device_tracker: !include device_tracker.yaml
"""
fname = os.path.join(os.path.dirname(loader.name), node.value)
return load_yaml(fname)
def _include_dir_named_yaml(loader: SafeLineLoader,
node: yaml.nodes.Node):
"""Load multiple files from directory as a dictionary."""
mapping = OrderedDict() # type: OrderedDict
files = os.path.join(os.path.dirname(loader.name), node.value, '*.yaml')
for fname in glob.glob(files):
filename = os.path.splitext(os.path.basename(fname))[0]
mapping[filename] = load_yaml(fname)
return mapping
def _include_dir_merge_named_yaml(loader: SafeLineLoader,
node: yaml.nodes.Node):
"""Load multiple files from directory as a merged dictionary."""
mapping = OrderedDict() # type: OrderedDict
files = os.path.join(os.path.dirname(loader.name), node.value, '*.yaml')
for fname in glob.glob(files):
if os.path.basename(fname) == _SECRET_YAML:
continue
loaded_yaml = load_yaml(fname)
if isinstance(loaded_yaml, dict):
mapping.update(loaded_yaml)
return mapping
def _include_dir_list_yaml(loader: SafeLineLoader,
node: yaml.nodes.Node):
"""Load multiple files from directory as a list."""
files = os.path.join(os.path.dirname(loader.name), node.value, '*.yaml')
return [load_yaml(f) for f in glob.glob(files)
if os.path.basename(f) != _SECRET_YAML]
def _include_dir_merge_list_yaml(loader: SafeLineLoader,
node: yaml.nodes.Node):
"""Load multiple files from directory as a merged list."""
files = os.path.join(os.path.dirname(loader.name),
node.value, '*.yaml') # type: str
merged_list = [] # type: List
for fname in glob.glob(files):
if os.path.basename(fname) == _SECRET_YAML:
continue
loaded_yaml = load_yaml(fname)
if isinstance(loaded_yaml, list):
merged_list.extend(loaded_yaml)
return merged_list
def _ordered_dict(loader: SafeLineLoader,
node: yaml.nodes.MappingNode) -> OrderedDict:
"""Load YAML mappings into an ordered dictionary to preserve key order."""
loader.flatten_mapping(node)
nodes = loader.construct_pairs(node)
seen = {} # type: Dict
min_line = None
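    # construct_pairs yields the constructed (key, value) pairs, while
    # node.value still holds the raw (key_node, value_node) pairs; zip them
    # to recover the source line of each key.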
    for (key, _), (key_node, _) in zip(nodes, node.value):
        line = getattr(key_node, '__line__', 'unknown')
if line != 'unknown' and (min_line is None or line < min_line):
min_line = line
try:
hash(key)
except TypeError:
fname = getattr(loader.stream, 'name', '')
raise yaml.MarkedYAMLError(
context="invalid key: \"{}\"".format(key),
context_mark=yaml.Mark(fname, 0, min_line, -1, None, None)
)
if key in seen:
fname = getattr(loader.stream, 'name', '')
first_mark = yaml.Mark(fname, 0, seen[key], -1, None, None)
second_mark = yaml.Mark(fname, 0, line, -1, None, None)
raise yaml.MarkedYAMLError(
context="duplicate key: \"{}\"".format(key),
context_mark=first_mark, problem_mark=second_mark,
)
seen[key] = line
processed = OrderedDict(nodes)
setattr(processed, '__config_file__', loader.name)
setattr(processed, '__line__', min_line)
return processed
def _env_var_yaml(loader: SafeLineLoader,
node: yaml.nodes.Node):
"""Load environment variables and embed it into the configuration YAML."""
if node.value in os.environ:
return os.environ[node.value]
else:
_LOGGER.error("Environment variable %s not defined.", node.value)
raise HomeAssistantError(node.value)
def _load_secret_yaml(secret_path: str) -> Dict:
"""Load the secrets yaml from path."""
secret_path = os.path.join(secret_path, _SECRET_YAML)
if secret_path in __SECRET_CACHE:
return __SECRET_CACHE[secret_path]
_LOGGER.debug('Loading %s', secret_path)
try:
secrets = load_yaml(secret_path)
if 'logger' in secrets:
logger = str(secrets['logger']).lower()
if logger == 'debug':
_LOGGER.setLevel(logging.DEBUG)
else:
_LOGGER.error("secrets.yaml: 'logger: debug' expected,"
" but 'logger: %s' found", logger)
del secrets['logger']
except FileNotFoundError:
secrets = {}
__SECRET_CACHE[secret_path] = secrets
return secrets
# pylint: disable=protected-access
def _secret_yaml(loader: SafeLineLoader,
node: yaml.nodes.Node):
"""Load secrets and embed it into the configuration YAML."""
secret_path = os.path.dirname(loader.name)
while True:
secrets = _load_secret_yaml(secret_path)
if node.value in secrets:
_LOGGER.debug('Secret %s retrieved from secrets.yaml in '
'folder %s', node.value, secret_path)
return secrets[node.value]
if secret_path == os.path.dirname(sys.path[0]):
break # sys.path[0] set to config/deps folder by bootstrap
secret_path = os.path.dirname(secret_path)
if not os.path.exists(secret_path) or len(secret_path) < 5:
break # Somehow we got past the .homeassistant config folder
if keyring:
        # Fall back to the system keyring when no secrets.yaml holds the value
pwd = keyring.get_password(_SECRET_NAMESPACE, node.value)
if pwd:
_LOGGER.debug('Secret %s retrieved from keyring.', node.value)
return pwd
_LOGGER.error('Secret %s not defined.', node.value)
raise HomeAssistantError(node.value)
yaml.SafeLoader.add_constructor('!include', _include_yaml)
yaml.SafeLoader.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
_ordered_dict)
yaml.SafeLoader.add_constructor('!env_var', _env_var_yaml)
yaml.SafeLoader.add_constructor('!secret', _secret_yaml)
yaml.SafeLoader.add_constructor('!include_dir_list', _include_dir_list_yaml)
yaml.SafeLoader.add_constructor('!include_dir_merge_list',
_include_dir_merge_list_yaml)
yaml.SafeLoader.add_constructor('!include_dir_named', _include_dir_named_yaml)
yaml.SafeLoader.add_constructor('!include_dir_merge_named',
_include_dir_merge_named_yaml)
|
{
"content_hash": "5185f1642a4793c0d71e34d12163dc2b",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 79,
"avg_line_length": 36.54708520179372,
"alnum_prop": 0.6131288343558282,
"repo_name": "hexxter/home-assistant",
"id": "035a96b657e4c6e6e61608a15790bca70176f0fb",
"size": "8150",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/util/yaml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1371597"
},
{
"name": "Python",
"bytes": "3699472"
},
{
"name": "Ruby",
"bytes": "379"
},
{
"name": "Shell",
"bytes": "7255"
}
],
"symlink_target": ""
}
|
"""
Collects sidekiq data from Redis
#### Dependencies
* redis
"""
from itertools import izip
try:
import redis
from redis.sentinel import Sentinel
except ImportError:
redis = None
import diamond.collector
class SidekiqCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(SidekiqCollector,
self).get_default_config_help()
config_help.update({
'host': 'Redis hostname',
'ports': 'Redis ports',
'password': 'Redis Auth password',
'databases': 'how many database instances to collect',
'sentinel_ports': 'Redis sentinel ports',
'sentinel_name': 'Redis sentinel name',
'cluster_prefix': 'Redis cluster name prefix'
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(SidekiqCollector, self).get_default_config()
config.update({
'path': 'sidekiq',
'host': 'localhost',
'ports': '6379',
'password': None,
'databases': 16,
'sentinel_ports': None,
'sentinel_name': None,
'cluster_prefix': None
})
return config
def get_master(self, host, port, sentinel_port, sentinel_name):
"""
:param host: Redis host to send request
:param port: Redis port to send request
:param sentinel_port: sentinel_port optional
:param sentinel_name: sentinel_name optional
:return: master ip and port
"""
if sentinel_port and sentinel_name:
master = Sentinel([(host, sentinel_port)], socket_timeout=1)\
.discover_master(sentinel_name)
return master
return host, port
def get_redis_client(self):
"""
:return: Redis client
"""
host = self.config['host']
ports = self.config['ports']
sentinel_ports = self.config['sentinel_ports']
sentinel_name = self.config['sentinel_name']
password = self.config['password']
databases = self.config['databases']
if not isinstance(ports, list):
ports = [ports]
if not isinstance(sentinel_ports, list):
sentinel_ports = [sentinel_ports]
if sentinel_ports:
assert len(sentinel_ports) == len(ports)
else:
sentinel_ports = [None for _ in xrange(len(ports))]
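        # Yield one client per (port, db) pair, resolving the current master
        # through sentinel first when sentinel ports are configured.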
for port, sentinel_port in izip(ports, sentinel_ports):
for db in xrange(0, int(databases)):
master = self.get_master(
host, port, sentinel_port, sentinel_name
)
pool = redis.ConnectionPool(
host=master[0], port=int(master[1]),
password=password, db=db
)
yield redis.Redis(connection_pool=pool), port, db
def collect(self):
"""
Collect Sidekiq metrics
:return:
"""
if redis is None:
self.log.error('Unable to import module redis')
return {}
try:
for redis_client, port, db in self.get_redis_client():
try:
self.publish_queue_length(redis_client, port, db)
self.publish_schedule_length(redis_client, port, db)
self.publish_retry_length(redis_client, port, db)
                except Exception as exception:
                    self.log.error(exception)
        except Exception as exception:
            self.log.error(exception)
def publish_schedule_length(self, redis_client, port, db):
"""
:param redis_client: Redis client
:param db: Redis Database index
:param port: Redis port
:return: Redis schedule length
"""
schedule_length = redis_client.zcard('schedule')
self.__publish(port, db, 'schedule', schedule_length)
def publish_retry_length(self, redis_client, port, db):
"""
:param redis_client: Redis client
:param db: Redis Database index
:param port: Redis port
        :return: Redis retry length
"""
retry_length = redis_client.zcard('retry')
self.__publish(port, db, 'retry', retry_length)
def publish_queue_length(self, redis_client, port, db):
"""
:param redis_client: Redis client
:param db: Redis Database index
:param port: Redis port
:return: Redis queue length
"""
for queue in redis_client.smembers('queues'):
queue_length = redis_client.llen('queue:%s' % queue)
self.__publish(port, db, queue, queue_length)
def __publish(self, port, db, queue, queue_length):
"""
:param port: Redis port
:param db: Redis db index to report
:param queue: Queue name to report
:param queue_length: Queue length to report
:return:
"""
        metric_name_segments = ['queue']
        cluster = self.config['cluster_prefix']
        if cluster:
            metric_name_segments.append(cluster)
        metric_name_segments.append(port)
        metric_name_segments.append(str(db))
        metric_name_segments.append(queue)
        self.publish_gauge(
            name='.'.join(metric_name_segments), value=queue_length
        )
)
|
{
"content_hash": "849601b285d17fe808a37230cd007431",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 73,
"avg_line_length": 32.75595238095238,
"alnum_prop": 0.5615119025985826,
"repo_name": "MichaelDoyle/Diamond",
"id": "dcaa1e7df740b50189a0edadcd9b5faca3e9cff5",
"size": "5519",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/collectors/sidekiq/sidekiq.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "21681"
},
{
"name": "Makefile",
"bytes": "4465"
},
{
"name": "Python",
"bytes": "1595996"
},
{
"name": "Roff",
"bytes": "23868"
},
{
"name": "Ruby",
"bytes": "230"
},
{
"name": "Shell",
"bytes": "12795"
}
],
"symlink_target": ""
}
|
import sys
import os
import numpy
from osgeo import gdal, gdal_array
TAIL_TRIM = 0.01
def get_band(filename, target_percent):
ds = gdal.Open(filename)
xsize = int(ds.RasterXSize * target_percent / 100.0)
ysize = int(ds.RasterYSize * target_percent / 100.0)
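    # Reading with explicit buffer sizes makes GDAL downsample the band on
    # read, using average resampling to limit aliasing.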
image = ds.GetRasterBand(1).ReadAsArray(resample_alg = gdal.GRIORA_Average,
buf_xsize = xsize,
buf_ysize = ysize)
return image
def get_scale(image):
'''
Return the values at which to clip an image.
'''
histogram = numpy.histogram(image, 65536, (-0.5, 65535.5))[0]
# Clear the nodata:
histogram[:1] = 0
count = numpy.sum(histogram)
# Walk up the near-black side of the histogram until
# we reach the end of the first percentile:
counter = 0
scale_min = None
for i in range(len(histogram)):
counter += histogram[i]
if counter > count * TAIL_TRIM:
scale_min = i
break
# Same, but moving left from the white end:
counter = 0
scale_max = None
for i in range(len(histogram)-1, 0, -1):
counter += histogram[i]
if counter > count * TAIL_TRIM:
scale_max = i
break
return scale_min, scale_max
def scale_image(image, scale_min, scale_max):
'''
Take a (presumptively uint16) image and return it scaled into
a uint8 image stretched linearly so that scale_min is mapped
to 0 and scale_max is mapped to 255.
'''
image = image.astype('float32')
image = (255 * (image - scale_min) / (scale_max - scale_min))
image = numpy.maximum(0, numpy.minimum(255, image))
image = image.astype('uint8')
return image
def thumbnail(root_scene, scene_dir, verbose=False):
red_file = '%s/%s_B4.TIF' % (scene_dir, root_scene)
grn_file = '%s/%s_B3.TIF' % (scene_dir, root_scene)
blu_file = '%s/%s_B2.TIF' % (scene_dir, root_scene)
if not os.path.exists(red_file) or not os.path.exists(grn_file) \
or not os.path.exists(blu_file):
        print 'Missing one or more of %s, %s and %s, skipping thumbnailing.' % (
red_file, grn_file, blu_file)
return
large_thumbnail = numpy.array([
get_band(red_file, 15),
get_band(grn_file, 15),
get_band(blu_file, 15)])
small_thumbnail = numpy.array([
get_band(red_file, 3),
get_band(grn_file, 3),
get_band(blu_file, 3)])
# Set the scale values for both images from the larger one:
scale_min, scale_max = get_scale(large_thumbnail)
large_thumbnail = scale_image(large_thumbnail, scale_min, scale_max)
small_thumbnail = scale_image(small_thumbnail, scale_min, scale_max)
# TODO: Georeference these jpegs
gdal_array.SaveArray(
large_thumbnail,
'%s/%s_thumb_large.jpg' % (scene_dir, root_scene),
format = 'JPEG')
gdal_array.SaveArray(
small_thumbnail,
'%s/%s_thumb_small.jpg' % (scene_dir, root_scene),
format = 'JPEG')
for filename in os.listdir(scene_dir):
if filename.endswith('.aux.xml'):
os.unlink(os.path.join(scene_dir,filename))
if __name__ == '__main__':
if len(sys.argv) < 3:
print 'Usage: thumbnailer.py <root_scene> <scene_dir_path>'
sys.exit(1)
thumbnail(sys.argv[1], sys.argv[2])
|
{
"content_hash": "8b754d816d1118e7ca6d1b8212a9608f",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 79,
"avg_line_length": 30.150442477876105,
"alnum_prop": 0.5917229233930144,
"repo_name": "landsat-pds/landsat_ingestor",
"id": "61e5ee2bfbb9dc946691b3a31ce01d9f0c994a1d",
"size": "3430",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ingestor/thumbnailer.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "366"
},
{
"name": "Python",
"bytes": "48045"
},
{
"name": "Shell",
"bytes": "326"
}
],
"symlink_target": ""
}
|
from threading import Thread
from abc import ABC, abstractmethod
class AbstractWindow(ABC):
def __init__(self, config):
self.config = config
self.setup(config)
def setup(self, config):
pass
@abstractmethod
def get_outer_window_rect(self):
pass
@abstractmethod
def key_press(self, key):
pass
@abstractmethod
def escape_listener(self, callback):
pass
def listen_for_escape(self, callback):
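        # Run the listener on a daemon thread so it never blocks interpreter exit.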
t = Thread(target=self.escape_listener, args=(callback,))
t.setDaemon(True)
t.start()
|
{
"content_hash": "ea6aa846c08f0756b321ae686ca0f5f6",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 65,
"avg_line_length": 21.77777777777778,
"alnum_prop": 0.6224489795918368,
"repo_name": "Frizz925/gbf-autopilot",
"id": "9d9c8f7af98d705003bee70848879e58c7880ce6",
"size": "588",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "controller/window/abstract_window.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1039"
},
{
"name": "JavaScript",
"bytes": "65878"
},
{
"name": "Python",
"bytes": "15416"
}
],
"symlink_target": ""
}
|
"""Ghost Buster. Static site generator for Ghost.
Usage:
buster.py setup [--gh-repo=<repo-url>] [--dir=<path>]
buster.py generate [--domain=<local-address>] [--dir=<path>]
buster.py preview [--dir=<path>]
buster.py deploy [--dir=<path>]
buster.py add-domain <domain-name> [--dir=<path>]
buster.py (-h | --help)
buster.py --version
Options:
-h --help Show this screen.
--version Show version.
--dir=<path> Absolute path of directory to store static pages.
--domain=<local-address> Address of local ghost installation [default: localhost:2368].
--gh-repo=<repo-url> URL of your gh-pages repository.
"""
import os
import re
import sys
import fnmatch
import shutil
from docopt import docopt
from time import gmtime, strftime
from git import Repo
from pyquery import PyQuery
from lxml import etree
from io import StringIO, BytesIO
def main():
is_windows = os.name == 'nt'
query_string_separator = '@' if is_windows else '#'
arguments = docopt(__doc__, version='0.1.3')
if arguments['--dir'] is not None:
static_path = arguments['--dir']
else:
static_path = os.path.join(os.getcwd(), 'static')
if arguments['generate']:
command = ("wget "
"--recursive " # follow links to download entire site
"{2} " # make links relative
"--page-requisites " # grab everything: css / inlined images
"--no-parent " # don't go to parent level
"--directory-prefix {1} " # download contents to static/ folder
"--no-host-directories " # don't create domain named folder
"--restrict-file-name={3} " # don't escape query string
"{0}").format(arguments['--domain'], static_path, '' if is_windows else '--convert-links', 'windows' if is_windows else 'unix')
result = os.system(command)
if result > 0:
raise IOError('Your ghost server is dead')
# remove query string since Ghost 0.4
file_regex = re.compile(r'.*?(' + query_string_separator + '.*)')
html_regex = re.compile(r".*?(\.html)")
for root, dirs, filenames in os.walk(static_path):
for filename in filenames:
if is_windows and html_regex.match(filename):
path = ("{0}").format(os.path.join(root, filename).replace("\\", "/"))
with open(path, "r+") as f:
file_contents = f.read()
file_contents = file_contents.replace(arguments['--domain'], "")
file_contents = file_contents.replace("%hurl", arguments['--domain'])
                        f.seek(0)
                        f.write(file_contents)
                        f.truncate()
if file_regex.match(filename):
newname = re.sub(query_string_separator + r'.*', '', filename)
newpath = os.path.join(root, newname)
try:
os.remove(newpath)
except OSError:
pass
os.rename(os.path.join(root, filename), newpath)
# remove superfluous "index.html" from relative hyperlinks found in text
abs_url_regex = re.compile(r'^(?:[a-z]+:)?//', flags=re.IGNORECASE)
        def fixLinks(text, parser):
            # Use the parser the caller asked for: rss feeds need lxml's XML
            # parser, fed utf-8 bytes since they carry an encoding declaration.
            if parser == 'xml':
                tree = etree.fromstring(text.encode('utf-8'), etree.XMLParser())
            else:
                tree = etree.fromstring(text, etree.HTMLParser())
# edit all the <a> and <link> elements first
nodeList = tree.findall(".//a") + tree.findall(".//link")
for element in nodeList:
                href = element.attrib.get('href')
if href is None:
continue
new_href = re.sub(r'(rss/index\.html)|((?<!\.)rss/?)$', 'rss/index.rss', href)
if new_href[0] == "/" and new_href[:2] != "//":
new_href = "/static" + new_href
if not abs_url_regex.search(href):
new_href = re.sub(r'/index\.html$', '/', new_href)
if href != new_href:
element.attrib['href'] = new_href
print(str(href) + " => " + str(new_href))
# Make sure all <script> and <img> elements are referenced correctly
nodeList = tree.findall(".//script") + tree.findall(".//img")
for element in nodeList:
                if 'src' not in element.attrib:
continue
src = element.attrib['src']
if src is None:
continue
new_src = re.sub(r'(rss/index\.html)|((?<!\.)rss/?)$', 'rss/index.rss', src)
if new_src[0] == "/" and new_src[:2] != "//":
new_src = "/static" + new_src
if src != new_src:
element.attrib['src'] = new_src
print(str(src) + " => " + str(new_src))
# Fix to make code display correctly:
#for element in tree.findall(".//code"):
# if not "\n" in element.text:
# element.getparent().attrib['style'] = "text-align: center;"
return etree.tostring(tree, pretty_print=True, method="html")
# fix links in all html files
scripts_reg = re.compile(r"(<script.*?/>)", re.IGNORECASE)
for root, dirs, filenames in os.walk(static_path):
for filename in fnmatch.filter(filenames, '*.html'):
filepath = os.path.join(root, filename)
parser = 'html'
if root.endswith(os.path.sep + 'rss'): # rename rss index.html to index.rss
parser = 'xml'
newfilepath = os.path.join(root, os.path.splitext(filename)[0] + '.rss')
try:
os.remove(newfilepath)
except OSError:
pass
os.rename(filepath, newfilepath)
filepath = newfilepath
with open(filepath) as f:
filetext = f.read().decode('utf8')
print('fixing links in ' + str(filepath))
newtext = fixLinks(filetext, parser)
with open(filepath, 'w') as f:
# Make sure the scripts are not terminated like this '/>'
# Stupid fix, but it works and maintains menu functionality in casper
output = scripts_reg.findall(newtext)
for script_item in output:
newtext = newtext.replace(script_item, script_item[:-2] + "></script>")
f.write(newtext)
elif arguments['setup']:
if arguments['--gh-repo']:
repo_url = arguments['--gh-repo']
else:
repo_url = raw_input("Enter the Github repository URL:\n").strip()
# Create a fresh new static files directory
if os.path.isdir(static_path):
confirm = raw_input("This will destroy everything inside static/."
" Are you sure you want to continue? (y/N)").strip()
if confirm != 'y' and confirm != 'Y':
sys.exit(0)
shutil.rmtree(static_path)
# User/Organization page -> master branch
# Project page -> gh-pages branch
branch = 'gh-pages'
regex = re.compile(".*[\w-]+\.github\.(?:io|com).*")
if regex.match(repo_url):
branch = 'master'
# Prepare git repository
repo = Repo.init(static_path)
git = repo.git
if branch == 'gh-pages':
git.checkout(b='gh-pages')
repo.create_remote('origin', repo_url)
# Add README
file_path = os.path.join(static_path, 'README.md')
with open(file_path, 'w') as f:
f.write('# Blog\nPowered by [Ghost](http://ghost.org) and [Buster](https://github.com/axitkhurana/buster/).\n')
print("All set! You can generate and deploy now.")
elif arguments['deploy']:
repo = Repo(static_path)
repo.git.add('.')
current_time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
repo.index.commit('Blog update at {}'.format(current_time))
origin = repo.remotes.origin
repo.git.execute(['git', 'push', '-u', origin.name,
repo.active_branch.name])
print("Good job! Deployed to Github Pages.")
elif arguments['add-domain']:
repo = Repo(static_path)
custom_domain = arguments['<domain-name>']
file_path = os.path.join(static_path, 'CNAME')
with open(file_path, 'w') as f:
f.write(custom_domain + '\n')
print("Added CNAME file to repo. Use `deploy` to deploy")
else:
print(__doc__)
if __name__ == '__main__':
main()
|
{
"content_hash": "a45e73312bd27ddcbf2b778382ec00c2",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 146,
"avg_line_length": 39.34649122807018,
"alnum_prop": 0.5137665812061085,
"repo_name": "Timebutt/timebutt.github.io",
"id": "1715c996685e9fa19e395ab75b5918c1b24678e9",
"size": "8971",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "buster.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "237423"
},
{
"name": "HTML",
"bytes": "253555"
},
{
"name": "JavaScript",
"bytes": "10541382"
},
{
"name": "Python",
"bytes": "8971"
},
{
"name": "Shell",
"bytes": "109"
},
{
"name": "XSLT",
"bytes": "7177"
}
],
"symlink_target": ""
}
|
"""
Target builder factories for the ``pyltc`` framework.
Target builders are objects implementing ``ITarget`` (see the ``pyltc.core.target``
package for the default implementations). The main target builder flavor is
``TcTarget`` with two subclasses, representing a ``tc``-compatible commands target.
The ``tc`` command is part of the ``iproute2`` network utilities for Linux.
See http://man7.org/linux/man-pages/man8/tc.8.html for details about the ``tc``
command.
"""
from pyltc.core import DIR_EGRESS, DIR_INGRESS
from pyltc.core.target import TcCommandTarget, TcFileTarget, PrintingTcTarget
def default_target_factory(iface, direction, callback=None):
"""
The default target factory. If no custom factory is provided to the
framework, this factory is used by the ``NetDevice.new_instance()`` method
    to configure its egress and ingress target builders.
A custom target factory may be provided to ``NetDevice.new_instance()`` that
returns a target instance (that is, a class implementing ITarget).
As the ITarget interface limits the arguments at creation time, the
ITarget.configure() method can be used to further configure a target object.
:param iface: NetDevice - the network device object
:param direction: string - a string representing flow direction (DIR_EGRESS or DIR_INGRESS)
    :param callback: callable - a callback function to be called to complete this target configuration.
    :return: TcCommandTarget - the ITarget object created by this factory.
"""
accepted_values = (DIR_EGRESS, DIR_INGRESS)
assert direction in accepted_values, "direction must be one of {!r}".format(accepted_values)
target = TcCommandTarget(iface, direction)
if callback:
target.configure(callback=callback)
return target
def tc_file_target_factory(iface, direction):
"""
    Target factory returning a new TcFileTarget.
    :param iface: NetDevice - the network device object
    :param direction: string - a string representing flow direction (DIR_EGRESS or DIR_INGRESS)
    :return: TcFileTarget - the ITarget object created by this factory.
"""
accepted_values = (DIR_EGRESS, DIR_INGRESS)
assert direction in accepted_values, "direction must be one of {!r}".format(accepted_values)
target = TcFileTarget(iface, direction)
target.configure(verbose=True)
return target
#: Note that when a tc target needs no configuration via ``target.configure()``,
#: the class itself can serve as the factory:
printing_target_factory = PrintingTcTarget
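#: A minimal sketch (not part of the original module) of a custom factory,
#: following the same (iface, direction) signature:
#:
#:     def my_target_factory(iface, direction):
#:         target = TcFileTarget(iface, direction)
#:         target.configure(verbose=False)
#:         return target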
|
{
"content_hash": "2783f7ca5f19d1d92176d2e79d89fd27",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 103,
"avg_line_length": 43.85245901639344,
"alnum_prop": 0.7401869158878505,
"repo_name": "yassen-itlabs/py-linux-traffic-control",
"id": "48ee1c4edac70fec002dd11f7a526aa6823a53ac",
"size": "2675",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyltc/core/tfactory.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "157866"
},
{
"name": "Shell",
"bytes": "1265"
}
],
"symlink_target": ""
}
|
import copy
from itertools import chain
from django import forms
from django.contrib.postgres.validators import (
ArrayMaxLengthValidator, ArrayMinLengthValidator,
)
from django.core.exceptions import ValidationError
from django.utils import six
from django.utils.translation import ugettext_lazy as _
from ..utils import prefix_validation_error
class SimpleArrayField(forms.CharField):
default_error_messages = {
'item_invalid': _('Item %(nth)s in the array did not validate: '),
}
def __init__(self, base_field, delimiter=',', max_length=None, min_length=None, *args, **kwargs):
self.base_field = base_field
self.delimiter = delimiter
super(SimpleArrayField, self).__init__(*args, **kwargs)
if min_length is not None:
self.min_length = min_length
self.validators.append(ArrayMinLengthValidator(int(min_length)))
if max_length is not None:
self.max_length = max_length
self.validators.append(ArrayMaxLengthValidator(int(max_length)))
def prepare_value(self, value):
if isinstance(value, list):
return self.delimiter.join(six.text_type(self.base_field.prepare_value(v)) for v in value)
return value
def to_python(self, value):
if isinstance(value, list):
items = value
elif value:
items = value.split(self.delimiter)
else:
items = []
errors = []
values = []
for index, item in enumerate(items):
try:
values.append(self.base_field.to_python(item))
except ValidationError as error:
errors.append(prefix_validation_error(
error,
prefix=self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
))
if errors:
raise ValidationError(errors)
return values
def validate(self, value):
super(SimpleArrayField, self).validate(value)
errors = []
for index, item in enumerate(value):
try:
self.base_field.validate(item)
except ValidationError as error:
errors.append(prefix_validation_error(
error,
prefix=self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
))
if errors:
raise ValidationError(errors)
def run_validators(self, value):
super(SimpleArrayField, self).run_validators(value)
errors = []
for index, item in enumerate(value):
try:
self.base_field.run_validators(item)
except ValidationError as error:
errors.append(prefix_validation_error(
error,
prefix=self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
))
if errors:
raise ValidationError(errors)
class SplitArrayWidget(forms.Widget):
template_name = 'postgres/widgets/split_array.html'
def __init__(self, widget, size, **kwargs):
self.widget = widget() if isinstance(widget, type) else widget
self.size = size
super(SplitArrayWidget, self).__init__(**kwargs)
@property
def is_hidden(self):
return self.widget.is_hidden
def value_from_datadict(self, data, files, name):
return [self.widget.value_from_datadict(data, files, '%s_%s' % (name, index))
for index in range(self.size)]
def value_omitted_from_data(self, data, files, name):
return all(
self.widget.value_omitted_from_data(data, files, '%s_%s' % (name, index))
for index in range(self.size)
)
def id_for_label(self, id_):
# See the comment for RadioSelect.id_for_label()
if id_:
id_ += '_0'
return id_
def get_context(self, name, value, attrs=None):
attrs = {} if attrs is None else attrs
context = super(SplitArrayWidget, self).get_context(name, value, attrs)
if self.is_localized:
self.widget.is_localized = self.is_localized
value = value or []
context['widget']['subwidgets'] = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id')
for i in range(max(len(value), self.size)):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
context['widget']['subwidgets'].append(
self.widget.get_context(name + '_%s' % i, widget_value, final_attrs)['widget']
)
return context
@property
def media(self):
return self.widget.media
def __deepcopy__(self, memo):
obj = super(SplitArrayWidget, self).__deepcopy__(memo)
obj.widget = copy.deepcopy(self.widget)
return obj
@property
def needs_multipart_form(self):
return self.widget.needs_multipart_form
class SplitArrayField(forms.Field):
default_error_messages = {
'item_invalid': _('Item %(nth)s in the array did not validate: '),
}
def __init__(self, base_field, size, remove_trailing_nulls=False, **kwargs):
self.base_field = base_field
self.size = size
self.remove_trailing_nulls = remove_trailing_nulls
widget = SplitArrayWidget(widget=base_field.widget, size=size)
kwargs.setdefault('widget', widget)
super(SplitArrayField, self).__init__(**kwargs)
def clean(self, value):
cleaned_data = []
errors = []
if not any(value) and self.required:
raise ValidationError(self.error_messages['required'])
max_size = max(self.size, len(value))
for index in range(max_size):
item = value[index]
try:
cleaned_data.append(self.base_field.clean(item))
except ValidationError as error:
errors.append(prefix_validation_error(
error,
self.error_messages['item_invalid'],
code='item_invalid',
params={'nth': index},
))
cleaned_data.append(None)
else:
errors.append(None)
if self.remove_trailing_nulls:
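            # Trim values the base field considers empty from the end of the
            # list, stopping at the first non-empty value.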
null_index = None
for i, value in reversed(list(enumerate(cleaned_data))):
if value in self.base_field.empty_values:
null_index = i
else:
break
if null_index is not None:
cleaned_data = cleaned_data[:null_index]
errors = errors[:null_index]
errors = list(filter(None, errors))
if errors:
raise ValidationError(list(chain.from_iterable(errors)))
return cleaned_data
|
{
"content_hash": "00d8d31e9ff5d9038a34977e45ac8e82",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 102,
"avg_line_length": 35.60696517412935,
"alnum_prop": 0.5623864747799358,
"repo_name": "mbayon/TFG-MachineLearning",
"id": "4fbeca88776c36ff405203daf85d59067542f8d9",
"size": "7157",
"binary": false,
"copies": "34",
"ref": "refs/heads/master",
"path": "venv/lib/python3.6/site-packages/django/contrib/postgres/forms/array.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "24787"
},
{
"name": "Julia",
"bytes": "11103"
},
{
"name": "Matlab",
"bytes": "98571"
},
{
"name": "Perl",
"bytes": "716"
},
{
"name": "Python",
"bytes": "115284"
},
{
"name": "Shell",
"bytes": "643"
}
],
"symlink_target": ""
}
|
import unittest
from telemetry import decorators
# These are not real unittests.
# They are merely to test our Enable/Disable annotations.
class DisabledCases(unittest.TestCase):
def testAllEnabled(self):
pass
@decorators.Disabled('all')
def testAllDisabled(self):
pass
@decorators.Enabled('mavericks')
def testMavericksOnly(self):
pass
@decorators.Disabled('mavericks')
def testNoMavericks(self):
pass
@decorators.Enabled('mac')
def testMacOnly(self):
pass
@decorators.Disabled('mac')
def testNoMac(self):
pass
@decorators.Enabled('chromeos')
def testChromeOSOnly(self):
pass
@decorators.Disabled('chromeos')
def testNoChromeOS(self):
pass
@decorators.Enabled('win', 'linux')
def testWinOrLinuxOnly(self):
pass
@decorators.Disabled('win', 'linux')
def testNoWinLinux(self):
pass
@decorators.Enabled('system')
def testSystemOnly(self):
pass
@decorators.Disabled('system')
def testNoSystem(self):
pass
@decorators.Enabled('has tabs')
def testHasTabs(self):
pass
|
{
"content_hash": "c98edef4a42905e8d2dadb326abffd3e",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 57,
"avg_line_length": 18.28813559322034,
"alnum_prop": 0.7025023169601483,
"repo_name": "Workday/OpenFrame",
"id": "bb4641a5897f04c8c7a42d5cfd663b748ab4de5d",
"size": "1242",
"binary": false,
"copies": "17",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/testing/disabled_cases.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
from collections import OrderedDict
import argparse
import httplib
import json
import os.path
import re
import sys
import time
# Seconds to wait for the admin address output file to appear. The script exits
# with failure if the file is not found.
ADMIN_FILE_TIMEOUT_SECS = 20
# Because the hot restart files are yaml but yaml support is not included in
# python by default, we parse this fairly manually.
def GenerateNewConfig(original_yaml, admin_address, updated_json):
# Get original listener addresses
with open(original_yaml, 'r') as original_file:
sys.stdout.write('Admin address is ' + admin_address + '\n')
try:
admin_conn = httplib.HTTPConnection(admin_address)
admin_conn.request('GET', '/listeners')
admin_response = admin_conn.getresponse()
      if admin_response.status != 200:
return False
discovered_listeners = json.loads(admin_response.read())
except Exception as e:
sys.stderr.write('Cannot connect to admin: %s\n' % e)
return False
else:
raw_yaml = original_file.readlines()
index = 0
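      # Walk the raw yaml lines once, patching each discovered listener in
      # place (unix pipe paths vs ip:port socket addresses).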
for discovered in discovered_listeners:
replaced = False
if discovered.startswith('/'):
for index in range(index + 1, len(raw_yaml) - 1):
if 'pipe:' in raw_yaml[index] and 'path:' in raw_yaml[index + 1]:
raw_yaml[index + 1] = re.sub('path:.*', 'path: "' + discovered + '"',
raw_yaml[index + 1])
replaced = True
break
else:
addr, _, port = discovered.rpartition(':')
if addr[0] == '[':
addr = addr[1:-1] # strip [] from ipv6 address.
for index in range(index + 1, len(raw_yaml) - 2):
if ('socket_address:' in raw_yaml[index] and 'address:' in raw_yaml[index + 1] and
'port_value:' in raw_yaml[index + 2]):
raw_yaml[index + 1] = re.sub('address:.*', 'address: "' + addr + '"',
raw_yaml[index + 1])
raw_yaml[index + 2] = re.sub('port_value:.*', 'port_value: ' + port,
raw_yaml[index + 2])
replaced = True
break
if replaced:
sys.stderr.write('replaced listener at line ' + str(index) + ' with ' + discovered + '\n')
else:
sys.stderr.write('Failed to replace a discovered listener ' + discovered + '\n')
return False
with open(updated_json, 'w') as outfile:
outfile.writelines(raw_yaml)
finally:
admin_conn.close()
return True
if __name__ == '__main__':
  parser = argparse.ArgumentParser(description='Replace listener addresses in json file.')
parser.add_argument(
'-o',
'--original_json',
type=str,
required=True,
help='Path of the original config json file')
parser.add_argument(
'-a', '--admin_address_path', type=str, required=True, help='Path of the admin address file')
parser.add_argument(
'-u',
'--updated_json',
type=str,
required=True,
help='Path to output updated json config file')
args = parser.parse_args()
admin_address_path = args.admin_address_path
# Read admin address from file
counter = 0
while not os.path.exists(admin_address_path):
time.sleep(1)
counter += 1
if counter > ADMIN_FILE_TIMEOUT_SECS:
break
if not os.path.exists(admin_address_path):
sys.exit(1)
with open(admin_address_path, 'r') as admin_address_file:
admin_address = admin_address_file.read()
success = GenerateNewConfig(args.original_json, admin_address, args.updated_json)
if not success:
sys.exit(1)
|
{
"content_hash": "f29dd064d0cb217c32ac0c011cb35781",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 100,
"avg_line_length": 35.16981132075472,
"alnum_prop": 0.5971030042918455,
"repo_name": "dnoe/envoy",
"id": "da41dc5c5bbdc5ee67983a33141cea2870a3fe9c",
"size": "4190",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/socket_passing.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9173"
},
{
"name": "C++",
"bytes": "12933900"
},
{
"name": "Dockerfile",
"bytes": "245"
},
{
"name": "Emacs Lisp",
"bytes": "966"
},
{
"name": "Go",
"bytes": "836"
},
{
"name": "PowerShell",
"bytes": "4285"
},
{
"name": "PureBasic",
"bytes": "472"
},
{
"name": "Python",
"bytes": "946275"
},
{
"name": "Shell",
"bytes": "98909"
},
{
"name": "Thrift",
"bytes": "748"
}
],
"symlink_target": ""
}
|
"""SNR agent."""
from acme.agents.jax.sac.config import target_entropy_from_env_spec
from jrl.agents.snr import config
from jrl.agents.snr import networks
from jrl.agents.snr.builder import SNRBuilder
from jrl.agents.snr.learning import SNRLearner
from jrl.agents.snr.networks import apply_policy_and_sample
from jrl.utils.agent_utils import RLComponents
class SNRRLComponents(RLComponents):
def __init__(self, spec, create_data_iter_fn):
self._spec = spec
self._config = config.SNRConfig(
target_entropy=target_entropy_from_env_spec(spec))
# self._config = config.SNRConfig(
# target_entropy=0.,
# # entropy_coefficient=10.)
# entropy_coefficient=1.)
self._create_data_iter_fn = create_data_iter_fn
def make_builder(self):
return SNRBuilder(
config=self._config,
make_demonstrations=self._create_data_iter_fn)
def make_networks(self):
return networks.make_networks(
self._spec,
actor_hidden_layer_sizes=self._config.actor_network_hidden_sizes,
critic_hidden_layer_sizes=self._config.critic_network_hidden_sizes,
num_critics=self._config.num_critics,)
def make_behavior_policy(self, network):
return networks.apply_policy_and_sample(network, eval_mode=False)
def make_eval_behavior_policy(self, network):
return networks.apply_policy_and_sample(network, eval_mode=True)
|
{
"content_hash": "459df5dd665dd6fb661d9c7aba3de29d",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 75,
"avg_line_length": 34.170731707317074,
"alnum_prop": 0.715203426124197,
"repo_name": "google-research/google-research",
"id": "fea3a9a2452d1cef5855f69402766d9e5561e4ee",
"size": "2009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jrl/agents/snr/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "9817"
},
{
"name": "C++",
"bytes": "4166670"
},
{
"name": "CMake",
"bytes": "6412"
},
{
"name": "CSS",
"bytes": "27092"
},
{
"name": "Cuda",
"bytes": "1431"
},
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Gnuplot",
"bytes": "11125"
},
{
"name": "HTML",
"bytes": "77599"
},
{
"name": "ImageJ Macro",
"bytes": "50488"
},
{
"name": "Java",
"bytes": "487585"
},
{
"name": "JavaScript",
"bytes": "896512"
},
{
"name": "Julia",
"bytes": "67986"
},
{
"name": "Jupyter Notebook",
"bytes": "71290299"
},
{
"name": "Lua",
"bytes": "29905"
},
{
"name": "MATLAB",
"bytes": "103813"
},
{
"name": "Makefile",
"bytes": "5636"
},
{
"name": "NASL",
"bytes": "63883"
},
{
"name": "Perl",
"bytes": "8590"
},
{
"name": "Python",
"bytes": "53790200"
},
{
"name": "R",
"bytes": "101058"
},
{
"name": "Roff",
"bytes": "1208"
},
{
"name": "Rust",
"bytes": "2389"
},
{
"name": "Shell",
"bytes": "730444"
},
{
"name": "Smarty",
"bytes": "5966"
},
{
"name": "Starlark",
"bytes": "245038"
}
],
"symlink_target": ""
}
|
from google.cloud import secretmanager_v1
def sample_destroy_secret_version():
# Create a client
client = secretmanager_v1.SecretManagerServiceClient()
# Initialize request argument(s)
request = secretmanager_v1.DestroySecretVersionRequest(
name="name_value",
)
# Make the request
response = client.destroy_secret_version(request=request)
# Handle the response
print(response)
# [END secretmanager_v1_generated_SecretManagerService_DestroySecretVersion_sync]
|
{
"content_hash": "7c1b236dbde777287a2faf54b90d462c",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 81,
"avg_line_length": 26.789473684210527,
"alnum_prop": 0.7387033398821218,
"repo_name": "googleapis/python-secret-manager",
"id": "3c2a0bf564de76974ee72b3fb775e23cdc02efe1",
"size": "1928",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "samples/generated_samples/secretmanager_v1_generated_secret_manager_service_destroy_secret_version_sync.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "950029"
},
{
"name": "Shell",
"bytes": "30684"
}
],
"symlink_target": ""
}
|
from looker_sdk import methods, models40
import looker_sdk
sdk = looker_sdk.init40("../looker.ini")
def create_simple_schedule(dashboard_id: int, user_id: int, schedule_title: str, format: str, email: str, type: str, message: str, crontab: str):
### For more information on the Params accepted https://github.com/looker-open-source/sdk-codegen/blob/master/python/looker_sdk/sdk/api31/methods.py#L2144
### And for schedule destination go: https://github.com/looker-open-source/sdk-codegen/blob/master/python/looker_sdk/sdk/api31/models.py#L4601
### Supported formats vary by destination, but include: "txt", "csv", "inline_json", "json", "json_detail", "xlsx", "html", "wysiwyg_pdf", "assembled_pdf", "wysiwyg_png"
### type: Type of the address ('email', 'webhook', 's3', or 'sftp')
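    ### crontab uses standard 5-field cron syntax, e.g. "0 1 * * *" (used below) runs daily at 01:00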
schedule = sdk.create_scheduled_plan(
body=models40.WriteScheduledPlan(name=schedule_title, dashboard_id=dashboard_id, user_id=user_id, run_as_recipient= True, crontab=crontab, scheduled_plan_destination = [models40.ScheduledPlanDestination(format=format, apply_formatting=True, apply_vis=True, address=email, type=type, message=message)]))
create_simple_schedule(1234,453,"This is an automated test", "assembled_pdf", "test@looker.com", "email", "Hi Looker User!", "0 1 * * *")
|
{
"content_hash": "b530758ddf27c327c534a72f86bdf212",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 307,
"avg_line_length": 86.4,
"alnum_prop": 0.7376543209876543,
"repo_name": "looker-open-source/sdk-codegen",
"id": "8e21888289d14a4f057b1d7a6ce4004fbd045aa8",
"size": "1296",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/python/simple_schedule_plan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "1558559"
},
{
"name": "Go",
"bytes": "780579"
},
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "JavaScript",
"bytes": "46766"
},
{
"name": "Jupyter Notebook",
"bytes": "44336"
},
{
"name": "Kotlin",
"bytes": "1224618"
},
{
"name": "Nix",
"bytes": "132"
},
{
"name": "Python",
"bytes": "2119978"
},
{
"name": "Shell",
"bytes": "4961"
},
{
"name": "Swift",
"bytes": "1996724"
},
{
"name": "TypeScript",
"bytes": "2759848"
}
],
"symlink_target": ""
}
|
"""oslo.i18n integration module.
See http://docs.openstack.org/developer/oslo.i18n/usage.html
"""
try:
import oslo_i18n
# NOTE(dhellmann): This reference to o-s-l-o will be replaced by the
# application name when this module is synced into the separate
# repository. It is OK to have more than one translation function
# using the same domain, since there will still only be one message
# catalog.
_translators = oslo_i18n.TranslatorFactory(domain='designate')
# The primary translation function using the well-known name "_"
_ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
except ImportError:
# NOTE(dims): Support for cases where a project wants to use
# code from oslo-incubator, but is not ready to be internationalized
# (like tempest)
_ = _LI = _LW = _LE = _LC = lambda x: x
|
{
"content_hash": "04267b614d563acf6da2b4e2be677ac9",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 73,
"avg_line_length": 35.06060606060606,
"alnum_prop": 0.682800345721694,
"repo_name": "muraliselva10/designate",
"id": "fd1b98e6df54951b5c1b910dbffa6386b49923f4",
"size": "1730",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "designate/openstack/common/_i18n.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2185372"
},
{
"name": "Ruby",
"bytes": "4170"
},
{
"name": "Shell",
"bytes": "12933"
}
],
"symlink_target": ""
}
|
from sanic import Sanic
from sanic.blueprints import Blueprint
from sanic.response import json
app = Sanic(name="blue-print-group-version-example")
bp1 = Blueprint(name="ultron", url_prefix="/ultron")
bp2 = Blueprint(name="vision", url_prefix="/vision", strict_slashes=None)
bpg = Blueprint.group([bp1, bp2], url_prefix="/sentient/robot", version=1, strict_slashes=True)
@bp1.get("/name")
async def bp1_name(request):
"""This will expose an Endpoint GET /v1/sentient/robot/ultron/name"""
return json({"name": "Ultron"})
@bp2.get("/name")
async def bp2_name(request):
"""This will expose an Endpoint GET /v1/sentient/robot/vision/name"""
return json({"name": "vision"})
@bp2.get("/name", version=2)
async def bp2_revised_name(request):
"""This will expose an Endpoint GET /v2/sentient/robot/vision/name"""
return json({"name": "new vision"})
app.blueprint(bpg)
if __name__ == '__main__':
app.run(host="0.0.0.0", port=8000)
|
{
"content_hash": "f6b7b2a2baa46fffdda67a0f11cae150",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 95,
"avg_line_length": 27.514285714285716,
"alnum_prop": 0.6843198338525441,
"repo_name": "channelcat/sanic",
"id": "77360f5d6f4b896f8a53cfa70ef18d37567923c5",
"size": "963",
"binary": false,
"copies": "1",
"ref": "refs/heads/exception-message-defaults",
"path": "examples/versioned_blueprint_group.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "529"
},
{
"name": "Go",
"bytes": "482"
},
{
"name": "HTML",
"bytes": "1173"
},
{
"name": "Makefile",
"bytes": "129"
},
{
"name": "Python",
"bytes": "380871"
},
{
"name": "Shell",
"bytes": "462"
}
],
"symlink_target": ""
}
|
"""Tests for tensorflow_model.object_detection.metrics.coco_tools."""
import json
import os
import re
import numpy as np
from pycocotools import mask
import tensorflow as tf
from object_detection.metrics import coco_tools
class CocoToolsTest(tf.test.TestCase):
def setUp(self):
groundtruth_annotations_list = [
{
'id': 1,
'image_id': 'first',
'category_id': 1,
'bbox': [100., 100., 100., 100.],
'area': 100.**2,
'iscrowd': 0
},
{
'id': 2,
'image_id': 'second',
'category_id': 1,
'bbox': [50., 50., 50., 50.],
'area': 50.**2,
'iscrowd': 0
},
]
image_list = [{'id': 'first'}, {'id': 'second'}]
category_list = [{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'}]
self._groundtruth_dict = {
'annotations': groundtruth_annotations_list,
'images': image_list,
'categories': category_list
}
self._detections_list = [
{
'image_id': 'first',
'category_id': 1,
'bbox': [100., 100., 100., 100.],
'score': .8
},
{
'image_id': 'second',
'category_id': 1,
'bbox': [50., 50., 50., 50.],
'score': .7
},
]
def testCocoWrappers(self):
groundtruth = coco_tools.COCOWrapper(self._groundtruth_dict)
detections = groundtruth.LoadAnnotations(self._detections_list)
evaluator = coco_tools.COCOEvalWrapper(groundtruth, detections)
summary_metrics, _ = evaluator.ComputeMetrics()
self.assertAlmostEqual(1.0, summary_metrics['Precision/mAP'])
def testExportGroundtruthToCOCO(self):
image_ids = ['first', 'second']
groundtruth_boxes = [np.array([[100, 100, 200, 200]], np.float),
np.array([[50, 50, 100, 100]], np.float)]
groundtruth_classes = [np.array([1], np.int32), np.array([1], np.int32)]
categories = [{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'}]
output_path = os.path.join(tf.test.get_temp_dir(), 'groundtruth.json')
result = coco_tools.ExportGroundtruthToCOCO(
image_ids,
groundtruth_boxes,
groundtruth_classes,
categories,
output_path=output_path)
self.assertDictEqual(result, self._groundtruth_dict)
with tf.gfile.GFile(output_path, 'r') as f:
written_result = f.read()
# The json output should have floats written to 4 digits of precision.
matcher = re.compile(r'"bbox":\s+\[\n\s+\d+.\d\d\d\d,', re.MULTILINE)
self.assertTrue(matcher.findall(written_result))
written_result = json.loads(written_result)
self.assertAlmostEqual(result, written_result)
def testExportDetectionsToCOCO(self):
image_ids = ['first', 'second']
detections_boxes = [np.array([[100, 100, 200, 200]], np.float),
np.array([[50, 50, 100, 100]], np.float)]
detections_scores = [np.array([.8], np.float), np.array([.7], np.float)]
detections_classes = [np.array([1], np.int32), np.array([1], np.int32)]
categories = [{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'}]
output_path = os.path.join(tf.test.get_temp_dir(), 'detections.json')
result = coco_tools.ExportDetectionsToCOCO(
image_ids,
detections_boxes,
detections_scores,
detections_classes,
categories,
output_path=output_path)
self.assertListEqual(result, self._detections_list)
with tf.gfile.GFile(output_path, 'r') as f:
written_result = f.read()
# The json output should have floats written to 4 digits of precision.
matcher = re.compile(r'"bbox":\s+\[\n\s+\d+.\d\d\d\d,', re.MULTILINE)
self.assertTrue(matcher.findall(written_result))
written_result = json.loads(written_result)
self.assertAlmostEqual(result, written_result)
def testExportSegmentsToCOCO(self):
image_ids = ['first', 'second']
detection_masks = [np.array(
[[[0, 1, 0, 1], [0, 1, 1, 0], [0, 0, 0, 1], [0, 1, 0, 1]]],
dtype=np.uint8), np.array(
[[[0, 1, 0, 1], [0, 1, 1, 0], [0, 0, 0, 1], [0, 1, 0, 1]]],
dtype=np.uint8)]
for i, detection_mask in enumerate(detection_masks):
detection_masks[i] = detection_mask[:, :, :, None]
detection_scores = [np.array([.8], np.float), np.array([.7], np.float)]
detection_classes = [np.array([1], np.int32), np.array([1], np.int32)]
categories = [{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'}]
output_path = os.path.join(tf.test.get_temp_dir(), 'segments.json')
result = coco_tools.ExportSegmentsToCOCO(
image_ids,
detection_masks,
detection_scores,
detection_classes,
categories,
output_path=output_path)
with tf.gfile.GFile(output_path, 'r') as f:
written_result = f.read()
written_result = json.loads(written_result)
mask_load = mask.decode([written_result[0]['segmentation']])
self.assertTrue(np.allclose(mask_load, detection_masks[0]))
self.assertAlmostEqual(result, written_result)
def testExportKeypointsToCOCO(self):
image_ids = ['first', 'second']
detection_keypoints = [
np.array(
[[[100, 200], [300, 400], [500, 600]],
[[50, 150], [250, 350], [450, 550]]], dtype=np.int32),
np.array(
[[[110, 210], [310, 410], [510, 610]],
[[60, 160], [260, 360], [460, 560]]], dtype=np.int32)]
detection_scores = [np.array([.8, 0.2], np.float),
np.array([.7, 0.3], np.float)]
detection_classes = [np.array([1, 1], np.int32), np.array([1, 1], np.int32)]
categories = [{'id': 1, 'name': 'person', 'num_keypoints': 3},
{'id': 2, 'name': 'cat'},
{'id': 3, 'name': 'dog'}]
output_path = os.path.join(tf.test.get_temp_dir(), 'keypoints.json')
result = coco_tools.ExportKeypointsToCOCO(
image_ids,
detection_keypoints,
detection_scores,
detection_classes,
categories,
output_path=output_path)
with tf.gfile.GFile(output_path, 'r') as f:
written_result = f.read()
written_result = json.loads(written_result)
self.assertAlmostEqual(result, written_result)
def testSingleImageDetectionBoxesExport(self):
boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, 1, 1]], dtype=np.float32)
classes = np.array([1, 2, 3], dtype=np.int32)
scores = np.array([0.8, 0.2, 0.7], dtype=np.float32)
coco_boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, .5, .5]], dtype=np.float32)
coco_annotations = coco_tools.ExportSingleImageDetectionBoxesToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
detection_boxes=boxes,
detection_classes=classes,
detection_scores=scores)
for i, annotation in enumerate(coco_annotations):
self.assertEqual(annotation['image_id'], 'first_image')
self.assertEqual(annotation['category_id'], classes[i])
self.assertAlmostEqual(annotation['score'], scores[i])
self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
def testSingleImageDetectionMaskExport(self):
masks = np.array(
[[[1, 1,], [1, 1]],
[[0, 0], [0, 1]],
[[0, 0], [0, 0]]], dtype=np.uint8)
classes = np.array([1, 2, 3], dtype=np.int32)
scores = np.array([0.8, 0.2, 0.7], dtype=np.float32)
coco_annotations = coco_tools.ExportSingleImageDetectionMasksToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
detection_classes=classes,
detection_scores=scores,
detection_masks=masks)
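    # Expected pycocotools RLE 'counts' strings for each 2x2 mask above.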
expected_counts = ['04', '31', '4']
for i, mask_annotation in enumerate(coco_annotations):
self.assertEqual(mask_annotation['segmentation']['counts'],
expected_counts[i])
self.assertTrue(np.all(np.equal(mask.decode(
mask_annotation['segmentation']), masks[i])))
self.assertEqual(mask_annotation['image_id'], 'first_image')
self.assertEqual(mask_annotation['category_id'], classes[i])
self.assertAlmostEqual(mask_annotation['score'], scores[i])
def testSingleImageGroundtruthExport(self):
masks = np.array(
[[[1, 1,], [1, 1]],
[[0, 0], [0, 1]],
[[0, 0], [0, 0]]], dtype=np.uint8)
boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, 1, 1]], dtype=np.float32)
coco_boxes = np.array([[0, 0, 1, 1],
[0, 0, .5, .5],
[.5, .5, .5, .5]], dtype=np.float32)
classes = np.array([1, 2, 3], dtype=np.int32)
is_crowd = np.array([0, 1, 0], dtype=np.int32)
next_annotation_id = 1
expected_counts = ['04', '31', '4']
# Tests exporting without passing in is_crowd (for backward compatibility).
coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
next_annotation_id=next_annotation_id,
groundtruth_boxes=boxes,
groundtruth_classes=classes,
groundtruth_masks=masks)
for i, annotation in enumerate(coco_annotations):
self.assertEqual(annotation['segmentation']['counts'],
expected_counts[i])
self.assertTrue(np.all(np.equal(mask.decode(
annotation['segmentation']), masks[i])))
self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
self.assertEqual(annotation['image_id'], 'first_image')
self.assertEqual(annotation['category_id'], classes[i])
self.assertEqual(annotation['id'], i + next_annotation_id)
# Tests exporting with is_crowd.
coco_annotations = coco_tools.ExportSingleImageGroundtruthToCoco(
image_id='first_image',
category_id_set=set([1, 2, 3]),
next_annotation_id=next_annotation_id,
groundtruth_boxes=boxes,
groundtruth_classes=classes,
groundtruth_masks=masks,
groundtruth_is_crowd=is_crowd)
for i, annotation in enumerate(coco_annotations):
self.assertEqual(annotation['segmentation']['counts'],
expected_counts[i])
self.assertTrue(np.all(np.equal(mask.decode(
annotation['segmentation']), masks[i])))
self.assertTrue(np.all(np.isclose(annotation['bbox'], coco_boxes[i])))
self.assertEqual(annotation['image_id'], 'first_image')
self.assertEqual(annotation['category_id'], classes[i])
self.assertEqual(annotation['iscrowd'], is_crowd[i])
self.assertEqual(annotation['id'], i + next_annotation_id)
if __name__ == '__main__':
tf.test.main()
|
{
"content_hash": "f37a86e12cd21286756cf65c7722d9c2",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 80,
"avg_line_length": 39.704626334519574,
"alnum_prop": 0.5678945953213229,
"repo_name": "CUFCTL/DLBD",
"id": "cfb73d8c332420d93e19029f53e4068c9fc7b23b",
"size": "11846",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "face-detection-code/object_detection/metrics/coco_tools_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "244643"
},
{
"name": "Python",
"bytes": "2520279"
},
{
"name": "Shell",
"bytes": "3463"
},
{
"name": "TeX",
"bytes": "6468"
}
],
"symlink_target": ""
}
|
import os, hashlib, hmac, base64
from ConfigParser import SafeConfigParser
from fabric.operations import prompt
def loadcredentials():
config = SafeConfigParser()
config_file_path = os.path.join(os.path.dirname(__file__), 'venv', 'aws.ini')
if not os.path.isfile(config_file_path):
config.add_section('aws')
config.set('aws', 'access_key_id', prompt("AWS access key id?"))
config.set('aws', 'secret_access_key', prompt("AWS secret access key?"))
with open(config_file_path, 'w') as fp:
config.write(fp)
else:
with open(config_file_path) as fp:
config.readfp(fp)
access_key = config.get('aws', 'access_key_id')
secret_key = config.get('aws', 'secret_access_key')
return Credentials(access_key, secret_key)
class Credentials:
def __init__(self, access_key_id, secret_access_key):
self.secret_access_key = secret_access_key
self.access_key_id = access_key_id
def sign(self, text):
digest = hmac.new(self.secret_access_key, text, hashlib.sha1).digest()
return base64.b64encode(digest)
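# Illustrative usage (a sketch with a hypothetical string-to-sign; assumes
# venv/aws.ini exists or the interactive prompts succeed):
#
#   creds = loadcredentials()
#   signature = creds.sign('GET\nexample.amazonaws.com\n/\nAction=DescribeInstances')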
|
{
"content_hash": "86c4f543862ce36b057d953f3f0f22f3",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 81,
"avg_line_length": 37.266666666666666,
"alnum_prop": 0.648479427549195,
"repo_name": "tomcz/aws_py",
"id": "72e0c006a3666851a560dd6a8bf40009cb3c9e9d",
"size": "1118",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "properties.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Puppet",
"bytes": "3915"
},
{
"name": "Python",
"bytes": "26202"
},
{
"name": "Shell",
"bytes": "997"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(
name='pymongopager',
version="0.1",
packages=find_packages(),
include_package_data=False,
install_requires=[],
)
|
{
"content_hash": "2c65119905fdf8244abfaeeac7f7aebc",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 43,
"avg_line_length": 20.555555555555557,
"alnum_prop": 0.6756756756756757,
"repo_name": "guneysus/pymongopager",
"id": "965ba9721e9e895cc5cfc6d6065520711242884a",
"size": "209",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4062"
}
],
"symlink_target": ""
}
|
"""
The MIT License (MIT)
Copyright (c) [2015-2022] [Andrew Annex]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import os
import pytest
from spiceypy.tests.gettestkernels import attempt_download, CoreKernels
def test_gettestkernels():
# Force exceptions in gettestkernels.py to get complete coverage there
# To complete code coverage in spiceypy.tests.gettestkernels.py
with pytest.raises(BaseException):
# Generate .HTTPError, return BaseException
attempt_download(
"https://naif.jpl.nasa.gov/404", "httperror.txt", "httperror.txt", 1
)
with pytest.raises(BaseException):
# Generate .URLError, return BaseException
attempt_download(
"https://no_such_host.naif.jpl.nasa.gov/404",
"urlerror.txt",
"urlerror.txt",
1,
)
with pytest.raises(BaseException):
# download a file with an incorrect hash
attempt_download(
CoreKernels.lsk_url,
"badhashkernel.txt",
"badhashkernel.txt",
1,
provided_hash="11c9b4793b6676d464266e790262b986",
)
|
{
"content_hash": "659cfd7efdf10261d505a62e91a9ed47",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 80,
"avg_line_length": 38.70909090909091,
"alnum_prop": 0.7130108031939878,
"repo_name": "AndrewAnnex/SpiceyPy",
"id": "ffd09bb5e71fbcced3c3638eb816a9cc0f7374a5",
"size": "2129",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "src/spiceypy/tests/test_gettestkernels.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1117"
},
{
"name": "Python",
"bytes": "1132264"
},
{
"name": "TeX",
"bytes": "7777"
}
],
"symlink_target": ""
}
|
import fixtures
from nova import test
from nova.virt.vmwareapi import fake
from nova.virt.vmwareapi import vim_util
def _fake_get_object_properties(vim, collector, mobj,
type, properties):
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(fake.ObjectContent(None))
return fake_objects
def _fake_get_object_properties_missing(vim, collector, mobj,
type, properties):
fake_objects = fake.FakeRetrieveResult()
ml = [fake.MissingProperty()]
fake_objects.add_object(fake.ObjectContent(None, missing_list=ml))
return fake_objects
class VMwareVIMUtilTestCase(test.NoDBTestCase):
def test_get_dynamic_properties_missing(self):
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.vmwareapi.vim_util.get_object_properties',
_fake_get_object_properties))
res = vim_util.get_dynamic_property('fake-vim', 'fake-obj',
'fake-type', 'fake-property')
self.assertIsNone(res)
def test_get_dynamic_properties_missing_path_exists(self):
self.useFixture(fixtures.MonkeyPatch(
'nova.virt.vmwareapi.vim_util.get_object_properties',
_fake_get_object_properties_missing))
res = vim_util.get_dynamic_property('fake-vim', 'fake-obj',
'fake-type', 'fake-property')
self.assertIsNone(res)
|
{
"content_hash": "7ffa483538499db5335b11adb29972e5",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 73,
"avg_line_length": 37.94871794871795,
"alnum_prop": 0.6263513513513513,
"repo_name": "SUSE-Cloud/nova",
"id": "495ee4a47b3f9d55c68ede68da9e698c2495ded2",
"size": "2134",
"binary": false,
"copies": "4",
"ref": "refs/heads/stable/havana",
"path": "nova/tests/virt/vmwareapi/test_vmwareapi_vim_util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13441452"
},
{
"name": "Shell",
"bytes": "20579"
}
],
"symlink_target": ""
}
|
import os
from .repository import require_repo
from .config import set_value
themes_dir = '_themes'
default_theme = 'default'
class ThemeNotFoundError(Exception):
"""Indicates the requested theme was not found."""
def __init__(self, theme):
super(ThemeNotFoundError, self).__init__()
self.theme = theme
def list_themes(directory=None):
"""Gets a list of the installed themes."""
repo = require_repo(directory)
path = os.path.join(repo, themes_dir)
return os.listdir(path) if os.path.isdir(path) else None
def use_theme(theme, directory=None):
"""Switches to the specified theme. This returns False if switching to the already active theme."""
repo = require_repo(directory)
if theme not in list_themes(directory):
raise ThemeNotFoundError(theme)
old_theme = set_value(repo, 'theme', theme)
return old_theme != theme
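# Illustrative usage (hypothetical session; assumes a repository whose _themes/
# directory contains the default theme):
#
#   if use_theme(default_theme):
#       print('switched to %s' % default_theme)
#   else:
#       print('%s was already the active theme' % default_theme)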
|
{
"content_hash": "8fc4ec42a23e9288b94633d3e427b2d9",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 103,
"avg_line_length": 28.806451612903224,
"alnum_prop": 0.6886898096304591,
"repo_name": "joeyespo/gitpress",
"id": "a18cba26276334eda71b55a0ec5f16e85fcf54ea",
"size": "893",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gitpress/themes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18798"
}
],
"symlink_target": ""
}
|
import tkinter as Tk
from tkinter import filedialog, simpledialog, colorchooser
# An implementation with many features without OOP
def init_menubar():
# Create menubar
menubar = Tk.Menu()
# Set menubar as menu for root
root.config(menu=menubar)
# Fill menubar with "File" menu
filemenu = Tk.Menu(menubar, tearoff=0)
filemenu.add_command(label="New", command=on_new, accelerator="Ctrl+N")
filemenu.add_command(label="Open...", command=on_open, accelerator="Ctrl+O")
filemenu.add_command(label="Save", command=on_save, accelerator="Ctrl+S")
filemenu.add_command(label="Save as...", command=on_save_as)
filemenu.add_separator()
filemenu.add_command(label="Exit", command=on_exit, accelerator="Ctrl+Q")
menubar.add_cascade(label="File", menu=filemenu)
# Fill menubar with "Edit" menu
editmenu = Tk.Menu(menubar, tearoff=0)
editmenu.add_command(label="Find", command=on_find, accelerator="Ctrl+F")
editmenu.add_command(label="Select all", command=on_select_all, accelerator="Ctrl+A")
menubar.add_cascade(label="Edit", menu=editmenu)
# Fill menubar with "Settings" menu
settingsmenu = Tk.Menu(menubar, tearoff=0)
settingsmenu.add_command(label="Text color...", command=on_text_color)
settingsmenu.add_command(label="Background color...", command=on_background_color)
menubar.add_cascade(label="Settings", menu=settingsmenu)
# Fill menubar with "Help" menu
helpmenu = Tk.Menu(menubar, tearoff=0)
helpmenu.add_command(label="About", command=on_about)
menubar.add_cascade(label="Help", menu=helpmenu)
def init_shortcuts():
root.bind_all('<Control-n>', handle_shortcuts)
root.bind_all('<Control-o>', handle_shortcuts)
root.bind_all('<Control-s>', handle_shortcuts)
root.bind_all('<Control-q>', handle_shortcuts)
root.bind_all('<Control-a>', handle_shortcuts)
root.bind_all('<Control-f>', handle_shortcuts)
# FILE MENU ACTIONS
def on_new():
global path
path = ''
delete_all_text()
def on_open():
global path
dialog = filedialog.Open()
new_path = dialog.show()
if new_path != '':
text = read_file(new_path)
delete_all_text()
text_input.insert('1.0', text)
path = new_path
def on_save():
global path
new_path = path
if new_path == '':
dialog = filedialog.SaveAs(defaultextension='txt')
new_path = dialog.show()
if new_path:
path = new_path
text = get_all_text()
save_file(path, text)
def on_save_as():
global path
dialog = filedialog.SaveAs(defaultextension='txt')
new_path = dialog.show()
if new_path:
text = get_all_text()
save_file(new_path, text)
path = new_path
def on_exit():
f = open("settings.txt", 'w')
f.write("background_color=" + settings['background_color'] + "\n")
f.write("text_color=" + settings['text_color'] + "\n")
f.close()
quit()
# EDIT MENU ACTIONS
def on_find():
target = simpledialog.askstring("Simple Text Editor", "Search for:")
if target:
index = text_input.search(target, Tk.INSERT, Tk.END)
if not index:
index = text_input.search(target, '1.0', Tk.END)
if index:
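            # Tk text indices support offset arithmetic: appending '+%dc'
            # yields the position len(target) characters past the match
            # start, i.e. the end of the matched text.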
length = index + ('+%dc' % len(target))
text_input.tag_add(Tk.SEL, index, length)
text_input.mark_set(Tk.INSERT, length)
text_input.see(Tk.INSERT)
text_input.focus()
def on_select_all():
if get_all_text() != '':
text_input.tag_add(Tk.SEL, '1.0', Tk.END)
text_input.mark_set(Tk.INSERT, '1.0')
text_input.see(Tk.INSERT)
# SETTINGS MENU ACTIONS
def on_background_color():
    (rgb, hex) = colorchooser.askcolor(settings['background_color'])
    if hex:  # askcolor returns (None, None) when the dialog is cancelled
        settings['background_color'] = hex
        text_input.config(bg=hex)
def on_text_color():
    (rgb, hex) = colorchooser.askcolor(settings['text_color'])
    if hex:  # askcolor returns (None, None) when the dialog is cancelled
        settings['text_color'] = hex
        text_input.config(fg=hex)
# HELP MENU ACTIONS
def on_about():
top = Tk.Toplevel(root)
top.title("About")
top.resizable(width=False, height=False)
top.geometry('%dx%d+%d+%d' %
(200, 120, root.winfo_x() + 50, root.winfo_y() + 50))
top.focus()
about_message = "Simple Text Editor is a simple" \
" text editor made for educational purposes."
top.update()
msg = Tk.Message(top, text=about_message, pady=10, width=top.winfo_width())
msg.pack()
button = Tk.Button(top, text="OK", command=top.destroy, width=8)
button.pack(side=Tk.BOTTOM, pady=10)
def load_settings():
dictionary = {}
try:
f = open('settings.txt', 'r')
for line in f:
(key, val) = line.strip().split('=')
dictionary[key] = val
f.close()
return dictionary
except IOError:
f = open('settings.txt', "w")
f.write("background_color=" + "#000000\n")
f.write("text_color=" + "#FFFFFFF\n")
f.close()
return {"background_color": "#FFFFFF",
"text_color": "#000000"}
# KEYBOARD SHORTCUT HANDLER
def handle_shortcuts(event):
functions = {'n': on_new,
'o': on_open,
's': on_save,
'q': on_exit,
'a': on_select_all,
'f': on_find}
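    # Dispatch-table pattern: event.keysym selects the handler; only the six
    # Control-<key> combinations bound in init_shortcuts can reach this
    # function, so no KeyError guard is needed.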
func = functions[event.keysym]
func()
# HELPERS
def get_all_text():
return text_input.get('1.0', 'end-1c')
def delete_all_text():
text_input.delete('1.0', Tk.END)
def save_file(save_path, text):
file_to_save = open(save_path, 'w')
file_to_save.write(text)
file_to_save.close()
def read_file(file_path):
    # Use a context manager so the file handle is closed after reading.
    with open(file_path, 'r') as file_content:
        text = file_content.read()
    return text
def center_window():
w = 400
h = 400
sw = root.winfo_screenwidth()
sh = root.winfo_screenheight()
x = (sw - w) / 2
y = (sh - h) / 2
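    # geometry('WxH+X+Y') sets the size to W x H pixels and puts the top-left
    # corner at (X, Y); e.g. a 400x400 window on a 1920x1080 screen is
    # centered with '400x400+760+340'.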
root.geometry('%dx%d+%d+%d' % (w, h, x, y))
root = Tk.Tk()
root.title("Simple Text Editor")
root.protocol('WM_DELETE_WINDOW', on_exit) # Call custom exit method when exiting
path = ''
settings = load_settings()
center_window()
init_menubar()
# Create scrollbar
scrollbar = Tk.Scrollbar(orient=Tk.VERTICAL)
scrollbar.pack(side=Tk.RIGHT, fill=Tk.Y)
# Create text box
text_input = Tk.Text(yscrollcommand=scrollbar.set, height=100, width=80,
bg=settings['background_color'],
fg=settings['text_color'])
text_input.pack(side=Tk.LEFT, fill=Tk.BOTH, expand=1)
scrollbar.config(command=text_input.yview)
init_shortcuts()
root.mainloop()
|
{
"content_hash": "3f901352a8d324ab820c31aa73901481",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 89,
"avg_line_length": 26.422310756972113,
"alnum_prop": 0.6117310012062727,
"repo_name": "introprogramming/exercises",
"id": "ee9e60347a6d3dfbef9ed6464aa5c082b9708fb8",
"size": "6657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exercises/text-editor/textedfull.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "60831"
},
{
"name": "Python",
"bytes": "172264"
},
{
"name": "Shell",
"bytes": "171"
}
],
"symlink_target": ""
}
|
"""OpenSSL/M2Crypto 3DES implementation."""
from .cryptomath import *
from .tripledes import *
if m2cryptoLoaded:
def new(key, mode, IV):
return OpenSSL_TripleDES(key, mode, IV)
class OpenSSL_TripleDES(TripleDES):
def __init__(self, key, mode, IV):
TripleDES.__init__(self, key, mode, IV, "openssl")
self.key = key
self.IV = IV
def _createContext(self, encrypt):
context = m2.cipher_ctx_new()
cipherType = m2.des_ede3_cbc()
m2.cipher_init(context, cipherType, self.key, self.IV, encrypt)
return context
def encrypt(self, plaintext):
TripleDES.encrypt(self, plaintext)
context = self._createContext(1)
ciphertext = m2.cipher_update(context, plaintext)
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return ciphertext
def decrypt(self, ciphertext):
TripleDES.decrypt(self, ciphertext)
context = self._createContext(0)
#I think M2Crypto has a bug - it fails to decrypt and return the last block passed in.
#To work around this, we append sixteen zeros to the string, below:
plaintext = m2.cipher_update(context, ciphertext+('\0'*16))
#If this bug is ever fixed, then plaintext will end up having a garbage
#plaintext block on the end. That's okay - the below code will ignore it.
plaintext = plaintext[:len(ciphertext)]
m2.cipher_ctx_free(context)
self.IV = ciphertext[-self.block_size:]
return plaintext
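    # Illustrative usage (a sketch with hypothetical values; in tlslite's
    # cipher wrappers mode 2 denotes CBC, and 3DES expects a 24-byte key
    # with an 8-byte IV; inputs must be multiples of the 8-byte block size):
    #
    #   cipher = new('0123456789abcdef01234567', 2, '\x00' * 8)
    #   ciphertext = cipher.encrypt('16 byte message!')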
|
{
"content_hash": "5a716a6c30544b693cbb907a9ccc291b",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 98,
"avg_line_length": 37.90909090909091,
"alnum_prop": 0.5989208633093526,
"repo_name": "splunk/splunk-webframework",
"id": "3f014dabc8b2665a50e3c78465a63443e7c0bd42",
"size": "1766",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "contrib/tlslite/tlslite/utils/openssl_tripledes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1808"
},
{
"name": "CSS",
"bytes": "122646"
},
{
"name": "HTML",
"bytes": "113362"
},
{
"name": "JavaScript",
"bytes": "5135595"
},
{
"name": "Python",
"bytes": "6298367"
},
{
"name": "Shell",
"bytes": "1368"
}
],
"symlink_target": ""
}
|
"""
homeassistant.components.switch.wemo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for WeMo switches.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/switch.wemo/
"""
import logging
from homeassistant.components.switch import SwitchDevice
from homeassistant.const import (
STATE_ON, STATE_OFF, STATE_STANDBY, EVENT_HOMEASSISTANT_STOP)
REQUIREMENTS = ['pywemo==0.3.9']
_LOGGER = logging.getLogger(__name__)
_WEMO_SUBSCRIPTION_REGISTRY = None
ATTR_SENSOR_STATE = "sensor_state"
ATTR_SWITCH_MODE = "switch_mode"
MAKER_SWITCH_MOMENTARY = "momentary"
MAKER_SWITCH_TOGGLE = "toggle"
# pylint: disable=unused-argument, too-many-function-args
def setup_platform(hass, config, add_devices_callback, discovery_info=None):
""" Find and return WeMo switches. """
import pywemo
import pywemo.discovery as discovery
global _WEMO_SUBSCRIPTION_REGISTRY
if _WEMO_SUBSCRIPTION_REGISTRY is None:
_WEMO_SUBSCRIPTION_REGISTRY = pywemo.SubscriptionRegistry()
_WEMO_SUBSCRIPTION_REGISTRY.start()
def stop_wemo(event):
""" Shutdown Wemo subscriptions and subscription thread on exit"""
_LOGGER.info("Shutting down subscriptions.")
_WEMO_SUBSCRIPTION_REGISTRY.stop()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_wemo)
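    # The registry is kept module-global so that every discovered switch shares
    # a single subscription thread; stop_wemo tears it down when Home Assistant
    # shuts down.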
if discovery_info is not None:
location = discovery_info[2]
mac = discovery_info[3]
device = discovery.device_from_description(location, mac)
if device:
add_devices_callback([WemoSwitch(device)])
return
_LOGGER.info("Scanning for WeMo devices.")
switches = pywemo.discover_devices()
# Filter out the switches and wrap in WemoSwitch object
add_devices_callback(
[WemoSwitch(switch) for switch in switches
if isinstance(switch, pywemo.Switch)])
class WemoSwitch(SwitchDevice):
""" Represents a WeMo switch. """
def __init__(self, wemo):
self.wemo = wemo
self.insight_params = None
self.maker_params = None
_WEMO_SUBSCRIPTION_REGISTRY.register(wemo)
_WEMO_SUBSCRIPTION_REGISTRY.on(
wemo, None, self._update_callback)
def _update_callback(self, _device, _params):
""" Called by the wemo device callback to update state. """
_LOGGER.info(
'Subscription update for %s',
_device)
self.update_ha_state(True)
@property
def should_poll(self):
""" No polling needed with subscriptions """
return False
@property
def unique_id(self):
""" Returns the id of this WeMo switch """
return "{}.{}".format(self.__class__, self.wemo.serialnumber)
@property
def name(self):
""" Returns the name of the switch if any. """
return self.wemo.name
@property
def device_state_attributes(self):
attr = {}
if self.maker_params:
# Is the maker sensor on or off.
if self.maker_params['hassensor']:
# Note a state of 1 matches the WeMo app 'not triggered'!
if self.maker_params['sensorstate']:
attr[ATTR_SENSOR_STATE] = STATE_OFF
else:
attr[ATTR_SENSOR_STATE] = STATE_ON
# Is the maker switch configured as toggle(0) or momentary (1).
if self.maker_params['switchmode']:
attr[ATTR_SWITCH_MODE] = MAKER_SWITCH_MOMENTARY
else:
attr[ATTR_SWITCH_MODE] = MAKER_SWITCH_TOGGLE
return attr
@property
def state(self):
""" Returns the state. """
is_on = self.is_on
if not is_on:
return STATE_OFF
elif self.is_standby:
return STATE_STANDBY
return STATE_ON
@property
def current_power_mwh(self):
""" Current power usage in mwh. """
if self.insight_params:
return self.insight_params['currentpower']
@property
def today_power_mw(self):
""" Today total power usage in mw. """
if self.insight_params:
return self.insight_params['todaymw']
@property
def is_standby(self):
""" Is the device on - or in standby. """
if self.insight_params:
standby_state = self.insight_params['state']
            # Standby is actually '8', but it seems more defensive
            # to check explicitly for the On and Off states
if standby_state == '1' or standby_state == '0':
return False
else:
return True
@property
def is_on(self):
""" True if switch is on. """
return self.wemo.get_state()
@property
def available(self):
""" True if switch is available. """
if (self.wemo.model_name == 'Insight' and
self.insight_params is None):
return False
if (self.wemo.model_name == 'Maker' and
self.maker_params is None):
return False
return True
def turn_on(self, **kwargs):
""" Turns the switch on. """
self.wemo.on()
def turn_off(self):
""" Turns the switch off. """
self.wemo.off()
def update(self):
""" Update WeMo state. """
try:
self.wemo.get_state(True)
if self.wemo.model_name == 'Insight':
self.insight_params = self.wemo.insight_params
self.insight_params['standby_state'] = (
self.wemo.get_standby_state)
elif self.wemo.model_name == 'Maker':
self.maker_params = self.wemo.maker_params
except AttributeError:
_LOGGER.warning('Could not update status for %s', self.name)
|
{
"content_hash": "f17310d7c9405c64c766d6d61ac465c2",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 78,
"avg_line_length": 31.106951871657753,
"alnum_prop": 0.5908543922984356,
"repo_name": "Theb-1/home-assistant",
"id": "1d569449ff7caf7747956d297495b04153de1514",
"size": "5817",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/switch/wemo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1474810"
},
{
"name": "Python",
"bytes": "1660180"
},
{
"name": "Shell",
"bytes": "4592"
}
],
"symlink_target": ""
}
|
import unittest, random, sys, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_browse as h2b, h2o_import as h2i, h2o_glm, h2o_util, h2o_jobs, h2o_gbm, h2o_exec as h2e
DO_BUG = False
DO_HDFS = False
DO_ALL_DIGITS = False
print "Uses numpy to create dataset..I guess we have to deal with jenkins not having it"
print "uses dot product off some coefficients to create output. also correlatation with constant term in cols"
SCIPY_INSTALLED = True
try:
import scipy as sp
import numpy as np
print "Both numpy and scipy are installed. Will do extra checks"
except ImportError:
print "numpy or scipy is not installed. Will only do sort-based checking"
SCIPY_INSTALLED = False
def write_syn_dataset(csvPathname, rowCount=100, colCount=10):
# http://nbviewer.ipython.org/github/fabianp/pytron/blob/master/doc/benchmark_logistic.ipynb
# http://fa.bianp.net/blog/2013/numerical-optimizers-for-logistic-regression/
# The synthetic data used in the benchmarks was generated as described in 2 and
# consists primarily of the design matrix X being Gaussian noise,
# the vector of coefficients is drawn also from a Gaussian distribution
# and the explained variable y is generated as y=sign(Xw).
# We then perturb matrix X by adding Gaussian noise with covariance 0.8.
corr = 1. # 0., 1., 10.
n_samples = rowCount
n_features = colCount
np.random.seed(0)
X = np.random.randn(n_samples, n_features)
w = np.random.randn(n_features)
# np.sign returns sign
y = np.sign(X.dot(w))
X += 0.8 * np.random.randn(n_samples, n_features) # add noise
X+= corr # this makes it correlated by adding a constant term
# X = np.hstack((X, np.ones((X.shape[0], 1)))) # add a column of ones for intercept
print X.shape
print y.shape
# concatenate X and y columns together so we can write a csv
y2 = np.reshape(y, (X.shape[0], 1))
Xy = np.hstack((X, y2))
np.savetxt(csvPathname, Xy, delimiter=',', fmt='%5.4f')
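    # Sanity sketch (hypothetical check, not part of the test): labels are
    # +/-1, since np.sign of a Gaussian dot product is zero only on a
    # measure-zero event:
    #
    #   Xy2 = np.loadtxt(csvPathname, delimiter=',')
    #   assert np.all(np.abs(Xy2[:, -1]) == 1)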
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
# assume we're at 0xdata with it's hdfs namenode
h2o.init(1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_GLM2_mnist(self):
if not SCIPY_INSTALLED:
pass
else:
SYNDATASETS_DIR = h2o.make_syn_dir()
csvFilelist = [
(10000, 500, 'cA', 60),
]
trial = 0
for (rowCount, colCount, hex_key, timeoutSecs) in csvFilelist:
trialStart = time.time()
# PARSE test****************************************
csvFilename = 'syn_' + "binary" + "_" + str(rowCount) + 'x' + str(colCount) + '.csv'
csvPathname = SYNDATASETS_DIR + "/" + csvFilename
write_syn_dataset(csvPathname, rowCount, colCount)
start = time.time()
parseResult = h2i.import_parse(path=csvPathname, schema='put',
hex_key=hex_key, timeoutSecs=timeoutSecs)
elapsed = time.time() - start
print "parse end on ", csvFilename, 'took', elapsed, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
# GLM****************************************
modelKey = 'GLM_model'
y = colCount
kwargs = {
'response': 'C' + str(y+1),
'family': 'binomial',
'lambda': 1e-4,
'alpha': 0,
'max_iter': 15,
'n_folds': 1,
'beta_epsilon': 1.0E-4,
'destination_key': modelKey,
}
# GLM wants the output col to be strictly 0,1 integer
execExpr = "aHack=%s; aHack[,%s] = aHack[,%s]==1" % (hex_key, y+1, y+1)
h2e.exec_expr(execExpr=execExpr, timeoutSecs=30)
aHack = {'destination_key': 'aHack'}
timeoutSecs = 1800
start = time.time()
glm = h2o_cmd.runGLM(parseResult=aHack, timeoutSecs=timeoutSecs, pollTimeoutSecs=60, **kwargs)
elapsed = time.time() - start
print "GLM completed in", elapsed, "seconds.", \
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
h2o_glm.simpleCheckGLM(self, glm, None, noPrint=True, **kwargs)
modelKey = glm['glm_model']['_key']
                # This seems wrong... what's the format of the cm?
lambdaMax = glm['glm_model']['lambda_max']
print "lambdaMax:", lambdaMax
best_threshold= glm['glm_model']['submodels'][0]['validation']['best_threshold']
print "best_threshold", best_threshold
# pick the middle one?
cm = glm['glm_model']['submodels'][0]['validation']['_cms'][5]['_arr']
print "cm:", cm
                pctWrong = h2o_gbm.pp_cm_summary(cm)
# self.assertLess(pctWrong, 9,"Should see less than 9% error (class = 4)")
print "\nTrain\n==========\n"
print h2o_gbm.pp_cm(cm)
# Score *******************************
                # this messes up if you use case_mode/case_val above
print "\nPredict\n==========\n"
predictKey = 'Predict.hex'
start = time.time()
predictResult = h2o_cmd.runPredict(
data_key='aHack',
model_key=modelKey,
destination_key=predictKey,
timeoutSecs=timeoutSecs)
predictCMResult = h2o.nodes[0].predict_confusion_matrix(
actual='aHack',
vactual='C' + str(y+1),
predict=predictKey,
vpredict='predict',
)
cm = predictCMResult['cm']
# These will move into the h2o_gbm.py
                pctWrong = h2o_gbm.pp_cm_summary(cm)
self.assertLess(pctWrong, 50,"Should see less than 50% error")
print "\nTest\n==========\n"
print h2o_gbm.pp_cm(cm)
if __name__ == '__main__':
h2o.unit_main()
|
{
"content_hash": "7756cdd6ac79626214eae632182881d7",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 112,
"avg_line_length": 38.529761904761905,
"alnum_prop": 0.5300478912405376,
"repo_name": "eg-zhang/h2o-2",
"id": "a9ed7becb880268808a52042e8e9fad3eca5862e",
"size": "6473",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "py/testdir_single_jvm/test_GLM2_syn_corr.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7065"
},
{
"name": "C",
"bytes": "2461"
},
{
"name": "CSS",
"bytes": "216906"
},
{
"name": "CoffeeScript",
"bytes": "205094"
},
{
"name": "Emacs Lisp",
"bytes": "7446"
},
{
"name": "Groovy",
"bytes": "518"
},
{
"name": "HTML",
"bytes": "177967"
},
{
"name": "Java",
"bytes": "5177683"
},
{
"name": "JavaScript",
"bytes": "42958"
},
{
"name": "Makefile",
"bytes": "50927"
},
{
"name": "PHP",
"bytes": "8490"
},
{
"name": "Perl",
"bytes": "22594"
},
{
"name": "Python",
"bytes": "3244626"
},
{
"name": "R",
"bytes": "1631216"
},
{
"name": "Ruby",
"bytes": "299"
},
{
"name": "Scala",
"bytes": "39365"
},
{
"name": "Shell",
"bytes": "189829"
}
],
"symlink_target": ""
}
|